id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
166,312 | import collections
import contextlib
import copy
import threading
import typing
from typing import Callable, Iterator, MutableMapping, Optional
import cachetools
from tfx.orchestration import metadata
from google.protobuf.internal import containers
from ml_metadata.proto import metadata_store_pb2
_execution_cache = _ExecutionCache()
The provided code snippet includes necessary dependencies for implementing the `clear_in_memory_state` function. Write a Python function `def clear_in_memory_state()` to solve the following problem:
Clears cached state. Useful in tests.
Here is the function:
def clear_in_memory_state():
  """Clears cached state. Useful in tests."""
  # Delegates to the module-level _ExecutionCache singleton declared above.
  _execution_cache.clear_cache() | Clears cached state. Useful in tests. |
166,313 | from concurrent import futures
import contextlib
import dataclasses
import queue
import threading
from typing import Any, Callable, List, Optional, Union
from absl import logging
from tfx.orchestration.experimental.core import task as task_lib
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
ObserverFn = Callable[[Event], None]
_event_observer = None
_event_observer_lock = threading.Lock()
The provided code snippet includes necessary dependencies for implementing the `register_observer` function. Write a Python function `def register_observer(observer_fn: ObserverFn) -> None` to solve the following problem:
Register an observer. Registers an observer. The observer function will be called whenever an event triggers. Silently does nothing if not in an init() context. Args: observer_fn: A function that takes in an Event.
Here is the function:
def register_observer(observer_fn: ObserverFn) -> None:
  """Register an observer.

  Registers an observer. The observer function will be called whenever an event
  triggers.

  Silently does nothing if not in an init() context.

  Args:
    observer_fn: A function that takes in an Event.
  """
  global _event_observer
  global _event_observer_lock
  # The lock serializes access to the module-level observer singleton.
  with _event_observer_lock:
    # _event_observer is None outside an init() context; silently no-op then.
    if _event_observer:
      _event_observer.register_observer(observer_fn) | Register an observer. Registers an observer. The observer function will be called whenever an event triggers. Silently does nothing if not in an init() context. Args: observer_fn: A function that takes in an Event. |
166,314 | from concurrent import futures
import contextlib
import dataclasses
import queue
import threading
from typing import Any, Callable, List, Optional, Union
from absl import logging
from tfx.orchestration.experimental.core import task as task_lib
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
_event_observer = None
_event_observer_lock = threading.Lock()
def testonly_wait() -> None:
  """Waits on the active event observer; test-only helper.

  NOTE(review): delegates to the observer's testonly_wait(); presumably this
  drains pending events -- confirm against the _EventObserver implementation.

  Raises:
    RuntimeError: If called outside an active init() context.
  """
  global _event_observer
  global _event_observer_lock
  with _event_observer_lock:
    # Unlike register_observer(), calling this outside init() is an error.
    if not _event_observer:
      raise RuntimeError(
          "testonly_wait should only be called in an active init() context")
    _event_observer.testonly_wait() | null |
166,315 | import builtins
import html
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from tfx.dsl.components.base.base_component import BaseComponent
from tfx.orchestration.experimental.interactive.execution_result import ExecutionResult
from tfx.types.artifact import Artifact
from tfx.types.channel import Channel
class NotebookFormatter:
  """Formats a TFX component in the context of an interactive notebook.

  NOTE(review): render() references the module-level STATIC_HTML_CONTENTS and
  render_value() references FORMATTER_REGISTRY; both are defined elsewhere in
  the original module and are not visible in this snippet.
  """

  # Default title: just the object's class name.
  _DEFAULT_TITLE_FORMAT = ('<span class="class-name">%s</span>',
                           ['__class__.__name__'])

  def __init__(
      self,
      cls: Type[Any],
      attributes: Optional[List[str]] = None,
      title_format: Optional[
          Tuple[str, List[Union[str, Callable[..., Any]]]]] = None,
      _show_artifact_attributes: Optional[bool] = False):
    """Constructs a NotebookFormatter.

    Args:
      cls: The TFX class to be formatted by this NotebookFormatter instance.
      attributes: A list of string attributes that are to be displayed by this
        formatter. Can be a nested field specifier with nested attribute names
        separated by "." (e.g. to get `obj.a.b`, specify the attribute string
        "a.b").
      title_format: A 2-tuple consisting of (1) a format string and (2) a list
        of either string attribute names (possibly nested field specifiers as
        in "attributes" above) or callback callable objects taking as input the
        object to be formatted and returning the value for that position of the
        format string. If not specified, the default title format will be used.
      _show_artifact_attributes: For a formatter of an Artifact object, show
        the Artifact type-specific properties for each artifact.
    """
    self.cls = cls
    self.attributes = attributes or []
    self.title_format = title_format or NotebookFormatter._DEFAULT_TITLE_FORMAT
    self._show_artifact_attributes = _show_artifact_attributes

  def _extended_getattr(self, obj: object, property_name: str) -> object:
    """Get a possibly nested attribute of a given object."""
    # title_format entries may be callables that take the object directly.
    if callable(property_name):
      return property_name(obj)
    # Walk dotted paths: "a.b" -> getattr(getattr(obj, 'a'), 'b').
    parts = property_name.split('.')
    current = obj
    for part in parts:
      current = getattr(current, part)
    return current

  def render(
      self,
      obj: Any,
      expanded: bool = True,
      seen_elements: Optional[Set[Any]] = None) -> str:
    """Render a given object as an HTML string.

    Args:
      obj: The object to be rendered.
      expanded: Whether the object is to be expanded by default.
      seen_elements: Optionally, a set of seen elements to not re-render to
        prevent a rendering cycle.

    Returns:
      Formatted HTML string representing the object, for notebook display.

    Raises:
      ValueError: If `obj` is not an instance of the class this formatter
        was constructed for.
    """
    seen_elements = seen_elements or set()
    # Cycle guard keyed on object identity.
    if id(obj) in seen_elements:
      return '(recursion in rendering object)'
    seen_elements.add(id(obj))
    if not isinstance(obj, self.cls):
      raise ValueError('Expected object of type %s but got %s.' %
                       (self.cls, obj))
    # NOTE(review): id(obj) is removed *before* render_attributes() executes
    # in the return expression below, so cycles reached back through this
    # object's own attributes may not be caught -- confirm intended ordering.
    seen_elements.remove(id(obj))
    return STATIC_HTML_CONTENTS + (
        '<div class="tfx-object%s">'
        '<div class = "title" onclick="toggleTfxObject(this)">'
        '<span class="expansion-marker"></span>'
        '%s<span class="deemphasize"> at 0x%x</span></div>%s'
        '</div>') % (' expanded' if expanded else ' collapsed',
                     self.render_title(obj), id(obj),
                     self.render_attributes(obj, seen_elements))

  def render_title(self, obj: object) -> str:
    """Render the title section of an object."""
    title_format = self.title_format
    values = []
    for property_name in title_format[1]:
      values.append(self._extended_getattr(obj, property_name))
    # NOTE(review): title values are interpolated without html.escape(),
    # unlike render_value() -- verify title content is trusted.
    return title_format[0] % tuple(values)

  def render_value(self, value: Any, seen_elements: Set[Any]) -> str:
    """Render the value section of an object."""
    # Default rendering: HTML-escaped str(); specialized below when possible.
    formatted_value = html.escape(str(value))
    if isinstance(value, dict):
      formatted_value = self.render_dict(value, seen_elements)
    if isinstance(value, list):
      formatted_value = self.render_list(value, seen_elements)
    if not issubclass(value.__class__, type):
      # Metaclass does not have mro().
      # First registered formatter found along the value's MRO wins.
      for cls in value.__class__.mro():
        if cls in FORMATTER_REGISTRY:
          formatted_value = FORMATTER_REGISTRY[cls].render(
              value, expanded=False, seen_elements=seen_elements)
          break
    return formatted_value

  def render_attributes(self, obj: Any, seen_elements: Set[Any]) -> str:
    """Render the attributes section of an object."""
    if self._show_artifact_attributes and isinstance(obj, Artifact):
      # Append the artifact's type-specific properties, sorted by name.
      artifact_attributes = sorted((obj.PROPERTIES or {}).keys())
      attributes = self.attributes + artifact_attributes
    else:
      attributes = self.attributes
    attr_trs = []
    for property_name in attributes:
      value = self._extended_getattr(obj, property_name)
      value = self.render_value(value, seen_elements)
      attr_trs.append(
          ('<tr><td class="attr-name">.%s</td>'
           '<td class = "attrvalue">%s</td></tr>') % (property_name, value))
    return '<table class="attr-table">%s</table>' % ''.join(attr_trs)

  def render_dict(self, obj: Dict[Any, Any], seen_elements: Set[Any]) -> str:
    """Render a dictionary table."""
    if not obj:
      return '{}'
    attr_trs = []
    for key, value in obj.items():
      value = self.render_value(value, seen_elements)
      # NOTE(review): %r of the escaped key string adds quotes around
      # already-escaped text (e.g. [&#x27;key&#x27;]) -- confirm this is
      # intended rather than escaping after repr().
      attr_trs.append(('<tr><td class="attr-name">[%r]</td>'
                       '<td class = "attrvalue">%s</td></tr>') %
                      (html.escape(str(key)), value))
    return '<table class="attr-table">%s</table>' % ''.join(attr_trs)

  def render_list(self, obj: List[Any], seen_elements: Set[Any]) -> str:
    """Render a list table."""
    if not obj:
      return '[]'
    attr_trs = []
    for i, value in enumerate(obj):
      value = self.render_value(value, seen_elements)
      attr_trs.append(('<tr><td class="attr-name">[%d]</td>'
                       '<td class = "attrvalue">%s</td></tr>') % (i, value))
    return '<table class="attr-table">%s</table>' % ''.join(attr_trs)
def _create_formatters(formatters_spec):
result = {}
for cls, kwargs in formatters_spec.items():
formatter = NotebookFormatter(cls, **kwargs)
result[cls] = formatter
return result | null |
166,316 | import builtins
import html
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from tfx.dsl.components.base.base_component import BaseComponent
from tfx.orchestration.experimental.interactive.execution_result import ExecutionResult
from tfx.types.artifact import Artifact
from tfx.types.channel import Channel
# Maps each TFX class to its NotebookFormatter. render_value() walks a
# value's MRO and uses the first class found in this registry.
FORMATTER_REGISTRY = _create_formatters({
    Artifact: {
        'attributes': ['type', 'uri'],
        '_show_artifact_attributes': True,
        'title_format': (('<span class="class-name">Artifact</span> of type '
                          '<span class="class-name">%r</span> (uri: %s)'),
                         ['type_name', 'uri']),
    },
    BaseComponent: {
        'attributes': ['inputs', 'outputs', 'exec_properties']
    },
    Channel: {
        'attributes': ['type_name', '_artifacts'],
        'title_format': (
            ('<span class="class-name">Channel</span> of type '
             '<span class="class-name">%r</span> (%d artifact%s)'),
            [
                'type_name',
                # Callable title parts receive the Channel object itself.
                lambda o: len(o._artifacts),  # pylint: disable=protected-access
                lambda o: '' if len(o._artifacts) == 1 else 's'  # pylint: disable=protected-access
            ]),
    },
    ExecutionResult: {
        'attributes': [
            'execution_id', 'component', 'component.inputs', 'component.outputs'
        ]
    },
})
The provided code snippet includes necessary dependencies for implementing the `register_formatters` function. Write a Python function `def register_formatters()` to solve the following problem:
Register HTML notebook formatters for TFX classes. This method registers HTML formatters for TFX classes for display in IPython / Jupyter / Colab notebooks. No action will be performed if called outside a notebook environment.
Here is the function:
def register_formatters():
  """Register HTML notebook formatters for TFX classes.

  This method registers HTML formatters for TFX classes for display in
  IPython / Jupyter / Colab notebooks. No action will be performed if called
  outside a notebook environment.
  """
  # __IPYTHON__ is injected into builtins by IPython itself.
  if getattr(builtins, '__IPYTHON__', None):
    # Skip registration if (1) IPython is not installed or (2) if IPython is
    # installed but we are not running in the notebook context (in this case,
    # get_ipython() returns None).
    try:
      # NOTE(review): __import__('IPython.core.getipython') returns the
      # top-level `IPython` package (fromlist is empty), so get_ipython()
      # resolves on the package itself -- confirm this is intentional.
      ipython = __import__('IPython.core.getipython').get_ipython()
      if not ipython:
        return
    except ImportError:
      return
    # Attach one renderer per class to IPython's text/html display formatter.
    html_formatter = ipython.display_formatter.formatters['text/html']
    for cls, formatter in FORMATTER_REGISTRY.items():
      html_formatter.for_type(cls, formatter.render) | Register HTML notebook formatters for TFX classes. This method registers HTML formatters for TFX classes for display in IPython / Jupyter / Colab notebooks. No action will be performed if called outside a notebook environment. |
166,317 | from IPython.core.magic import cell_magic
from IPython.core.magic import Magics
from IPython.core.magic import magics_class
class SkipMagics(Magics):
  """Cell magics for marking notebook cells to be skipped on pipeline export.

  NOTE(review): the imports above bring in `magics_class` and `cell_magic`,
  but neither decorator is applied here; IPython magic classes are normally
  decorated with @magics_class and magic methods with @cell_magic. Confirm
  whether the decorators were lost in extraction.
  """

  def skip_for_export(self, line, cell):
    """Runs `cell` normally now; marks it to be dropped during export."""
    # Execute the cell normally for now. During export to pipeline, this cell
    # will be skipped.
    self.shell.run_cell(cell)
    print('This cell will be skipped during export to pipeline.')
def load_ipython_extension(ipython):
  """Entry point invoked by IPython's extension loader to install SkipMagics."""
  ipython.register_magics(SkipMagics) | null |
166,318 | import builtins
import functools
from absl import logging
The provided code snippet includes necessary dependencies for implementing the `requires_ipython` function. Write a Python function `def requires_ipython(fn)` to solve the following problem:
Decorator for methods that can only be run in IPython.
Here is the function:
def requires_ipython(fn):
"""Decorator for methods that can only be run in IPython."""
@functools.wraps(fn)
def run_if_ipython(*args, **kwargs):
"""Invokes `fn` if called from IPython, otherwise just emits a warning."""
if getattr(builtins, '__IPYTHON__', None):
# __IPYTHON__ variable is set by IPython, see
# https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython.
return fn(*args, **kwargs)
else:
logging.warning(
'Method "%s" is a no-op when invoked outside of IPython.',
fn.__name__)
return run_if_ipython | Decorator for methods that can only be run in IPython. |
166,319 | import os
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.statistics_gen import stats_artifact_utils
from tfx.orchestration.experimental.interactive import visualizations
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tensorflow_metadata.proto.v0 import anomalies_pb2
# Visualization classes registered by default via
# register_standard_visualizations().
# NOTE(review): these names are not imported in this snippet; presumably they
# are defined elsewhere in the original module -- confirm.
STANDARD_VISUALIZATIONS = frozenset([
    ExampleAnomaliesVisualization,
    ExampleStatisticsVisualization,
    ModelEvaluationVisualization,
    SchemaVisualization,
])
def register_standard_visualizations():
  """Registers every STANDARD_VISUALIZATIONS class with the global registry."""
  for visualization in STANDARD_VISUALIZATIONS:
    visualizations.get_registry().register(visualization) | null |
166,320 | import hashlib
import os
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, Union
from absl import logging
import apache_beam as beam
import pyarrow as pa
import tensorflow as tf
import tensorflow_data_validation as tfdv
import tensorflow_transform as tft
from tensorflow_transform import impl_helper
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.beam import analyzer_cache
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.tf_metadata import schema_utils
from tfx import types
from tfx.components.transform import executor_utils
from tfx.components.transform import labels
from tfx.components.transform import stats_options_util
from tfx.components.util import examples_utils
from tfx.components.util import udf_utils
from tfx.components.util import value_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
import tfx_bsl
from tfx_bsl.tfxio import tfxio as tfxio_module
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
The provided code snippet includes necessary dependencies for implementing the `_InvokeStatsOptionsUpdaterFn` function. Write a Python function `def _InvokeStatsOptionsUpdaterFn( stats_options_updater_fn: Callable[ [stats_options_util.StatsType, tfdv.StatsOptions], tfdv.StatsOptions], stats_type: stats_options_util.StatsType, schema: Optional[schema_pb2.Schema] = None, asset_map: Optional[Dict[str, str]] = None, transform_output_path: Optional[str] = None) -> tfdv.StatsOptions` to solve the following problem:
Invokes the provided stats_options_updater_fn. Args: stats_options_updater_fn: The function to call. stats_type: The stats_type to use in the function call. schema: The input schema to use in the function call. asset_map: A dictionary containing key to filename mappings. transform_output_path: The path to the transform output. Returns: The updated tfdv.StatsOptions.
Here is the function:
def _InvokeStatsOptionsUpdaterFn(
    stats_options_updater_fn: Callable[
        [stats_options_util.StatsType, tfdv.StatsOptions], tfdv.StatsOptions],
    stats_type: stats_options_util.StatsType,
    schema: Optional[schema_pb2.Schema] = None,
    asset_map: Optional[Dict[str, str]] = None,
    transform_output_path: Optional[str] = None) -> tfdv.StatsOptions:
  """Invokes the provided stats_options_updater_fn.

  Args:
    stats_options_updater_fn: The function to call.
    stats_type: The stats_type to use in the function call.
    schema: The input schema to use in the function call.
    asset_map: A dictionary containing key to filename mappings. When set,
      `transform_output_path` must also be set (vocab paths are resolved
      under it; os.path.join would fail on None).
    transform_output_path: The path to the transform output.

  Returns:
    The updated tfdv.StatsOptions.
  """
  options = {}
  if schema is not None:
    # Copy so the updater_fn cannot mutate the caller's schema in place.
    schema_copy = schema_pb2.Schema()
    schema_copy.CopyFrom(schema)
    options['schema'] = schema_copy
  if asset_map is not None:
    # Vocab files live under <transform_output>/transform_fn/<assets dir>.
    asset_path = os.path.join(transform_output_path, 'transform_fn',
                              tf.saved_model.ASSETS_DIRECTORY)
    vocab_paths = {k: os.path.join(asset_path, v) for k, v in asset_map.items()}
    options['vocab_paths'] = vocab_paths
    # NOTE(review): this flag is only enabled on the asset_map branch --
    # confirm that is intentional rather than an indentation slip.
    options['experimental_use_sketch_based_topk_uniques'] = True
  return stats_options_updater_fn(stats_type, tfdv.StatsOptions(**options)) | Invokes the provided stats_options_updater_fn. Args: stats_options_updater_fn: The function to call. stats_type: The stats_type use in the function call. schema: The input schema to use in the function call. asset_map: A dictionary containing key to filename mappings. transform_output_path: The path to the transform output. Returns: The updated tfdv.StatsOptions. |
166,321 | import hashlib
import os
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, Union
from absl import logging
import apache_beam as beam
import pyarrow as pa
import tensorflow as tf
import tensorflow_data_validation as tfdv
import tensorflow_transform as tft
from tensorflow_transform import impl_helper
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.beam import analyzer_cache
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.tf_metadata import schema_utils
from tfx import types
from tfx.components.transform import executor_utils
from tfx.components.transform import labels
from tfx.components.transform import stats_options_util
from tfx.components.util import examples_utils
from tfx.components.util import udf_utils
from tfx.components.util import value_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
import tfx_bsl
from tfx_bsl.tfxio import tfxio as tfxio_module
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
_TRANSFORM_INTERNAL_FEATURE_FOR_KEY = '__TFT_PASS_KEY__'
The provided code snippet includes necessary dependencies for implementing the `_FilterInternalColumn` function. Write a Python function `def _FilterInternalColumn( record_batch: pa.RecordBatch, internal_column_index: Optional[int] = None) -> pa.RecordBatch` to solve the following problem:
Returns shallow copy of a RecordBatch with internal column removed.
Here is the function:
def _FilterInternalColumn(
record_batch: pa.RecordBatch,
internal_column_index: Optional[int] = None) -> pa.RecordBatch:
"""Returns shallow copy of a RecordBatch with internal column removed."""
if (internal_column_index is None and
_TRANSFORM_INTERNAL_FEATURE_FOR_KEY not in record_batch.schema.names):
return record_batch
else:
internal_column_index = (
internal_column_index or
record_batch.schema.names.index(_TRANSFORM_INTERNAL_FEATURE_FOR_KEY))
# Making shallow copy since input modification is not allowed.
filtered_columns = list(record_batch.columns)
filtered_columns.pop(internal_column_index)
filtered_schema = record_batch.schema.remove(internal_column_index)
return pa.RecordBatch.from_arrays(filtered_columns, schema=filtered_schema) | Returns shallow copy of a RecordBatch with internal column removed. |
166,322 | import hashlib
import os
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, Union
from absl import logging
import apache_beam as beam
import pyarrow as pa
import tensorflow as tf
import tensorflow_data_validation as tfdv
import tensorflow_transform as tft
from tensorflow_transform import impl_helper
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.beam import analyzer_cache
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.tf_metadata import schema_utils
from tfx import types
from tfx.components.transform import executor_utils
from tfx.components.transform import labels
from tfx.components.transform import stats_options_util
from tfx.components.util import examples_utils
from tfx.components.util import udf_utils
from tfx.components.util import value_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
import tfx_bsl
from tfx_bsl.tfxio import tfxio as tfxio_module
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
The provided code snippet includes necessary dependencies for implementing the `_GetCacheableDatasetsCount` function. Write a Python function `def _GetCacheableDatasetsCount(num_analyzers: int, stats_enabled: bool) -> int` to solve the following problem:
Returns the number of datasets which should be cached, based on a heuristic. We allow pipelines with a small number of analyzers (0-50) to cache many datasets, and restrict it further as the number of analyzers increases, so that a pipeline which has hundreds of analyzers can only cache a few datasets. If a pipeline does not enable statistics generation then we estimate that it can cache double the amount of datasets since it will have a smaller graph representation. See go/tft-incremental-cache-design. Args: num_analyzers: The number of cacheable analyzers in the TFT pipeline. stats_enabled: Whether or not pre/post transform statistics are enabled in this pipeline. Returns: The number of datasets that this pipeline should compute cache for.
Here is the function:
def _GetCacheableDatasetsCount(num_analyzers: int, stats_enabled: bool) -> int:
"""Returns the number of datasests which should be cached based on heuristic.
We allow pipelines with a small amount of analyzers (0-50) to cache many
datasets, and restrict it further as the number of analyzers increases, so
that a pipeline which has hundreds of analyzers can only cache a few datasets.
If a pipeline does not enable statistics generation then we estimate that it
can cache double the amount of datasets since it will have a smaller graph
representation. See go/tft-incremental-cache-design.
Args:
num_analyzers: The number of cacheable analyzers in the TFT pipeline.
stats_enabled: Whether or not pre/post transform statistics are enabled in
this pipeline.
Returns:
The number of datasets that this pipeline should compute cache for.
"""
result = 0
if num_analyzers <= 10:
result = 100
elif num_analyzers <= 50:
result = 50
elif num_analyzers <= 100:
result = 10
elif num_analyzers <= 150:
result = 6
elif num_analyzers <= 200:
result = 4
elif num_analyzers <= 300:
result = 2
else:
result = 1
if not stats_enabled:
result = result * 2
return result | Returns the number of datasests which should be cached based on heuristic. We allow pipelines with a small amount of analyzers (0-50) to cache many datasets, and restrict it further as the number of analyzers increases, so that a pipeline which has hundreds of analyzers can only cache a few datasets. If a pipeline does not enable statistics generation then we estimate that it can cache double the amount of datasets since it will have a smaller graph representation. See go/tft-incremental-cache-design. Args: num_analyzers: The number of cacheable analyzers in the TFT pipeline. stats_enabled: Whether or not pre/post transform statistics are enabled in this pipeline. Returns: The number of datasets that this pipeline should compute cache for. |
166,323 | import argparse
from typing import List, Tuple
import absl
from absl import app
from absl.flags import argparse_flags
import apache_beam as beam
from tfx.components.transform import executor
from tfx.components.transform import labels
from tfx.components.util import udf_utils
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
The provided code snippet includes necessary dependencies for implementing the `_run_transform` function. Write a Python function `def _run_transform(args, beam_pipeline_args)` to solve the following problem:
Construct and run transform executor.
Here is the function:
def _run_transform(args, beam_pipeline_args):
  """Construct and run transform executor.

  Args:
    args: Parsed argparse.Namespace from _parse_flags.
    beam_pipeline_args: Leftover command-line args forwarded to Beam.
  """
  absl.logging.set_verbosity(absl.logging.INFO)

  def make_beam_pipeline():
    # Deferred factory so the executor controls pipeline lifetime.
    return beam.Pipeline(beam_pipeline_args)

  # Resolve the user-supplied preprocessing_fn from its module path.
  preprocessing_fn = udf_utils.get_fn(
      {
          standard_component_specs.PREPROCESSING_FN_KEY:
              args.preprocessing_fn_path
      }, standard_component_specs.PREPROCESSING_FN_KEY)
  inputs = {
      labels.ANALYZE_DATA_PATHS_LABEL:
          args.analyze_examples,
      labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: [labels.FORMAT_TFRECORD] *
                                               len(args.analyze_examples),
      # Transform covers the analyze examples plus the transform-only ones.
      labels.TRANSFORM_DATA_PATHS_LABEL: [
          args.analyze_examples + args.transform_only_examples
      ],
      labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL:
          [labels.FORMAT_TFRECORD] *
          (len(args.analyze_examples) + len(args.transform_only_examples)),
      labels.SCHEMA_PATH_LABEL:
          args.input_schema_path,
      labels.PREPROCESSING_FN:
          preprocessing_fn,
      labels.EXAMPLES_DATA_FORMAT_LABEL:
          example_gen_pb2.PayloadFormat.Value(args.example_data_format),
      labels.DISABLE_STATISTICS_LABEL:
          args.disable_statistics,
      labels.MAKE_BEAM_PIPELINE_FN:
          make_beam_pipeline,
  }
  outputs = {
      labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: args.transform_fn,
      labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: (
          args.transformed_examples),
      labels.PER_SET_STATS_OUTPUT_PATHS_LABEL: (args.per_set_stats_outputs),
      # Temp dir is owned by the caller; the executor does not clean it up.
      labels.TEMP_OUTPUT_LABEL: args.tmp_location,
  }
  executor.TransformProcessor().Transform(inputs, outputs, args.status_file) | Construct and run transform executor. |
166,324 | import argparse
from typing import List, Tuple
import absl
from absl import app
from absl.flags import argparse_flags
import apache_beam as beam
from tfx.components.transform import executor
from tfx.components.transform import labels
from tfx.components.util import udf_utils
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
The provided code snippet includes necessary dependencies for implementing the `_parse_flags` function. Write a Python function `def _parse_flags(argv: List[str]) -> Tuple[argparse.Namespace, List[str]]` to solve the following problem:
Command-line flag parsing.
Here is the function:
def _parse_flags(argv: List[str]) -> Tuple[argparse.Namespace, List[str]]:
  """Command-line flag parsing.

  Args:
    argv: Raw command-line arguments.

  Returns:
    A (parsed namespace, unrecognized args) tuple; unrecognized args are
    passed through for the caller (e.g. to hand off to Beam).
  """
  parser = argparse_flags.ArgumentParser()
  # NOTE(review): `type=bool` below does NOT parse "False"/"0" as False --
  # argparse applies bool() to the raw string, so any non-empty value is
  # truthy. These flags only behave as expected when omitted or given an
  # empty string; confirm callers before relying on them.
  # Arguments in inputs
  parser.add_argument(
      '--input_schema_path',
      type=str,
      required=True,
      help='Path to input schema')
  # `default=''` is dead here since the flag is required.
  parser.add_argument(
      '--preprocessing_fn_path',
      type=str,
      default='',
      required=True,
      help='Path to a preprocessing_fn module')
  parser.add_argument(
      '--use_tfdv',
      type=bool,
      default=True,
      help='Deprecated and ignored. DO NOT SET.')
  parser.add_argument(
      '--disable_statistics',
      type=bool,
      default=False,
      help='Whether to disable statistics')
  # NOTE(review): with nargs='+' the default '' is a string, not a list;
  # len('') == 0 downstream, but iteration semantics differ -- verify.
  parser.add_argument(
      '--analyze_examples',
      nargs='+',
      default='',
      type=str,
      help='A space-separated list of paths to examples to be analyzed '
      'and transformed')
  parser.add_argument(
      '--transform_only_examples',
      nargs='+',
      default='',
      type=str,
      help='A space-separated list of paths to examples to be transformed only')
  parser.add_argument(
      '--example_data_format',
      type=str,
      default=example_gen_pb2.PayloadFormat.Name(
          example_gen_pb2.FORMAT_TF_EXAMPLE),
      help='Example data format')
  # Arguments in outputs
  parser.add_argument(
      '--transform_fn',
      type=str,
      required=True,
      help='Path that TFTransformOutput will write to')
  parser.add_argument(
      '--tmp_location',
      type=str,
      required=True,
      help='Path to write temporary files. Executor does not own this '
      'directory. User or caller is responsible for cleanup')
  parser.add_argument(
      '--transformed_examples',
      nargs='+',
      type=str,
      default=[],
      help='A space-separated list of paths to write transformed examples')
  parser.add_argument(
      '--per_set_stats_outputs',
      nargs='+',
      type=str,
      default=[],
      help='Paths to statistics output')
  parser.add_argument(
      '--status_file', type=str, default='', help='Path to write status')
  return parser.parse_known_args(argv) | Command lines flag parsing. |
166,325 | import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
def MaybeBindCustomConfig(inputs: Mapping[str, Any],
                          fn: Any) -> Callable[..., Any]:
  """Partially binds `custom_config` into `fn` when its signature accepts it.

  Args:
    inputs: Executor input dictionary; may carry a JSON-encoded custom config.
    fn: The user-supplied callable to (possibly) bind.

  Returns:
    `fn` unchanged, or functools.partial(fn, custom_config=...) when `fn`
    declares a `custom_config` argument.
  """
  # For compatibility, only bind custom config if it's in the signature.
  if value_utils.FunctionHasArg(fn, labels.CUSTOM_CONFIG):
    custom_config_json = value_utils.GetSoleValue(inputs, labels.CUSTOM_CONFIG)
    # Missing/empty JSON and JSON `null` are both normalized to {}.
    custom_config = (json_utils.loads(custom_config_json)
                     if custom_config_json else {}) or {}
    fn = functools.partial(fn, custom_config=custom_config)
  return fn | null |
166,326 | import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
The provided code snippet includes necessary dependencies for implementing the `ValidateOnlyOneSpecified` function. Write a Python function `def ValidateOnlyOneSpecified(inputs: Mapping[str, Any], keys: Sequence[str], allow_missing: bool = False) -> bool` to solve the following problem:
Check whether only one of given keys are specified in the input. NOTE: False-equivalent values like 0, '' are regarded as not specified. Args: inputs: input dictionary. keys: keys to check the existence of values. allow_missing: If False, one of keys should be set in inputs. Returns: True if one of the key has a value. Raises: ValueError: if none of the keys have non empty value in the input.
Here is the function:
def ValidateOnlyOneSpecified(inputs: Mapping[str, Any],
keys: Sequence[str],
allow_missing: bool = False) -> bool:
"""Check whether only one of given keys are specified in the input.
NOTE: False-equivalent values like 0, '' are regarded as not specified.
Args:
inputs: input dictionary.
keys: keys to check the existence of values.
allow_missing: If False, one of keys should be set in inputs.
Returns:
True if one of the key has a value.
Raises:
ValueError: if none of the keys have non empty value in the input.
"""
counter = 0
for key in keys:
counter += int(bool(value_utils.GetSoleValue(inputs, key, strict=False)))
keys_str = ', '.join(keys)
if counter > 1:
raise ValueError(
f'At most one of {keys_str} should be supplied in the input.')
elif counter == 0 and not allow_missing:
raise ValueError(f'One of {keys_str} should be supplied in the input.')
return counter > 0 | Check whether only one of given keys are specified in the input. NOTE: False-equivalent values like 0, '' are regarded as not specified. Args: inputs: input dictionary. keys: keys to check the existence of values. allow_missing: If False, one of keys should be set in inputs. Returns: True if one of the key has a value. Raises: ValueError: if none of the keys have non empty value in the input. |
166,327 | import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
The provided code snippet includes necessary dependencies for implementing the `MatchNumberOfTransformedExamplesArtifacts` function. Write a Python function `def MatchNumberOfTransformedExamplesArtifacts( input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]]) -> None` to solve the following problem:
Alters output_dict to have the same number of examples to input. If there are multiple input Examples artifacts, replicate Examples artifact in output_dict to have the same number of artifacts. The resulting artifact will have URIs that is located under the original output uri. No-op if there is one or less Examples artifact in the input_dict. Args: input_dict: input artifact dictionary of the Executor. output_dict: output artifact dictionary of the Executor.
Here is the function:
def MatchNumberOfTransformedExamplesArtifacts(
    input_dict: Dict[str, List[types.Artifact]],
    output_dict: Dict[str, List[types.Artifact]]) -> None:
  """Alters output_dict to have the same number of Examples as the input.

  If there are multiple input Examples artifacts and a single output
  transformed Examples artifact, the output artifact is replicated so the
  counts match; replicated artifacts get URIs under the original output URI.
  No-op when the input has one or fewer Examples artifacts.

  Args:
    input_dict: input artifact dictionary of the Executor.
    output_dict: output artifact dictionary of the Executor, modified in
      place.
  """
  num_examples = len(input_dict[standard_component_specs.EXAMPLES_KEY])
  if num_examples <= 1:
    return
  transformed_key = standard_component_specs.TRANSFORMED_EXAMPLES_KEY
  transformed = output_dict.get(transformed_key)
  # Only replicate when there is exactly one output artifact to expand.
  if transformed is None or len(transformed) != 1:
    return
  output_dict[transformed_key] = artifact_utils.replicate_artifacts(
      transformed[0], num_examples)
166,328 | import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
The provided code snippet includes necessary dependencies for implementing the `ResolveSplitsConfig` function. Write a Python function `def ResolveSplitsConfig( splits_config_str: Optional[str], examples: List[types.Artifact]) -> transform_pb2.SplitsConfig` to solve the following problem:
Resolve SplitsConfig proto for the transform request.
Here is the function:
def ResolveSplitsConfig(
    splits_config_str: Optional[str],
    examples: List[types.Artifact]) -> transform_pb2.SplitsConfig:
  """Resolves the SplitsConfig proto for the transform request.

  Args:
    splits_config_str: JSON-serialized `transform_pb2.SplitsConfig`, or
      None/empty to fall back to the default behavior.
    examples: input Examples artifacts; all must share the same split names.

  Returns:
    The resolved `transform_pb2.SplitsConfig`. When `splits_config_str` is
    not given, the 'train' split is analyzed and all splits are transformed.

  Raises:
    ValueError: if an explicit config has an empty `analyze` list, or the
      input artifacts do not all share the same split names.
  """
  result = transform_pb2.SplitsConfig()
  if splits_config_str:
    proto_utils.json_to_proto(splits_config_str, result)
    if not result.analyze:
      raise ValueError('analyze cannot be empty when splits_config is set.')
    return result

  # Default behavior: analyze 'train' and transform every split.
  result.analyze.append('train')
  # All input artifacts should have the same set of split names.
  split_names = set(artifact_utils.decode_split_names(examples[0].split_names))
  for artifact in examples:
    artifact_split_names = set(
        artifact_utils.decode_split_names(artifact.split_names))
    if split_names != artifact_split_names:
      raise ValueError(
          'Not all input artifacts have the same split names: (%s, %s)' %
          (split_names, artifact_split_names))

  # Sort for determinism: extending directly from the set would put the
  # transform splits in an arbitrary, run-dependent order in the proto.
  result.transform.extend(sorted(split_names))
  logging.info("Analyze the 'train' split and transform all splits when "
               'splits_config is not set.')
  return result
166,329 | import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
The provided code snippet includes necessary dependencies for implementing the `SetSplitNames` function. Write a Python function `def SetSplitNames( splits: Sequence[str], transformed_examples: Optional[List[types.Artifact]]) -> None` to solve the following problem:
Sets split_names property of input artifacts.
Here is the function:
def SetSplitNames(
    splits: Sequence[str],
    transformed_examples: Optional[List[types.Artifact]]) -> None:
  """Sets the split_names property of each transformed Examples artifact."""
  if not transformed_examples:
    return
  encoded_names = artifact_utils.encode_split_names(list(splits))
  for artifact in transformed_examples:
    artifact.split_names = encoded_names
166,330 | import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
_DEFAULT_TRANSFORMED_EXAMPLES_PREFIX = 'transformed_examples'
The provided code snippet includes necessary dependencies for implementing the `GetSplitPaths` function. Write a Python function `def GetSplitPaths( transformed_examples: Optional[List[types.Artifact]]) -> List[str]` to solve the following problem:
Gets all paths for splits in the input artifacts.
Here is the function:
def GetSplitPaths(
    transformed_examples: Optional[List[types.Artifact]]) -> List[str]:
  """Gets all paths for splits in the input artifacts."""
  if not transformed_examples:
    return []
  # All artifacts share the same split names; read them from the first one.
  splits = artifact_utils.decode_split_names(
      transformed_examples[0].split_names)
  return [
      os.path.join(output_uri, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX)
      for split in splits
      for output_uri in artifact_utils.get_split_uris(
          transformed_examples, split)
  ]
166,331 | import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
The provided code snippet includes necessary dependencies for implementing the `GetCachePathEntry` function. Write a Python function `def GetCachePathEntry( label: str, params_dict: Dict[str, List[types.Artifact]]) -> Dict[str, str]` to solve the following problem:
Returns a cachePath entry if label exists in params_dict.
Here is the function:
def GetCachePathEntry(
    label: str, params_dict: Dict[str, List[types.Artifact]]) -> Dict[str, str]:
  """Returns a cachePath entry if label exists in params_dict.

  Args:
    label: either ANALYZER_CACHE_KEY or UPDATED_ANALYZER_CACHE_KEY.
    params_dict: artifact dictionary to look the label up in.

  Returns:
    A single-entry dict mapping the matching cache path label to the
    artifact's URI, or {} when the label is absent/empty in params_dict.

  Raises:
    ValueError: if label is not one of the two supported cache keys.
  """
  # Covers the cases: path wasn't provided, or was provided an empty list.
  if not params_dict.get(label):
    return {}
  if label == standard_component_specs.ANALYZER_CACHE_KEY:
    dict_key = labels.CACHE_INPUT_PATH_LABEL
  elif label == standard_component_specs.UPDATED_ANALYZER_CACHE_KEY:
    dict_key = labels.CACHE_OUTPUT_PATH_LABEL
  else:
    # Previously an unsupported label fell through and raised an opaque
    # NameError on dict_key; fail with a clear message instead.
    raise ValueError(f'Unsupported cache path label: {label}')
  return {dict_key: artifact_utils.get_single_uri(params_dict[label])}
166,332 | import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
The provided code snippet includes necessary dependencies for implementing the `GetStatsOutputPathEntries` function. Write a Python function `def GetStatsOutputPathEntries( disable_statistics: bool, output_dict: Dict[str, List[types.Artifact]]) -> Dict[str, str]` to solve the following problem:
Returns output entries for stats output path.
Here is the function:
def GetStatsOutputPathEntries(
    disable_statistics: bool,
    output_dict: Dict[str, List[types.Artifact]]) -> Dict[str, str]:
  """Returns output entries for stats output path."""
  label_component_key_list = [
      (labels.PRE_TRANSFORM_OUTPUT_STATS_PATH_LABEL,
       standard_component_specs.PRE_TRANSFORM_STATS_KEY),
      (labels.PRE_TRANSFORM_OUTPUT_SCHEMA_PATH_LABEL,
       standard_component_specs.PRE_TRANSFORM_SCHEMA_KEY),
      (labels.POST_TRANSFORM_OUTPUT_ANOMALIES_PATH_LABEL,
       standard_component_specs.POST_TRANSFORM_ANOMALIES_KEY),
      (labels.POST_TRANSFORM_OUTPUT_STATS_PATH_LABEL,
       standard_component_specs.POST_TRANSFORM_STATS_KEY),
      (labels.POST_TRANSFORM_OUTPUT_SCHEMA_PATH_LABEL,
       standard_component_specs.POST_TRANSFORM_SCHEMA_KEY)
  ]
  if disable_statistics:
    return {}
  result = {
      label: artifact_utils.get_single_uri(output_dict[component_key])
      for label, component_key in label_component_key_list
      if component_key in output_dict
  }
  # Stats outputs are all-or-nothing: a partial set indicates a wiring error.
  if result and len(result) != len(label_component_key_list):
    raise ValueError(
        'Either all stats_output_paths should be specified or none.')
  return result
166,333 | import os
from typing import Any, Dict
from absl import logging
import tensorflow_data_validation as tfdv
from tensorflow_data_validation.utils import path
from tensorflow_data_validation.utils import schema_util
from tfx import types
from tfx.components.statistics_gen import stats_artifact_utils
from tfx.dsl.components.base import base_executor
from tfx.orchestration.experimental.core import component_generated_alert_pb2
from tfx.orchestration.experimental.core import constants
from tfx.proto import distribution_validator_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import monitoring_utils
from tfx.utils import writer_utils
from google.protobuf import any_pb2
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
_COMPARISON_ANOMALY_TYPES = frozenset([
anomalies_pb2.AnomalyInfo.Type.COMPARATOR_CONTROL_DATA_MISSING,
anomalies_pb2.AnomalyInfo.Type.COMPARATOR_TREATMENT_DATA_MISSING,
anomalies_pb2.AnomalyInfo.Type.COMPARATOR_L_INFTY_HIGH,
anomalies_pb2.AnomalyInfo.Type.COMPARATOR_JENSEN_SHANNON_DIVERGENCE_HIGH,
anomalies_pb2.AnomalyInfo.Type.COMPARATOR_LOW_NUM_EXAMPLES,
anomalies_pb2.AnomalyInfo.Type.COMPARATOR_HIGH_NUM_EXAMPLES,
# Any custom validation anomalies generated are passed through, regardless
# of whether those anomalies are generated from multiple datasets.
anomalies_pb2.AnomalyInfo.Type.CUSTOM_VALIDATION,
])
The provided code snippet includes necessary dependencies for implementing the `_get_comparison_only_anomalies` function. Write a Python function `def _get_comparison_only_anomalies( anomalies: anomalies_pb2.Anomalies, ) -> anomalies_pb2.Anomalies` to solve the following problem:
Returns new Anomalies proto with only info from statistics comparison.
Here is the function:
def _get_comparison_only_anomalies(
    anomalies: anomalies_pb2.Anomalies,
) -> anomalies_pb2.Anomalies:
  """Returns new Anomalies proto with only info from statistics comparison.

  Args:
    anomalies: the full Anomalies proto to filter; not modified.

  Returns:
    A copy of `anomalies` where each feature's (and the dataset's) reasons
    are restricted to _COMPARISON_ANOMALY_TYPES, and entries left with no
    reasons are removed.
  """
  new_anomalies = anomalies_pb2.Anomalies()
  new_anomalies.CopyFrom(anomalies)
  # Iterate over a snapshot of the keys: entries may be deleted from
  # anomaly_info inside the loop.
  for feature in list(new_anomalies.anomaly_info):
    # Remove top-level anomaly info description, short_description, and
    # diff_regions entries, since we don't have a good way of separating out
    # the comparison-related portion from the rest.
    new_anomalies.anomaly_info[feature].ClearField('description')
    new_anomalies.anomaly_info[feature].ClearField('short_description')
    new_anomalies.anomaly_info[feature].ClearField('diff_regions')
    reasons_to_keep = [
        r
        for r in new_anomalies.anomaly_info[feature].reason
        if r.type in _COMPARISON_ANOMALY_TYPES
    ]
    # Repeated fields cannot be assigned directly; clear then re-extend.
    del new_anomalies.anomaly_info[feature].reason[:]
    new_anomalies.anomaly_info[feature].reason.extend(reasons_to_keep)
    # If all of the reasons have been removed, remove the feature's entry from
    # anomaly_info altogether.
    if not new_anomalies.anomaly_info[feature].reason:
      del new_anomalies.anomaly_info[feature]
  # Dataset-level info gets the same treatment as the per-feature entries.
  new_anomalies.dataset_anomaly_info.ClearField('description')
  new_anomalies.dataset_anomaly_info.ClearField('short_description')
  new_anomalies.dataset_anomaly_info.ClearField('diff_regions')
  dataset_reasons_to_keep = [
      r
      for r in new_anomalies.dataset_anomaly_info.reason
      if r.type in _COMPARISON_ANOMALY_TYPES
  ]
  del new_anomalies.dataset_anomaly_info.reason[:]
  if dataset_reasons_to_keep:
    new_anomalies.dataset_anomaly_info.reason.extend(dataset_reasons_to_keep)
  else:
    new_anomalies.ClearField('dataset_anomaly_info')
  new_anomalies.ClearField('data_missing')
  return new_anomalies
166,334 | import os
from typing import Any, Dict
from absl import logging
import tensorflow_data_validation as tfdv
from tensorflow_data_validation.utils import path
from tensorflow_data_validation.utils import schema_util
from tfx import types
from tfx.components.statistics_gen import stats_artifact_utils
from tfx.dsl.components.base import base_executor
from tfx.orchestration.experimental.core import component_generated_alert_pb2
from tfx.orchestration.experimental.core import constants
from tfx.proto import distribution_validator_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import monitoring_utils
from tfx.utils import writer_utils
from google.protobuf import any_pb2
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
The provided code snippet includes necessary dependencies for implementing the `_make_schema_from_config` function. Write a Python function `def _make_schema_from_config( config: distribution_validator_pb2.DistributionValidatorConfig, statistics_list: statistics_pb2.DatasetFeatureStatisticsList, ) -> schema_pb2.Schema` to solve the following problem:
Converts a config to a schema that can be used for data validation.
Here is the function:
def _make_schema_from_config(
    config: distribution_validator_pb2.DistributionValidatorConfig,
    statistics_list: statistics_pb2.DatasetFeatureStatisticsList,
) -> schema_pb2.Schema:
  """Converts a config to a schema that can be used for data validation.

  Args:
    config: DistributionValidator config whose per-feature comparators are
      copied onto the inferred schema.
    statistics_list: statistics the base schema is inferred from.

  Returns:
    A Schema with drift comparators (and, if configured, the num_examples
    drift comparator) populated from `config`.
  """
  # Infer a base schema from the statistics, then graft the config's
  # comparators onto the matching features.
  schema = tfdv.infer_schema(statistics_list)
  for feature in config.default_slice_config.feature:
    try:
      schema_feature = schema_util.get_feature(
          schema, path.FeaturePath.from_proto(feature.path)
      )
    except ValueError:
      # Statistics could be missing for features in the config in which case
      # they will not be present in the schema. Just continue; an anomaly will
      # be raised for such features in the step that looks for missing
      # comparisons in the result.
      pass
    else:
      schema_feature.drift_comparator.CopyFrom(feature.distribution_comparator)
  if config.default_slice_config.HasField('num_examples_comparator'):
    schema.dataset_constraints.num_examples_drift_comparator.CopyFrom(
        config.default_slice_config.num_examples_comparator
    )
  return schema
166,335 | import os
from typing import Any, Dict
from absl import logging
import tensorflow_data_validation as tfdv
from tensorflow_data_validation.utils import path
from tensorflow_data_validation.utils import schema_util
from tfx import types
from tfx.components.statistics_gen import stats_artifact_utils
from tfx.dsl.components.base import base_executor
from tfx.orchestration.experimental.core import component_generated_alert_pb2
from tfx.orchestration.experimental.core import constants
from tfx.proto import distribution_validator_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import monitoring_utils
from tfx.utils import writer_utils
from google.protobuf import any_pb2
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
The provided code snippet includes necessary dependencies for implementing the `_add_anomalies_for_missing_comparisons` function. Write a Python function `def _add_anomalies_for_missing_comparisons( raw_anomalies: anomalies_pb2.Anomalies, config: distribution_validator_pb2.DistributionValidatorConfig, ) -> anomalies_pb2.Anomalies` to solve the following problem:
Identifies whether comparison could be done on the configured features. If comparison was not done for a configured feature, adds an anomaly flagging that. Args: raw_anomalies: The Anomalies proto to be checked for comparison. config: The config that identifies the features for which distribution validation will be done. Returns: An Anomalies proto with anomalies added for features for which comparisons could not be done.
Here is the function:
def _add_anomalies_for_missing_comparisons(
    raw_anomalies: anomalies_pb2.Anomalies,
    config: distribution_validator_pb2.DistributionValidatorConfig,
) -> anomalies_pb2.Anomalies:
  """Identifies whether comparison could be done on the configured features.

  If comparison was not done for a configured feature, adds an anomaly
  flagging that.

  Args:
    raw_anomalies: The Anomalies proto to be checked for comparison.
    config: The config that identifies the features for which distribution
      validation will be done.

  Returns:
    An Anomalies proto with anomalies added for features for which
    comparisons could not be done.
  """
  # drift_skew_info records the features a comparison was actually run for.
  compared_features = {
      '.'.join(info.path.step) for info in raw_anomalies.drift_skew_info
  }
  anomalies = anomalies_pb2.Anomalies()
  anomalies.CopyFrom(raw_anomalies)
  anomalies.anomaly_name_format = (
      anomalies_pb2.Anomalies.AnomalyNameFormat.SERIALIZED_PATH
  )
  for feature in config.default_slice_config.feature:
    # Compute the serialized path once per feature (it was previously built
    # twice: once for the membership test and once as the map key).
    feature_key = '.'.join(feature.path.step)
    if feature_key in compared_features:
      continue
    reason = anomalies.anomaly_info[feature_key].reason.add()
    # TODO(b/239734255): Update with new anomaly type.
    reason.type = anomalies_pb2.AnomalyInfo.Type.STATS_NOT_AVAILABLE
    reason.short_description = 'Comparison could not be done.'
    reason.description = (
        'Validation could not be done, which could be '
        'due to missing data, use of a comparator that is not suitable for the '
        'feature type, or some other reason.'
    )
    anomalies.anomaly_info[feature_key].path.CopyFrom(feature.path)
    anomalies.anomaly_info[feature_key].severity = (
        anomalies_pb2.AnomalyInfo.Severity.ERROR
    )
  return anomalies
166,336 | import os
from typing import Any, Dict
from absl import logging
import tensorflow_data_validation as tfdv
from tensorflow_data_validation.utils import path
from tensorflow_data_validation.utils import schema_util
from tfx import types
from tfx.components.statistics_gen import stats_artifact_utils
from tfx.dsl.components.base import base_executor
from tfx.orchestration.experimental.core import component_generated_alert_pb2
from tfx.orchestration.experimental.core import constants
from tfx.proto import distribution_validator_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import monitoring_utils
from tfx.utils import writer_utils
from google.protobuf import any_pb2
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
def _generate_alerts_info_proto(
    anomaly_info: anomalies_pb2.AnomalyInfo,
    split_pair: str
) -> list[component_generated_alert_pb2.ComponentGeneratedAlertInfo]:
  """Builds one ComponentGeneratedAlertInfo per reason in the AnomalyInfo."""
  return [
      component_generated_alert_pb2.ComponentGeneratedAlertInfo(
          alert_name=(
              f'[{split_pair}] {reason.short_description}'
          ),
          alert_body=f'[{split_pair}] {reason.description}',
      )
      for reason in anomaly_info.reason
  ]
The provided code snippet includes necessary dependencies for implementing the `_create_anomalies_alerts` function. Write a Python function `def _create_anomalies_alerts( anomalies: anomalies_pb2.Anomalies, split_pair: str, ) -> list[component_generated_alert_pb2.ComponentGeneratedAlertInfo]` to solve the following problem:
Creates an alert for each anomaly in the anomalies artifact.
Here is the function:
def _create_anomalies_alerts(
    anomalies: anomalies_pb2.Anomalies,
    split_pair: str,
) -> list[component_generated_alert_pb2.ComponentGeneratedAlertInfo]:
  """Creates an alert for each anomaly in the anomalies artifact."""
  alerts = []
  # Dataset-level anomalies, such as "High num examples in current dataset
  # versus the previous span."
  if anomalies.HasField('dataset_anomaly_info'):
    alerts.extend(
        _generate_alerts_info_proto(anomalies.dataset_anomaly_info, split_pair)
    )
  # Feature-level anomalies, such as "High Linfty distance between current
  # and previous."
  for info in anomalies.anomaly_info.values():
    alerts.extend(_generate_alerts_info_proto(info, split_pair))
  return alerts
166,337 |
def get_no_validation_file_value(validation_path: str) -> str:
  """Returns the warning message used when no validations file is found."""
  message = (
      f'No validations.tfrecords file found at {validation_path}. '
      'The "blessed" custom_property will not be set.'
  )
  return message
166,338 | import json
import os
from typing import Any, Dict, List
import absl
from tensorflow import estimator as tf_estimator
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.trainer import constants
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import deprecation_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
from tensorflow.python.lib.io import file_io
from tensorflow_metadata.proto.v0 import schema_pb2
def _all_files_pattern(file_pattern: str) -> str:
return os.path.join(file_pattern, '*') | null |
166,339 | import json
import os
from typing import Any, Dict, List
import absl
from tensorflow import estimator as tf_estimator
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.trainer import constants
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import deprecation_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
from tensorflow.python.lib.io import file_io
from tensorflow_metadata.proto.v0 import schema_pb2
The provided code snippet includes necessary dependencies for implementing the `_is_chief` function. Write a Python function `def _is_chief()` to solve the following problem:
Returns true if this is run in the master (chief) of training cluster.
Here is the function:
def _is_chief():
  """Returns true if this is run in the master (chief) of training cluster."""
  tf_config = json.loads(os.environ.get(constants.TF_CONFIG_ENV) or '{}')
  # Without a cluster spec we are in single-process mode, which always
  # behaves as chief.
  if not tf_config.get('cluster'):
    return True
  task = tf_config['task']
  # 'master' is a legacy notation of chief node in distributed training flock.
  return task['type'] == 'chief' or (
      task['type'] == 'master' and task['index'] == 0)
166,340 | import os
import time
from typing import Iterable, Optional, Sequence
import numpy as np
import tensorflow as tf
from tfx.components.trainer.rewriting import rewriter
from tfx.dsl.io import fileio
from tfx.utils import io_utils
EXTRA_ASSETS_DIRECTORY = 'assets.extra'
def _create_tflite_compatible_saved_model(src: str, dst: str):
  """Copies a SavedModel to `dst` and removes its asset directories."""
  io_utils.copy_dir(src, dst)
  # Drop both the standard assets directory and the extra-assets directory
  # from the copy, if present.
  for assets_dir in (tf.saved_model.ASSETS_DIRECTORY, EXTRA_ASSETS_DIRECTORY):
    candidate = os.path.join(dst, assets_dir)
    if fileio.exists(candidate):
      fileio.rmtree(candidate)
166,341 | import os
import time
from typing import Iterable, Optional, Sequence
import numpy as np
import tensorflow as tf
from tfx.components.trainer.rewriting import rewriter
from tfx.dsl.io import fileio
from tfx.utils import io_utils
def _ensure_str(value):
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode('utf-8')
else:
raise TypeError(f'Unexpected type {type(value)}.') | null |
166,342 | import os
import time
from typing import Iterable, Optional, Sequence
import numpy as np
import tensorflow as tf
from tfx.components.trainer.rewriting import rewriter
from tfx.dsl.io import fileio
from tfx.utils import io_utils
def _ensure_bytes(value):
if isinstance(value, bytes):
return value
elif isinstance(value, str):
return value.encode('utf-8')
else:
raise TypeError(f'Unexpected type {type(value)}.') | null |
166,343 | import importlib
from tfx.components.trainer.rewriting import rewriter
def _load_tflite_rewriter():
  """Imports the TFLite rewriter module.

  Imported for side effects only — presumably the import registers the
  rewriter; TODO confirm against the rewriter module.
  """
  importlib.import_module('tfx.components.trainer.rewriting.tflite_rewriter')
166,344 | import importlib
from tfx.components.trainer.rewriting import rewriter
def _load_tfjs_rewriter():
  """Imports the TFJS rewriter module after verifying tensorflowjs exists.

  Raises:
    RuntimeError: if the optional `tensorflowjs` dependency is not installed.
  """
  try:
    importlib.import_module('tensorflowjs')
  except ImportError as e:
    # Surface a clear, actionable message instead of a raw ImportError.
    raise RuntimeError(
        'tensorflowjs is not installed. Please install [tfjs] extra '
        'dependencies to use tfjs_rewriter.') from e
  else:
    # Only import the rewriter once its third-party dependency is known to
    # be available.
    importlib.import_module('tfx.components.trainer.rewriting.tfjs_rewriter')
166,345 | from tensorflowjs.converters import converter
from tfx.components.trainer.rewriting import rewriter
CONVERTER_SAVED_MODEL_INPUT_FLAG = '--input_format=tf_saved_model'
CONVERTER_SERVING_TAG_FLAG = '--saved_model_tags=serve'
CONVERTER_DEFAULT_SIGNATURE_FLAG = '--signature_name=serving_default'
def _convert_tfjs_model(saved_model_path: str, destination_path: str):
  """Runs the tensorflowjs converter on a SavedModel."""
  conversion_args = [
      CONVERTER_SAVED_MODEL_INPUT_FLAG,
      CONVERTER_SERVING_TAG_FLAG,
      CONVERTER_DEFAULT_SIGNATURE_FLAG,
      saved_model_path,
      destination_path,
  ]
  converter.convert(conversion_args)
166,346 | from tensorflowjs.converters import converter
from tfx.components.trainer.rewriting import rewriter
def _ensure_str(value):
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode('utf-8')
else:
raise TypeError(f'Unexpected type {type(value)}.') | null |
166,347 | import json
import os
from typing import Any, Callable, Dict, List, Optional
from absl import logging
from keras_tuner.engine import base_tuner
from keras_tuner.engine import trial
from tfx import types
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import tuner_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import proto_utils
The provided code snippet includes necessary dependencies for implementing the `get_tune_args` function. Write a Python function `def get_tune_args( exec_properties: Dict[str, Any]) -> Optional[tuner_pb2.TuneArgs]` to solve the following problem:
Returns TuneArgs protos from execution properties, if present.
Here is the function:
def get_tune_args(
    exec_properties: Dict[str, Any]) -> Optional[tuner_pb2.TuneArgs]:
  """Returns TuneArgs protos from execution properties, if present."""
  serialized = exec_properties.get(standard_component_specs.TUNE_ARGS_KEY)
  if not serialized:
    return None
  tune_args = tuner_pb2.TuneArgs()
  proto_utils.json_to_proto(serialized, tune_args)
  return tune_args
166,348 | import json
import os
from typing import Any, Callable, Dict, List, Optional
from absl import logging
from keras_tuner.engine import base_tuner
from keras_tuner.engine import trial
from tfx import types
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import tuner_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import proto_utils
_DEFAULT_BEST_HP_FILE_NAME = 'best_hyperparameters.txt'
_DEFAULT_TUNER_RESULTS_FILE_NAME = 'tuner_results.json'
The provided code snippet includes necessary dependencies for implementing the `write_best_hyperparameters` function. Write a Python function `def write_best_hyperparameters( tuner: base_tuner.BaseTuner, output_dict: Dict[str, List[types.Artifact]]) -> None` to solve the following problem:
Writes out best hyperparameters and tuner results.
Here is the function:
def write_best_hyperparameters(
    tuner: base_tuner.BaseTuner,
    output_dict: Dict[str, List[types.Artifact]]) -> None:
  """Writes out the best hyperparameters and tuner results."""
  # Persist the best hyperparameters known to the given tuner instance.
  best_hparams = tuner.get_best_hyperparameters()[0].get_config()
  logging.info('Best HyperParameters: %s', best_hparams)
  best_hparams_path = os.path.join(
      artifact_utils.get_single_uri(
          output_dict[standard_component_specs.BEST_HYPERPARAMETERS_KEY]),
      _DEFAULT_BEST_HP_FILE_NAME)
  io_utils.write_string_file(best_hparams_path, json.dumps(best_hparams))
  logging.info('Best Hyperparameters are written to %s.', best_hparams_path)

  # Persist tuner results in pandas `records` format (list of rows), keeping
  # only completed trials that actually recorded hyperparameter values.
  trials = tuner.oracle.get_best_trials(tuner.oracle.max_trials)
  results = [
      {
          'trial_id': t.trial_id,
          'score': t.score,
          **t.hyperparameters.values,
      }
      for t in trials
      if t.status == trial.TrialStatus.COMPLETED and t.hyperparameters.values
  ]
  tuner_results_path = os.path.join(
      artifact_utils.get_single_uri(
          output_dict[standard_component_specs.TUNER_RESULTS_KEY]),
      _DEFAULT_TUNER_RESULTS_FILE_NAME)
  io_utils.write_string_file(tuner_results_path, json.dumps(results))
  logging.info('Tuner results are written to %s.', tuner_results_path)
166,349 | import json
import os
from typing import Any, Callable, Dict, List, Optional
from absl import logging
from keras_tuner.engine import base_tuner
from keras_tuner.engine import trial
from tfx import types
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import tuner_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import proto_utils
def _get_tuner_fn(exec_properties: Dict[str, Any]) -> Callable[..., Any]:
  """Resolves the user-supplied `tuner_fn` UDF from execution properties.

  Args:
    exec_properties: Component execution properties referencing the
      `tuner_fn` user function.

  Returns:
    The `tuner_fn` callable loaded by `udf_utils.get_fn`.
  """
  return udf_utils.get_fn(exec_properties, 'tuner_fn')
The provided code snippet includes necessary dependencies for implementing the `search` function. Write a Python function `def search(input_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any], working_dir: str, print_tuning_summary: bool = True) -> base_tuner.BaseTuner` to solve the following problem:
Conduct a single hyperparameter search loop, and return the Tuner.
Here is the function:
def search(input_dict: Dict[str, List[types.Artifact]],
           exec_properties: Dict[str, Any],
           working_dir: str,
           print_tuning_summary: bool = True) -> base_tuner.BaseTuner:
  """Runs a single hyperparameter search loop and returns the fitted Tuner.

  Args:
    input_dict: Input artifacts of the Tuner component.
    exec_properties: Execution properties, including the `tuner_fn` UDF.
    working_dir: Working directory used when assembling the common fn args.
    print_tuning_summary: If True, prints the search-space summary before
      tuning and the results summary afterwards.

  Returns:
    The tuner produced by `tuner_fn`, after its search has completed.
  """
  tuner_fn = _get_tuner_fn(exec_properties)
  fn_args = fn_args_utils.get_common_fn_args(input_dict, exec_properties,
                                             working_dir)
  tuner_fn_result = tuner_fn(fn_args)
  tuner = tuner_fn_result.tuner
  # TODO(b/156966497): set logger for printing.
  if print_tuning_summary:
    tuner.search_space_summary()
  logging.info('Start tuning... Tuner ID: %s', tuner.tuner_id)
  tuner.search(**tuner_fn_result.fit_kwargs)
  logging.info('Finished tuning... Tuner ID: %s', tuner.tuner_id)
  if print_tuning_summary:
    tuner.results_summary()
  return tuner
166,350 | import importlib
import os
from typing import Any, Callable, Dict, List, Optional, Union
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx import types
from tfx.components.bulk_inferrer import prediction_to_example_utils
from tfx.components.util import model_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.proto import bulk_inferrer_pb2
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx_bsl.public.beam import run_inference
from tfx_bsl.public.proto import model_spec_pb2
from tfx_bsl.tfxio import record_based_tfxio
from tensorflow_serving.apis import prediction_log_pb2
def _MakeParseFn(
    payload_format: int
) -> Union[Callable[[bytes], tf.train.Example], Callable[
    [bytes], tf.train.SequenceExample]]:
  """Returns the deserializer matching the given example payload format.

  Args:
    payload_format: An `example_gen_pb2.PayloadFormat` enum value.

  Returns:
    A callable parsing a serialized record into a tf.train.Example or
    tf.train.SequenceExample.

  Raises:
    NotImplementedError: For payload formats other than FORMAT_TF_EXAMPLE and
      FORMAT_TF_SEQUENCE_EXAMPLE.
  """
  parsers = {
      example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE:
          tf.train.Example.FromString,
      example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE:
          tf.train.SequenceExample.FromString,
  }
  if payload_format not in parsers:
    raise NotImplementedError(
        'Payload format %s is not supported.' %
        example_gen_pb2.PayloadFormat.Name(payload_format))
  return parsers[payload_format]
The provided code snippet includes necessary dependencies for implementing the `_RunInference` function. Write a Python function `def _RunInference( pipeline: beam.pvalue.PCollection, payload_format: int, inference_endpoint: model_spec_pb2.InferenceSpecType ) -> beam.pvalue.PCollection` to solve the following problem:
Runs model inference on given examples data.
Here is the function:
def _RunInference(
    pipeline: beam.pvalue.PCollection,
    payload_format: int,
    inference_endpoint: model_spec_pb2.InferenceSpecType
) -> beam.pvalue.PCollection:
  """Deserializes the input records and runs model inference over them.

  Args:
    pipeline: PCollection of serialized example records.
    payload_format: An `example_gen_pb2.PayloadFormat` enum value used to
      select the parser.
    inference_endpoint: Inference spec passed to tfx_bsl's RunInference.

  Returns:
    PCollection of inference results.
  """
  parsed = pipeline | 'ParseExamples' >> beam.Map(_MakeParseFn(payload_format))
  return parsed | 'RunInference' >> run_inference.RunInference(
      inference_endpoint)
166,351 | import importlib
import os
from typing import Any, Callable, Dict, List, Optional, Union
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx import types
from tfx.components.bulk_inferrer import prediction_to_example_utils
from tfx.components.util import model_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.proto import bulk_inferrer_pb2
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx_bsl.public.beam import run_inference
from tfx_bsl.public.proto import model_spec_pb2
from tfx_bsl.tfxio import record_based_tfxio
from tensorflow_serving.apis import prediction_log_pb2
_EXAMPLES_FILE_NAME = 'examples'
The provided code snippet includes necessary dependencies for implementing the `_WriteExamples` function. Write a Python function `def _WriteExamples(prediction_log: beam.pvalue.PCollection, output_example_spec: bulk_inferrer_pb2.OutputExampleSpec, output_path: str) -> beam.pvalue.PDone` to solve the following problem:
Converts `prediction_log` to `tf.train.Example` and materializes.
Here is the function:
def _WriteExamples(prediction_log: beam.pvalue.PCollection,
                   output_example_spec: bulk_inferrer_pb2.OutputExampleSpec,
                   output_path: str) -> beam.pvalue.PDone:
  """Converts prediction logs to tf.train.Example and writes gzipped TFRecords.

  Args:
    prediction_log: PCollection of prediction_log_pb2 entries.
    output_example_spec: Spec describing how predictions map onto example
      features.
    output_path: Directory under which the example shards are written.

  Returns:
    The terminal PDone of the write transform.
  """
  examples = prediction_log | 'ConvertToExamples' >> beam.Map(
      prediction_to_example_utils.convert,
      output_example_spec=output_example_spec)
  return examples | 'WriteExamples' >> beam.io.WriteToTFRecord(
      os.path.join(output_path, _EXAMPLES_FILE_NAME),
      file_name_suffix='.gz',
      coder=beam.coders.ProtoCoder(tf.train.Example))
166,352 | import os
from typing import Any, Dict, Union
from absl import logging
import apache_beam as beam
import pyarrow.parquet as pq
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
The provided code snippet includes necessary dependencies for implementing the `_ImportSerializedRecord` function. Write a Python function `def _ImportSerializedRecord( # pylint: disable=invalid-name pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str) -> beam.pvalue.PCollection` to solve the following problem:
Read TFRecord files to PCollection of records. Note that each input split will be transformed by this function separately. Args: pipeline: Beam pipeline. exec_properties: A dict of execution properties. - input_base: input dir that contains input data. split_pattern: Split.pattern in Input config, glob relative file pattern that maps to input files with root directory given by input_base. Returns: PCollection of records (tf.Example, tf.SequenceExample, bytes or dictionaries).
Here is the function:
def _ImportSerializedRecord(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[str, Any],
    split_pattern: str) -> beam.pvalue.PCollection:
  """Reads serialized TFRecord data for a single input split.

  Each input split is handled by a separate invocation of this function.

  Args:
    pipeline: Beam pipeline.
    exec_properties: Execution properties; `input_base` gives the root
      directory containing the input data.
    split_pattern: Glob pattern of this split, relative to `input_base`.

  Returns:
    PCollection of records (tf.Example, tf.SequenceExample, bytes or
    dictionaries).
  """
  pattern = os.path.join(
      exec_properties[standard_component_specs.INPUT_BASE_KEY], split_pattern)
  logging.info('Reading input TFRecord data %s.', pattern)
  # TODO(jyzhao): profile input examples.
  # TODO(jyzhao): support multiple input container format.
  return (pipeline
          | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
              file_pattern=pattern))
166,353 | import os
from typing import Any, Dict, Union
from absl import logging
import apache_beam as beam
import pyarrow.parquet as pq
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
The provided code snippet includes necessary dependencies for implementing the `_ImportParquetRecord` function. Write a Python function `def _ImportParquetRecord( # pylint: disable=invalid-name pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str) -> beam.pvalue.PCollection` to solve the following problem:
Read parquet files to PCollection of records represented by dicts. Note that each input split will be transformed by this function separately. Args: pipeline: Beam pipeline. exec_properties: A dict of execution properties. - input_base: input dir that contains input data. split_pattern: Split.pattern in Input config, glob relative file pattern that maps to input files with root directory given by input_base. Returns: PCollection of parquet records represented by dictionaries.
Here is the function:
def _ImportParquetRecord(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[str, Any],
    split_pattern: str) -> beam.pvalue.PCollection:
  """Read parquet files to PCollection of records represented by dicts.

  Note that each input split will be transformed by this function separately.
  As a side effect, the Arrow schema inferred from the first matching file is
  stored back into `exec_properties['pyarrow_schema']` for downstream use.

  Args:
    pipeline: Beam pipeline.
    exec_properties: A dict of execution properties. - input_base: input dir
      that contains input data.
    split_pattern: Split.pattern in Input config, glob relative file pattern
      that maps to input files with root directory given by input_base.

  Returns:
    PCollection of parquet records represented by dictionaries.

  Raises:
    ValueError: If no file matches the split pattern, so no schema can be
      inferred.
  """
  input_base_uri = exec_properties[standard_component_specs.INPUT_BASE_KEY]
  input_split_pattern = os.path.join(input_base_uri, split_pattern)
  logging.info('Reading input Parquet data %s.', input_split_pattern)

  def _InferArrowSchema(file_pattern):
    # Parquet files within one split are assumed to share a schema, so reading
    # it from any single matching file is sufficient.
    matched_files = fileio.glob(file_pattern)
    if not matched_files:
      # Previously this was an opaque IndexError from `glob(...)[0]`.
      raise ValueError(
          'No parquet files found matching pattern %s; cannot infer schema.' %
          file_pattern)
    return pq.read_schema(matched_files[0])

  exec_properties['pyarrow_schema'] = _InferArrowSchema(input_split_pattern)

  return (pipeline
          | 'ReadParquet' >>
          beam.io.ReadFromParquet(file_pattern=input_split_pattern))
166,354 | import datetime
import os
import re
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from absl import logging
import numpy as np
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.utils import io_utils
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
The provided code snippet includes necessary dependencies for implementing the `generate_output_split_names` function. Write a Python function `def generate_output_split_names( input_config: Union[example_gen_pb2.Input, Dict[str, Any]], output_config: Union[example_gen_pb2.Output, Dict[str, Any]]) -> List[str]` to solve the following problem:
Return output split name based on input and output config. Return output split name if it's specified and input only contains one split, otherwise output split will be same as input. Args: input_config: example_gen_pb2.Input instance. If any field is provided as a RuntimeParameter, input_config should be constructed as a dict with the same field names as Input proto message. output_config: example_gen_pb2.Output instance. If any field is provided as a RuntimeParameter, output_config should be constructed as a dict with the same field names as Output proto message. Returns: List of split names. Raises: RuntimeError: if configs are not valid, including: - Missing field. - Duplicated split. - Output split is specified while input has more than one split. - Missing train and eval split.
Here is the function:
def generate_output_split_names(
    input_config: Union[example_gen_pb2.Input, Dict[str, Any]],
    output_config: Union[example_gen_pb2.Output, Dict[str, Any]]) -> List[str]:
  """Derives the list of output split names from input and output configs.

  The output split names come from the output config only when it specifies
  splits and the input config has exactly one split; otherwise the output
  splits mirror the input splits.

  Args:
    input_config: example_gen_pb2.Input instance, or a dict with the same
      field names as the Input proto message (used when fields are provided
      as RuntimeParameters).
    output_config: example_gen_pb2.Output instance, or a dict with the same
      field names as the Output proto message.

  Returns:
    List of output split names.

  Raises:
    RuntimeError: if the configs are invalid, including: a required field is
      missing, a split name is duplicated, output splits are specified while
      the input has more than one split, or no splits are present at all.
  """
  # Normalize protos to dicts so the validation below does not need to branch
  # on proto vs. RuntimeParameter-style dict inputs.
  if isinstance(output_config, example_gen_pb2.Output):
    output_config = json_format.MessageToDict(
        output_config,
        including_default_value_fields=True,
        preserving_proto_field_name=True)
  if isinstance(input_config, example_gen_pb2.Input):
    input_config = json_format.MessageToDict(
        input_config,
        including_default_value_fields=True,
        preserving_proto_field_name=True)

  names = []
  # Key-presence (not truthiness) checks preserve the original semantics for
  # configs that carry an empty `splits` list.
  if ('split_config' in output_config and
      'splits' in output_config['split_config']):
    if 'splits' not in input_config:
      raise RuntimeError(
          'ExampleGen instance specified output splits but no input split '
          'is specified.')
    if len(input_config['splits']) != 1:
      # Output splits may only be specified for a single input split.
      raise RuntimeError(
          'ExampleGen instance specified output splits but at the same time '
          'input has more than one split.')
    for split in output_config['split_config']['splits']:
      if not split['name'] or (isinstance(split['hash_buckets'], int) and
                               split['hash_buckets'] <= 0):
        raise RuntimeError('Str-typed output split name and int-typed '
                           'hash buckets are required.')
      names.append(split['name'])
  elif 'splits' in input_config:
    # No output split config: the output mirrors the input splits.
    for split in input_config['splits']:
      if not split['name'] or not split['pattern']:
        raise RuntimeError('Str-typed input split name and pattern '
                           'are required.')
      names.append(split['name'])

  if not names:
    raise RuntimeError('ExampleGen splits are missing.')
  if len(names) != len(set(names)):
    raise RuntimeError('Duplicated split name {}.'.format(names))
  return names
166,355 | import datetime
import os
import re
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from absl import logging
import numpy as np
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.utils import io_utils
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
The provided code snippet includes necessary dependencies for implementing the `make_default_input_config` function. Write a Python function `def make_default_input_config( split_pattern: str = '*') -> example_gen_pb2.Input` to solve the following problem:
Returns default input config.
Here is the function:
def make_default_input_config(
    split_pattern: str = '*') -> example_gen_pb2.Input:
  """Builds an input config treating the whole input base dir as one split.

  Args:
    split_pattern: Glob pattern for the single split; defaults to everything.

  Returns:
    An example_gen_pb2.Input with a single split named 'single_split'.
  """
  single = example_gen_pb2.Input.Split(
      name='single_split', pattern=split_pattern)
  return example_gen_pb2.Input(splits=[single])
166,356 | import datetime
import os
import re
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from absl import logging
import numpy as np
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.utils import io_utils
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
The provided code snippet includes necessary dependencies for implementing the `make_default_output_config` function. Write a Python function `def make_default_output_config( input_config: Union[example_gen_pb2.Input, Dict[str, Any]] ) -> example_gen_pb2.Output` to solve the following problem:
Returns default output config based on input config.
Here is the function:
def make_default_output_config(
    input_config: Union[example_gen_pb2.Input, Dict[str, Any]]
) -> example_gen_pb2.Output:
  """Builds the default output config implied by the given input config.

  Args:
    input_config: example_gen_pb2.Input instance, or a dict with the same
      field names as the Input proto message.

  Returns:
    An empty Output when the input has multiple splits (output mirrors input),
    otherwise an Output with a 2:1 train/eval hash-bucket split.
  """
  if isinstance(input_config, example_gen_pb2.Input):
    input_config = json_format.MessageToDict(
        input_config,
        including_default_value_fields=True,
        preserving_proto_field_name=True)
  if len(input_config['splits']) > 1:
    # Multiple input splits carry over unchanged, so no output split config.
    return example_gen_pb2.Output()
  # Single input split: default to 'train' and 'eval' with a 2:1 ratio.
  split_config = example_gen_pb2.SplitConfig(splits=[
      example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=2),
      example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1),
  ])
  return example_gen_pb2.Output(split_config=split_config)
166,357 | import datetime
import os
import re
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from absl import logging
import numpy as np
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.utils import io_utils
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
# Placeholder tokens substituted into span-based query patterns by
# get_query_for_span().
SPAN_BEGIN_TIMESTAMP = '@span_begin_timestamp'
# NOTE(review): name typo ('TIMESTMAP') kept — renaming would break callers.
SPAN_END_TIMESTMAP = '@span_end_timestamp'
SPAN_YYYYMMDD_UTC = '@span_yyyymmdd_utc'
# Reference epoch (UTC-aware): span N maps to the day N days after this date.
UNIX_EPOCH_DATE_UTC = datetime.datetime(  # pylint: disable=g-tzinfo-datetime
    1970,
    1,
    1,
    tzinfo=datetime.timezone.utc)
The provided code snippet includes necessary dependencies for implementing the `get_query_for_span` function. Write a Python function `def get_query_for_span(pattern: str, span: int) -> str` to solve the following problem:
Return query with timestamp placeholders filled.
Here is the function:
def get_query_for_span(pattern: str, span: int) -> str:
  """Substitutes the span's timestamp placeholders into the query pattern.

  Args:
    pattern: Query string that may contain the span placeholder tokens.
    span: Span number, interpreted as days since the Unix epoch (UTC).

  Returns:
    The query with begin/end timestamps and the YYYYMMDD date filled in.
  """
  # TODO(b/179853017): make UNIX_EPOCH_DATE_UTC timezone configurable.
  span_start = UNIX_EPOCH_DATE_UTC + datetime.timedelta(days=span)
  span_end = span_start + datetime.timedelta(days=1)
  replacements = {
      SPAN_BEGIN_TIMESTAMP: str(int(span_start.timestamp())),
      SPAN_END_TIMESTMAP: str(int(span_end.timestamp())),
      SPAN_YYYYMMDD_UTC:
          span_start.astimezone(datetime.timezone.utc).strftime("'%Y%m%d'"),
  }
  for placeholder, value in replacements.items():
    pattern = pattern.replace(placeholder, value)
  return pattern
166,358 | import datetime
import os
import re
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from absl import logging
import numpy as np
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.utils import io_utils
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
def _get_target_span_version(
    uri: str,
    split: example_gen_pb2.Input.Split,
    range_config: Optional[range_config_pb2.RangeConfig] = None
) -> Tuple[Optional[int], Optional[int]]:
  """Retrieves a target span and version for a given split pattern.

  If both Span and Version spec occur in the split pattern, searches for and
  returns both the target Span and Version. If only Span exists in the split
  pattern, searches for the target Span, and Version is returned as None.
  If Version is present, but not Span, an error is raised. If neither Span
  nor Version is present, returns both as None.

  Additionally, supports parsing span number from date stamps using the Date
  specs. Once the calendar date is parsed from the Date specs, it is converted
  into a span number by counting the number of days since 01/01/1970.

  Args:
    uri: The base path from which files will be searched.
    split: An example_gen_pb2.Input.Split object which contains a split pattern,
      to be searched on.
    range_config: An instance of range_config_pb2.RangeConfig, which specifies
      which spans to consider when finding the most recent span and version. If
      unset, search for latest span number with no restrictions.

  Returns:
    Tuple of two ints, Span (optional) and Version (optional). Note
    that this function will update the {SPAN} or Date tags as well as the
    {VERSION} tags in the split config to actual Span and Version numbers.

  Raises:
    ValueError: if any of the following occurs:
      - If either Span or Version spec is occurs in the split pattern
        more than once.
      - If Version spec is provided, but Span spec is not present.
      - If Span or Version found is not an integer.
      - If a matching cannot be found for split pattern provided.
  """
  is_match_span, is_match_date, is_match_version = verify_split_pattern_specs(
      split)
  # Neither {SPAN} nor Date specs in the pattern: nothing to resolve.
  if not is_match_span and not is_match_date:
    return (None, None)
  # Build a glob pattern (to enumerate candidate files) and a regex (to
  # extract the span/version tokens from each candidate path).
  split_glob_pattern, split_regex_pattern = _create_matching_glob_and_regex(
      uri=uri,
      split=split,
      is_match_span=is_match_span,
      is_match_date=is_match_date,
      is_match_version=is_match_version,
      range_config=range_config)
  logging.info('Glob pattern for split %s: %s', split.name, split_glob_pattern)
  logging.info('Regex pattern for split %s: %s', split.name,
               split_regex_pattern)
  # Running maximum over all candidate files: span wins first, then version.
  latest_span_tokens = None  # raw matched span strings (keeps zero padding)
  latest_span_int = None
  latest_version = None  # raw matched version string
  latest_version_int = None
  files = fileio.glob(split_glob_pattern)
  for file_path in files:
    match_span_tokens, match_span_int, match_version, match_version_int = (
        _find_matched_span_version_from_path(file_path, split_regex_pattern,
                                             is_match_span, is_match_date,
                                             is_match_version))
    if latest_span_int is None or match_span_int > latest_span_int:
      # Uses str instead of int because of zero padding digits.
      latest_span_tokens = match_span_tokens
      latest_span_int = match_span_int
      latest_version = match_version
      latest_version_int = match_version_int
    elif (latest_span_int == match_span_int and
          (latest_version is None or match_version_int >= latest_version_int)):
      # Same span: keep the larger version. '>=' means a later file with an
      # equal version value replaces the stored version string (its zero
      # padding may differ).
      latest_version = match_version
      latest_version_int = match_version_int
  # No candidate matched, or a Version spec was requested but never found.
  if latest_span_int is None or (is_match_version and latest_version is None):
    raise ValueError('Cannot find matching for split %s based on %s' %
                     (split.name, split.pattern))
  # Update split pattern so executor can find the files to ingest.
  if is_match_span:
    split.pattern = re.sub(SPAN_FULL_REGEX, latest_span_tokens[0],
                           split.pattern)
  elif is_match_date:
    # Date specs map positionally onto the matched (YYYY, MM, DD) tokens.
    for spec, value in zip(DATE_SPECS, latest_span_tokens):
      split.pattern = split.pattern.replace(spec, value)
  if is_match_version:
    split.pattern = re.sub(VERSION_FULL_REGEX, latest_version, split.pattern)
  return latest_span_int, latest_version_int
The provided code snippet includes necessary dependencies for implementing the `calculate_splits_fingerprint_span_and_version` function. Write a Python function `def calculate_splits_fingerprint_span_and_version( input_base_uri: str, splits: Iterable[example_gen_pb2.Input.Split], range_config: Optional[range_config_pb2.RangeConfig] = None ) -> Tuple[str, int, Optional[int]]` to solve the following problem:
Calculates the fingerprint of files in a URI matching split patterns. If a pattern has the {SPAN} placeholder or the Date spec placeholders, {YYYY}, {MM}, and {DD}, and optionally, the {VERSION} placeholder, attempts to find aligned values that results in all splits having the target span and most recent version for that span. Args: input_base_uri: The base path from which files will be searched. splits: An iterable collection of example_gen_pb2.Input.Split objects. range_config: An instance of range_config_pb2.RangeConfig, which specifies which spans to consider when finding the most recent span and version. If unset, search for latest span number with no restrictions. Returns: A Tuple of [fingerprint, select_span, select_version], where select_span is either the value matched with the {SPAN} placeholder, the value mapped from matching the calendar date with the date placeholders {YYYY}, {MM}, {DD} or 0 if a placeholder wasn't specified, and where select_version is either the value matched with the {VERSION} placeholder, or None if the placeholder wasn't specified. Note that this function will update the {SPAN} or Date tags as well as the {VERSION} tags in the split configs to actual Span and Version numbers.
Here is the function:
def calculate_splits_fingerprint_span_and_version(
    input_base_uri: str,
    splits: Iterable[example_gen_pb2.Input.Split],
    range_config: Optional[range_config_pb2.RangeConfig] = None
) -> Tuple[str, int, Optional[int]]:
  """Computes a fingerprint over all split files plus the shared span/version.

  When a pattern contains the {SPAN} placeholder (or the {YYYY}/{MM}/{DD}
  Date placeholders) and optionally {VERSION}, this resolves the same target
  span and the most recent version for every split, rewriting the split
  patterns in place to the concrete values.

  Args:
    input_base_uri: The base path from which files will be searched.
    splits: An iterable collection of example_gen_pb2.Input.Split objects.
    range_config: An instance of range_config_pb2.RangeConfig restricting
      which spans are considered; if unset, the latest span is used with no
      restrictions.

  Returns:
    Tuple of (fingerprint, selected span, selected version). The span is 0
    when no span/date placeholder was present; the version is None when no
    {VERSION} placeholder was present.

  Raises:
    ValueError: If the splits do not all resolve to the same span or version.
  """
  fingerprints = []
  select_span = 0
  select_version = None
  for split in splits:
    logging.info('select span and version = (%s, %s)', select_span,
                 select_version)
    # Resolve the most recent span/version for this split; this also rewrites
    # the split pattern in place to the concrete Span/Version values.
    target_span, target_version = _get_target_span_version(
        input_base_uri, split, range_config=range_config)
    # TODO(b/162622803): add default behavior for when version spec not present.
    target_span = target_span or 0
    logging.info('latest span and version = (%s, %s)', target_span,
                 target_version)
    # Adopt the first non-default (span, version) pair seen as the selection.
    if select_span == 0 and select_version is None:
      select_span = target_span
      select_version = target_version
    # Every split must agree on the selected span and version.
    if select_span != target_span:
      raise ValueError('Latest span should be the same for each split')
    if select_version != target_version:
      raise ValueError('Latest version should be the same for each split')
    fingerprints.append(
        io_utils.generate_fingerprint(
            split.name, os.path.join(input_base_uri, split.pattern)))
  return '\n'.join(fingerprints), select_span, select_version
166,359 | import os
from typing import Any, Dict, Iterable, List
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.dsl.io import fileio
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx_bsl.coders import csv_decoder
def _int_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
  """Converts a CSV cell to an int64 feature; an empty cell yields no value."""
  values = [int(cell)] if cell else []
  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
166,360 | import os
from typing import Any, Dict, Iterable, List
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.dsl.io import fileio
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx_bsl.coders import csv_decoder
def _float_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
  """Converts a CSV cell to a float feature; an empty cell yields no value."""
  values = [float(cell)] if cell else []
  return tf.train.Feature(float_list=tf.train.FloatList(value=values))
166,361 | import os
from typing import Any, Dict, Iterable, List
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.dsl.io import fileio
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx_bsl.coders import csv_decoder
def _bytes_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
  """Converts a CSV cell into a bytes Feature; an empty cell yields no value."""
  values = [cell] if cell else []
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
166,362 | import abc
import bisect
import hashlib
import pickle
from typing import Any, Dict, List, Union
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx import types
from tfx.components.example_gen import utils
from tfx.components.example_gen import write_split
from tfx.components.util import examples_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
def _GeneratePartitionKey(record: Union[tf.train.Example,
                                        tf.train.SequenceExample, bytes,
                                        Dict[str, Any]],
                          split_config: example_gen_pb2.SplitConfig) -> bytes:
  """Generates a deterministic byte key used to hash-partition a record.

  Args:
    record: One payload record: a TF Example/SequenceExample proto, raw
      serialized bytes, or a parsed dict.
    split_config: Split configuration. If `partition_feature_name` is set, the
      named feature's serialized value becomes the key; otherwise the whole
      record does.

  Returns:
    Bytes to be hashed for bucket assignment.

  Raises:
    RuntimeError: If `partition_feature_name` is set but the record type does
      not support it, the feature is absent, has no value, or has an
      unsupported value type.
  """
  if not split_config.HasField('partition_feature_name'):
    # No partition feature configured: key on the whole record, serialized
    # deterministically so equal records always map to the same bucket.
    if isinstance(record, bytes):
      return record
    if isinstance(record, dict):
      return pickle.dumps(record)
    return record.SerializeToString(deterministic=True)
  if isinstance(record, tf.train.Example):
    features = record.features.feature  # pytype: disable=attribute-error
  elif isinstance(record, tf.train.SequenceExample):
    # For SequenceExample only context features are eligible partition keys.
    features = record.context.feature  # pytype: disable=attribute-error
  else:
    raise RuntimeError('Split by `partition_feature_name` is only supported '
                       'for FORMAT_TF_EXAMPLE and FORMAT_TF_SEQUENCE_EXAMPLE '
                       'payload format.')
  # Use a feature for partitioning the examples.
  feature_name = split_config.partition_feature_name
  if feature_name not in features:
    raise RuntimeError('Feature name `{}` does not exist.'.format(feature_name))
  feature = features[feature_name]
  if not feature.HasField('kind'):
    raise RuntimeError('Partition feature does not contain any value.')
  if (not feature.HasField('bytes_list') and
      not feature.HasField('int64_list')):
    raise RuntimeError('Only `bytes_list` and `int64_list` features are '
                       'supported for partition.')
  return feature.SerializeToString(deterministic=True)
The provided code snippet includes necessary dependencies for implementing the `_PartitionFn` function. Write a Python function `def _PartitionFn( record: Union[tf.train.Example, tf.train.SequenceExample, bytes, Dict[str, Any]], num_partitions: int, buckets: List[int], split_config: example_gen_pb2.SplitConfig, ) -> int` to solve the following problem:
Partition function for the ExampleGen's output splits.
Here is the function:
def _PartitionFn(
    record: Union[tf.train.Example, tf.train.SequenceExample, bytes, Dict[str,
                                                                          Any]],
    num_partitions: int,
    buckets: List[int],
    split_config: example_gen_pb2.SplitConfig,
) -> int:
  """Routes a record to one of the ExampleGen output splits."""
  assert num_partitions == len(
      buckets), 'Partitions do not match bucket number.'
  key = _GeneratePartitionKey(record, split_config)
  hashed = int(hashlib.sha256(key).hexdigest(), 16) % buckets[-1]
  # `buckets` holds cumulative upper bounds. E.g. [10, 50, 80] maps
  # hashed in [0, 10) -> split 0, [10, 50) -> split 1, [50, 80) -> split 2.
  return bisect.bisect(buckets, hashed)
166,363 | import os
from typing import Any, Dict
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.types import standard_component_specs
The provided code snippet includes necessary dependencies for implementing the `_ParquetToExample` function. Write a Python function `def _ParquetToExample( # pylint: disable=invalid-name pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str) -> beam.pvalue.PCollection` to solve the following problem:
Read Parquet files and transform to TF examples. Note that each input split will be transformed by this function separately. Args: pipeline: beam pipeline. exec_properties: A dict of execution properties. - input_base: input dir that contains Parquet data. split_pattern: Split.pattern in Input config, glob relative file pattern that maps to input files with root directory given by input_base. Returns: PCollection of TF examples.
Here is the function:
def _ParquetToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[str, Any],
    split_pattern: str) -> beam.pvalue.PCollection:
  """Reads one Parquet split and converts each row dict to a TF Example.

  Each input split is transformed by this function independently.

  Args:
    pipeline: Beam pipeline.
    exec_properties: A dict of execution properties; `input_base` gives the
      root directory of the Parquet data.
    split_pattern: Split.pattern in Input config -- a glob file pattern
      relative to `input_base`.

  Returns:
    PCollection of TF examples.
  """
  input_base = exec_properties[standard_component_specs.INPUT_BASE_KEY]
  parquet_pattern = os.path.join(input_base, split_pattern)
  logging.info('Processing input parquet data %s to TFExample.',
               parquet_pattern)
  # TODO(jyzhao): support per column read by input_config.
  rows = pipeline | 'ReadFromParquet' >> beam.io.ReadFromParquet(
      parquet_pattern)
  return rows | 'ToTFExample' >> beam.Map(utils.dict_to_example)
166,364 | import os
from typing import Any, Dict
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.types import standard_component_specs
The provided code snippet includes necessary dependencies for implementing the `_AvroToExample` function. Write a Python function `def _AvroToExample( # pylint: disable=invalid-name pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str) -> beam.pvalue.PCollection` to solve the following problem:
Read Avro files and transform to TF examples. Note that each input split will be transformed by this function separately. Args: pipeline: beam pipeline. exec_properties: A dict of execution properties. - input_base: input dir that contains Avro data. split_pattern: Split.pattern in Input config, glob relative file pattern that maps to input files with root directory given by input_base. Returns: PCollection of TF examples.
Here is the function:
def _AvroToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[str, Any],
    split_pattern: str) -> beam.pvalue.PCollection:
  """Reads one Avro split and converts each record dict to a TF Example.

  Each input split is transformed by this function independently.

  Args:
    pipeline: Beam pipeline.
    exec_properties: A dict of execution properties; `input_base` gives the
      root directory of the Avro data.
    split_pattern: Split.pattern in Input config -- a glob file pattern
      relative to `input_base`.

  Returns:
    PCollection of TF examples.
  """
  input_base = exec_properties[standard_component_specs.INPUT_BASE_KEY]
  avro_pattern = os.path.join(input_base, split_pattern)
  logging.info('Processing input avro data %s to TFExample.', avro_pattern)
  records = pipeline | 'ReadFromAvro' >> beam.io.ReadFromAvro(avro_pattern)
  return records | 'ToTFExample' >> beam.Map(utils.dict_to_example)
166,365 | import os
from typing import Optional, Any, Dict, Union
import apache_beam as beam
import tensorflow as tf
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
from tfx.utils import deprecation_utils
from tfx_bsl.telemetry import util
DEFAULT_PARQUET_FILE_NAME = 'data_parquet'
DEFAULT_FILE_NAME = 'data_tfrecord'
class MaybeSerialize(beam.DoFn):
  """Passes records through, serializing proto records to bytes first."""

  def __init__(self):
    # Telemetry counter for the total number of records processed.
    self._num_instances = beam.metrics.Metrics.counter(METRICS_NAMESPACE,
                                                       'num_instances')

  def process(self, e: Union[tf.train.Example, tf.train.SequenceExample,
                             bytes]):
    self._num_instances.inc(1)
    # Already-serialized bytes are emitted unchanged; protos are serialized.
    if isinstance(e, (tf.train.Example, tf.train.SequenceExample)):
      e = e.SerializeToString()  # pytype: disable=attribute-error
    yield e
tf.train.SequenceExample, bytes,
Dict[str, Any]])
The provided code snippet includes necessary dependencies for implementing the `WriteSplit` function. Write a Python function `def WriteSplit( example_split: beam.pvalue.PCollection, output_split_path: str, output_format: str, exec_properties: Optional[Dict[str, Any]] = None) -> beam.pvalue.PDone` to solve the following problem:
Shuffles and writes output split as serialized records in TFRecord or Parquet.
Here is the function:
def WriteSplit(
    example_split: beam.pvalue.PCollection,
    output_split_path: str,
    output_format: str,
    exec_properties: Optional[Dict[str, Any]] = None) -> beam.pvalue.PDone:
  """Shuffles and writes one output split as TFRecord or Parquet files.

  Args:
    example_split: Records of one split to write out.
    output_split_path: Directory for the split's output files.
    output_format: Unused; the payload format in `exec_properties` decides.
    exec_properties: Optional execution properties; when the output payload
      format is FORMAT_PARQUET, `pyarrow_schema` must be present.

  Returns:
    The terminal PDone of the write transform.
  """
  del output_format
  payload_format = None
  if exec_properties:
    payload_format = exec_properties.get(
        standard_component_specs.OUTPUT_DATA_FORMAT_KEY)
  if payload_format == example_gen_pb2.PayloadFormat.FORMAT_PARQUET:
    schema = exec_properties.get('pyarrow_schema')
    return (example_split
            # TODO(jyzhao): make shuffle optional.
            | 'Shuffle' >> beam.transforms.Reshuffle()
            | 'WriteParquet' >> beam.io.WriteToParquet(
                os.path.join(output_split_path, DEFAULT_PARQUET_FILE_NAME),
                schema,
                file_name_suffix='.parquet',
                codec='snappy'))
  return (example_split
          | 'MaybeSerialize' >> beam.ParDo(MaybeSerialize())
          # TODO(jyzhao): make shuffle optional.
          | 'Shuffle' >> beam.transforms.Reshuffle()
          | 'Write' >> beam.io.WriteToTFRecord(
              os.path.join(output_split_path, DEFAULT_FILE_NAME),
              file_name_suffix='.gz'))
166,366 | import os
from typing import Optional, Any, Dict, Union
import apache_beam as beam
import tensorflow as tf
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
from tfx.utils import deprecation_utils
from tfx_bsl.telemetry import util
def to_file_format_str(file_format: example_gen_pb2.FileFormat) -> str:  # pylint: disable=invalid-name
  """Maps a FileFormat enum value to its string representation."""
  valid_formats = (example_gen_pb2.FILE_FORMAT_UNSPECIFIED,
                   example_gen_pb2.FORMAT_TFRECORDS_GZIP)
  if file_format not in valid_formats:
    raise ValueError('File format is not valid.')
  return 'tfrecords_gzip'
166,367 | import os
from typing import Any, Dict, List
from absl import logging
import tensorflow_data_validation as tfdv
from tfx import types
from tfx.components.example_validator import labels
from tfx.components.statistics_gen import stats_artifact_utils
from tfx.components.util import value_utils
from tfx.dsl.components.base import base_executor
from tfx.orchestration.experimental.core import component_generated_alert_pb2
from tfx.orchestration.experimental.core import constants
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx.utils import writer_utils
from google.protobuf import any_pb2
from tensorflow_metadata.proto.v0 import anomalies_pb2
The provided code snippet includes necessary dependencies for implementing the `_create_anomalies_alerts` function. Write a Python function `def _create_anomalies_alerts( anomalies: anomalies_pb2.Anomalies, split: str, ) -> list[component_generated_alert_pb2.ComponentGeneratedAlertInfo]` to solve the following problem:
Creates an alert for each anomaly in the anomalies artifact.
Here is the function:
def _create_anomalies_alerts(
    anomalies: anomalies_pb2.Anomalies,
    split: str,
) -> list[component_generated_alert_pb2.ComponentGeneratedAlertInfo]:
  """Builds one component-generated alert per anomaly in `anomalies`.

  Args:
    anomalies: TFDV anomalies proto for one split.
    split: Name of the split the anomalies were computed on.

  Returns:
    A list of ComponentGeneratedAlertInfo, one per detected anomaly.
  """
  alert_cls = component_generated_alert_pb2.ComponentGeneratedAlertInfo
  alerts = []
  # Whole-split signal: the split contained no data at all.
  if anomalies.HasField('data_missing'):
    alerts.append(
        alert_cls(
            alert_name=f'Data missing in split {split}',
            alert_body=f'Empty input data for {split}.',
        )
    )
  # Dataset-level anomaly, such as "Low num examples in dataset".
  if anomalies.HasField('dataset_anomaly_info'):
    alerts.append(
        alert_cls(
            alert_name='Dataset anomalies',
            alert_body=(
                f'{anomalies.dataset_anomaly_info.description} in split '
                f'{split}'),
        )
    )
  # One alert per feature-level anomaly, such as "Some examples have fewer
  # values than expected."
  for feature_name, anomaly_info in anomalies.anomaly_info.items():
    alerts.append(
        alert_cls(
            alert_name=anomaly_info.short_description,
            alert_body=(
                f'{anomaly_info.description} for feature {feature_name} in '
                f'split {split}.'),
        )
    )
  return alerts
166,368 | import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2
The provided code snippet includes necessary dependencies for implementing the `_create_model_server_runner` function. Write a Python function `def _create_model_server_runner( model_path: str, serving_binary: serving_bins.ServingBinary, serving_spec: infra_validator_pb2.ServingSpec)` to solve the following problem:
Create a ModelServerRunner from a model, a ServingBinary and a ServingSpec. Args: model_path: An IV-flavored model path. (See model_path_utils.py) serving_binary: One of ServingBinary instances parsed from the `serving_spec`. serving_spec: A ServingSpec instance of this infra validation. Returns: A ModelServerRunner.
Here is the function:
def _create_model_server_runner(
    model_path: str,
    serving_binary: serving_bins.ServingBinary,
    serving_spec: infra_validator_pb2.ServingSpec):
  """Creates a ModelServerRunner for the platform named in `serving_spec`.

  Args:
    model_path: An IV-flavored model path. (See model_path_utils.py)
    serving_binary: One of ServingBinary instances parsed from the
      `serving_spec`.
    serving_spec: A ServingSpec instance of this infra validation.

  Returns:
    A ModelServerRunner.

  Raises:
    NotImplementedError: If the serving platform is unrecognized.
  """
  # Dispatch table: serving_platform oneof field name -> runner class.
  runner_classes = {
      'local_docker': local_docker_runner.LocalDockerRunner,
      'kubernetes': kubernetes_runner.KubernetesRunner,
  }
  platform = serving_spec.WhichOneof('serving_platform')
  runner_cls = runner_classes.get(platform)
  if runner_cls is None:
    raise NotImplementedError('Invalid serving_platform {}'.format(platform))
  return runner_cls(
      model_path=model_path,
      serving_binary=serving_binary,
      serving_spec=serving_spec)
166,369 | import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2
The provided code snippet includes necessary dependencies for implementing the `_convert_to_prediction_log` function. Write a Python function `def _convert_to_prediction_log(request: iv_types.Request)` to solve the following problem:
Try convert infra validation request to TF-Serving PredictionLog.
Here is the function:
def _convert_to_prediction_log(request: iv_types.Request):
  """Wraps an infra-validation request in the matching TF-Serving PredictionLog."""
  if isinstance(request, classification_pb2.ClassificationRequest):
    log = prediction_log_pb2.ClassifyLog(request=request)
    return prediction_log_pb2.PredictionLog(classify_log=log)
  if isinstance(request, regression_pb2.RegressionRequest):
    log = prediction_log_pb2.RegressLog(request=request)
    return prediction_log_pb2.PredictionLog(regress_log=log)
  if isinstance(request, predict_pb2.PredictRequest):
    log = prediction_log_pb2.PredictLog(request=request)
    return prediction_log_pb2.PredictionLog(predict_log=log)
  raise NotImplementedError(
      f'Cannot convert {type(request)} to PredictionLog')
166,370 | import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2
_BLESSED_KEY = 'blessed'
_BLESSED_FILENAME = 'INFRA_BLESSED'
def _mark_blessed(blessing: types.Artifact) -> None:
  """Records a passing infra-validation verdict on the blessing artifact."""
  logging.info('Model passed infra validation.')
  marker_path = os.path.join(blessing.uri, _BLESSED_FILENAME)
  io_utils.write_string_file(marker_path, '')
  blessing.set_int_custom_property(_BLESSED_KEY, 1)
166,371 | import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2
_BLESSED_KEY = 'blessed'
_NOT_BLESSED_FILENAME = 'INFRA_NOT_BLESSED'
def _mark_not_blessed(blessing: types.Artifact) -> None:
  """Records a failing infra-validation verdict on the blessing artifact."""
  logging.info('Model failed infra validation.')
  marker_path = os.path.join(blessing.uri, _NOT_BLESSED_FILENAME)
  io_utils.write_string_file(marker_path, '')
  blessing.set_int_custom_property(_BLESSED_KEY, 0)
166,372 | import abc
import os
from typing import Any, Dict, List, Optional
from docker import types as docker_types
from tfx.components.infra_validator.model_server_clients import base_client
from tfx.components.infra_validator.model_server_clients import tensorflow_serving_client
from tfx.proto import infra_validator_pb2
from tfx.utils.model_paths import tf_serving_flavor
class TensorFlowServing(ServingBinary):
  """TensorFlow Serving binary.

  Wraps a pinned `tensorflow/serving` docker image (by tag or digest) and
  builds the environment variables, docker run parameters, and gRPC client
  needed to serve a single named model from it.
  """
  # Parameters common to every docker `client.containers.run` call.
  _BASE_DOCKER_RUN_PARAMS = {
      # Enable auto-removal of the container on docker daemon after container
      # process exits.
      'auto_remove': True,
      # Run container in the background instead of streaming its output.
      'detach': True,
      # Publish all ports to the host.
      'publish_all_ports': True,
  }
  # Docker image used when the caller does not specify one.
  _DEFAULT_IMAGE_NAME = 'tensorflow/serving'
  # Port the TF Serving gRPC endpoint listens on inside the container.
  _DEFAULT_GRPC_PORT = 8500
  # In-container base directory under which the model is mounted.
  _DEFAULT_MODEL_BASE_PATH = '/model'
  def __init__(
      self,
      model_name: str,
      image_name: Optional[str] = None,
      tag: Optional[str] = None,
      digest: Optional[str] = None,
  ):
    """Initializes the binary descriptor.

    Args:
      model_name: Model name TF Serving should expose.
      image_name: Docker image name; defaults to `tensorflow/serving`.
      tag: Image tag. Exactly one of `tag` or `digest` must be given.
      digest: Image digest. Exactly one of `tag` or `digest` must be given.

    Raises:
      ValueError: If neither or both of `tag` and `digest` are given.
    """
    super().__init__()
    self._model_name = model_name
    # XOR check: exactly one way of pinning the image must be provided.
    if (tag is None) == (digest is None):
      raise ValueError('Exactly one of `tag` or `digest` should be used.')
    image_name = image_name or self._DEFAULT_IMAGE_NAME
    if tag is not None:
      self._image = '{}:{}'.format(image_name, tag)
    else:
      self._image = '{}@{}'.format(image_name, digest)
  # NOTE(review): `container_port` and `image` look like `@property`
  # accessors whose decorators may have been lost in extraction -- confirm
  # against the original file before relying on call syntax.
  def container_port(self) -> int:
    """Container-internal gRPC port of the model server."""
    return self._DEFAULT_GRPC_PORT
  def image(self) -> str:
    """Fully qualified docker image reference (name:tag or name@digest)."""
    return self._image
  def MakeEnvVars(self, model_path: Optional[str] = None) -> Dict[str, str]:
    """Builds the environment variables for the TF Serving container.

    Args:
      model_path: Optional IV-flavored model path. When given, its base path
        is handed to TF Serving; otherwise the default in-container base path
        is used.

    Returns:
      Mapping of environment variable names to values.
    """
    if model_path is None:
      model_base_path = self._DEFAULT_MODEL_BASE_PATH
    else:
      model_base_path = tf_serving_flavor.parse_model_base_path(model_path)
    return {
        'MODEL_NAME': self._model_name,
        'MODEL_BASE_PATH': model_base_path,
        # Silence verbose TF C++ logging inside the container.
        'TF_CPP_MAX_VLOG_LEVEL': '3',
    }
  def MakeDockerRunParams(self, model_path: str,
                          needs_mount: bool) -> Dict[str, Any]:
    """Make parameters for docker `client.containers.run`.

    Args:
      model_path: A path to the model.
      needs_mount: If True, model_path will be mounted to the container.

    Returns:
      A dictionary of docker run parameters.
    """
    result = dict(
        self._BASE_DOCKER_RUN_PARAMS,
        image=self._image)
    if needs_mount:
      # model_path should be a local directory. In order to make TF Serving see
      # the host model path, we need to mount model path volume to the
      # container.
      assert os.path.isdir(model_path), '{} does not exist'.format(model_path)
      container_model_path = tf_serving_flavor.make_model_path(
          model_base_path=self._DEFAULT_MODEL_BASE_PATH,
          model_name=self._model_name,
          version=1)
      result.update(
          environment=self.MakeEnvVars(),
          mounts=[
              docker_types.Mount(
                  type='bind',
                  target=container_model_path,
                  source=model_path,
                  read_only=True)
          ])
    else:
      # model_path is presumably a remote URI. TF Serving is able to pickup
      # model in remote directly using gfile, so all we need to do is setting
      # environment variables correctly.
      result.update(
          environment=self.MakeEnvVars(model_path=model_path))
    return result
  def MakeClient(self, endpoint: str) -> base_client.BaseModelServerClient:
    """Creates a gRPC client for this model bound to `endpoint`."""
    return tensorflow_serving_client.TensorFlowServingClient(
        endpoint=endpoint, model_name=self._model_name)
The provided code snippet includes necessary dependencies for implementing the `parse_serving_binaries` function. Write a Python function `def parse_serving_binaries( # pylint: disable=invalid-name serving_spec: infra_validator_pb2.ServingSpec) -> List['ServingBinary']` to solve the following problem:
Parse `ServingBinary`s from `ServingSpec`.
Here is the function:
def parse_serving_binaries(  # pylint: disable=invalid-name
    serving_spec: infra_validator_pb2.ServingSpec) -> List['ServingBinary']:
  """Builds one ServingBinary per image reference in `serving_spec`.

  Args:
    serving_spec: Infra validation ServingSpec config.

  Returns:
    A list of ServingBinary, one per configured tag or digest.

  Raises:
    ValueError: If the serving_binary oneof is not `tensorflow_serving`.
  """
  serving_binary = serving_spec.WhichOneof('serving_binary')
  if serving_binary != 'tensorflow_serving':
    raise ValueError('Invalid serving_binary {}'.format(serving_binary))
  config = serving_spec.tensorflow_serving
  image_name = config.image_name or None
  binaries = [
      TensorFlowServing(image_name=image_name,
                        model_name=serving_spec.model_name,
                        tag=tag) for tag in config.tags
  ]
  binaries.extend(
      TensorFlowServing(image_name=image_name,
                        model_name=serving_spec.model_name,
                        digest=digest) for digest in config.digests)
  return binaries
166,373 | import abc
import os
from typing import Any, Iterable, List, Mapping, Optional
from absl import logging
import tensorflow as tf
from tfx import types
from tfx.components.infra_validator import types as iv_types
from tfx.components.util import examples_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.utils import path_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow.python.saved_model import loader_impl
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import regression_pb2
_TENSORFLOW_SERVING = 'tensorflow_serving'
_DEFAULT_NUM_EXAMPLES = 1
def _parse_saved_model_signatures(  # pylint: disable=invalid-name
    model_path: str, tag_set: Iterable[str],
    signature_names: Iterable[str]) -> Mapping[str, _SignatureDef]:
  """Reads the requested SignatureDefs from a SavedModel on disk.

  Among one or more MetaGraphDefs in the SavedModel, the first one containing
  every tag in `tag_set` is chosen. That MetaGraphDef must define all of the
  requested signature names.

  Args:
    model_path: A path to the SavedModel directory.
    tag_set: A set of tags the MetaGraphDef should have.
    signature_names: A list of signature names to retrieve.

  Returns:
    A mapping from signature name to SignatureDef.

  Raises:
    ValueError: If a requested signature is missing from the MetaGraphDef.
  """
  if not tag_set:
    # Fall back to the conventional serving tag.
    tag_set = {tf.saved_model.SERVING}
    logging.info('tag_set is not given. Using %r instead.', tag_set)
  if not signature_names:
    # Fall back to the default serving signature.
    signature_names = [tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    logging.info('signature_names are not given. Using %r instead.',
                 signature_names)
  loader = loader_impl.SavedModelLoader(model_path)
  meta_graph_def = loader.get_meta_graph_def_from_tags(tag_set)
  available = meta_graph_def.signature_def
  parsed = {}
  for name in signature_names:
    if name not in available:
      raise ValueError('SignatureDef of name {} could not be found in '
                       'MetaGraphDef'.format(name))
    parsed[name] = available[name]
  return parsed
class _TFServingRpcRequestBuilder(_BaseRequestBuilder):
"""RequestBuilder for TF Serving RPC requests.
There are three kinds of request the builder can make:
- ClassificationRequest
- RegressionRequest
- PredictRequest
Types of request to build is determined by inspecting SavedModel and getting
SignatureDef from it. What user can configure is the signature names to use.
To build a ClassificationRequest or a RegressionRequest, logical format of
the record should be TF_EXAMPLE.
To build a PredictRequest, its corresponding SignatureDef should have a single
input argument that accepts serialized record inputs. Its logical format does
not matter as long as user have a correct parsing logic.
"""
def __init__(self, model_name: str, signatures: Mapping[str, _SignatureDef]):
super().__init__()
self._model_name = model_name
self._signatures = signatures
self._examples = []
def examples(self) -> List[tf.train.Example]:
"""Get parsed TF.Examples that the builder has built."""
if not self._examples:
if (self._payload_format !=
example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE):
raise ValueError(
'Data payload format should be FORMAT_TF_EXAMPLE. Got: {}'.format(
example_gen_pb2.PayloadFormat.Name(self._payload_format)))
for record in self._records:
example = tf.train.Example()
example.ParseFromString(record)
self._examples.append(example)
return self._examples
def BuildRequests(self) -> List[iv_types.TensorFlowServingRequest]:
assert self._records, 'Records are empty.'
result = []
for signature_name, signature_def in self._signatures.items():
if signature_def.method_name == tf.saved_model.PREDICT_METHOD_NAME:
result.extend(
self._BuildPredictRequests(
signature_name, self._GetSerializedInputKey(signature_def)))
elif signature_def.method_name == tf.saved_model.CLASSIFY_METHOD_NAME:
result.extend(self._BuildClassificationRequests(signature_name))
elif signature_def.method_name == tf.saved_model.REGRESS_METHOD_NAME:
result.extend(self._BuildRegressionRequests(signature_name))
else:
raise ValueError('Unknown method name {}'.format(
signature_def.method_name))
return result
def _GetSerializedInputKey(self, signature_def: _SignatureDef):
"""Gets key for SignatureDef input that consumes serialized record.
To build a PredictRequest, SignatureDef inputs should have a single input
argument that accepts serialized record inputs. The input TensorSpec should
have dtype=DT_STRING and shape=TensorShape([None]).
Args:
signature_def: A SignatureDef proto message.
Returns:
An input key for the serialized input.
"""
signature_input_keys = list(signature_def.inputs.keys())
if len(signature_input_keys) == 1:
input_key = signature_input_keys[0]
input_spec = signature_def.inputs[input_key]
if (input_spec.dtype == tf.dtypes.string.as_datatype_enum
and input_spec.tensor_shape == tf.TensorShape([None]).as_proto()):
return input_key
# TODO(b/151697719): General Predict method signature support.
raise ValueError(
'Unable to find valid input key from SignatureDef. In order to make '
'PredictRequest, model should define signature that accepts serialized '
'record inputs, i.e. signature with single input whose dtype=DT_STRING '
'and shape=TensorShape([None]).')
def _BuildClassificationRequests(self, signature_name: str):
for example in self.examples:
request = classification_pb2.ClassificationRequest()
request.model_spec.name = self._model_name
request.model_spec.signature_name = signature_name
request.input.example_list.examples.append(example)
yield request
def _BuildRegressionRequests(self, signature_name: str):
for example in self.examples:
request = regression_pb2.RegressionRequest()
request.model_spec.name = self._model_name
request.model_spec.signature_name = signature_name
request.input.example_list.examples.append(example)
yield request
def _BuildPredictRequests(self, signature_name: str,
                          serialized_input_key: str):
  """Yields one PredictRequest per raw serialized record.

  Each record is wrapped as a single-element string tensor under
  `serialized_input_key`.
  """
  for serialized_record in self._records:
    req = predict_pb2.PredictRequest()
    req.model_spec.signature_name = signature_name
    req.model_spec.name = self._model_name
    req.inputs[serialized_input_key].CopyFrom(
        tf.make_tensor_proto([serialized_record]))
    yield req
The provided code snippet includes necessary dependencies for implementing the `build_requests` function. Write a Python function `def build_requests( # pylint: disable=invalid-name model_name: str, model: types.Artifact, examples: types.Artifact, request_spec: infra_validator_pb2.RequestSpec) -> List[iv_types.Request]` to solve the following problem:
Build model server requests. Examples artifact will be used as a data source to build requests. Caller should guarantee that the logical format of the Examples artifact should be compatible with request type to build. Args: model_name: A model name that model server recognizes. model: A model artifact for model signature analysis. examples: An `Examples` artifact for request data source. request_spec: A `RequestSpec` config. Returns: A list of request protos.
Here is the function:
def build_requests( # pylint: disable=invalid-name
model_name: str, model: types.Artifact, examples: types.Artifact,
request_spec: infra_validator_pb2.RequestSpec) -> List[iv_types.Request]:
"""Build model server requests.
Examples artifact will be used as a data source to build requests. Caller
should guarantee that the logical format of the Examples artifact should be
compatible with request type to build.
Args:
model_name: A model name that model server recognizes.
model: A model artifact for model signature analysis.
examples: An `Examples` artifact for request data source.
request_spec: A `RequestSpec` config.
Returns:
A list of request protos.
"""
# Proto scalar fields default to falsy values, so unset spec fields fall
# back to None / _DEFAULT_NUM_EXAMPLES here.
split_name = request_spec.split_name or None
num_examples = request_spec.num_examples or _DEFAULT_NUM_EXAMPLES
kind = request_spec.WhichOneof('kind')
# Only the tensorflow_serving request kind is implemented at present.
if kind == _TENSORFLOW_SERVING:
spec = request_spec.tensorflow_serving
signatures = _parse_saved_model_signatures(
model_path=path_utils.serving_model_path(
model.uri, path_utils.is_old_model_artifact(model)),
tag_set=spec.tag_set,
signature_names=spec.signature_names)
builder = _TFServingRpcRequestBuilder(
model_name=model_name,
signatures=signatures)
else:
raise NotImplementedError('Unsupported RequestSpec kind {!r}'.format(kind))
builder.ReadExamplesArtifact(
examples,
split_name=split_name,
num_examples=num_examples)
return builder.BuildRequests() | Build model server requests. Examples artifact will be used as a data source to build requests. Caller should guarantee that the logical format of the Examples artifact should be compatible with request type to build. Args: model_name: A model name that model server recognizes. model: A model artifact for model signature analysis. examples: An `Examples` artifact for request data source. request_spec: A `RequestSpec` config. Returns: A list of request protos.
166,374 | import os
import time
from typing import Any, Dict, Optional
from absl import logging
import docker
from docker import errors as docker_errors
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
def _make_docker_client(config: infra_validator_pb2.LocalDockerConfig):
"""Builds a docker.DockerClient from a LocalDockerConfig proto."""
params = {}
# Unset proto fields are falsy and therefore omitted, so the docker client
# library's own defaults apply for anything not explicitly configured.
if config.client_timeout_seconds:
params['timeout'] = config.client_timeout_seconds
if config.client_base_url:
params['base_url'] = config.client_base_url
if config.client_api_version:
params['version'] = config.client_api_version
return docker.DockerClient(**params) | null
166,375 | import os
import time
from typing import Any, Dict, Optional
from absl import logging
import docker
from docker import errors as docker_errors
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
The provided code snippet includes necessary dependencies for implementing the `_find_host_port` function. Write a Python function `def _find_host_port(ports: Dict[str, Any], container_port: int) -> str` to solve the following problem:
Find host port from container port mappings. `ports` is a nested dictionary of the following structure: { '8500/tcp': [ {'HostIp': '0.0.0.0', 'HostPort': '32769'}, {'HostIp': '::', 'HostPort': '32770'}, ], '8501/tcp': [ {'HostIp': '0.0.0.0', 'HostPort': '32768'}, {'HostIp': '::', 'HostPort': '32771'}, ], } Args: ports: Dictionary of docker container port mapping. container_port: Corresponding container port you're looking for. Returns: A found host port. Raises: ValueError: No corresponding host port was found.
Here is the function:
def _find_host_port(ports: Dict[str, Any], container_port: int) -> str:
"""Find host port from container port mappings.
`ports` is a nested dictionary of the following structure:
{
'8500/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32769'},
{'HostIp': '::', 'HostPort': '32770'},
],
'8501/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32768'},
{'HostIp': '::', 'HostPort': '32771'},
],
}
Args:
ports: Dictionary of docker container port mapping.
container_port: Corresponding container port you're looking for.
Returns:
A found host port.
Raises:
ValueError: No corresponding host port was found.
"""
mappings = ports.get('{}/tcp'.format(container_port), [])
for mapping in mappings:
if mapping['HostIp'] == '0.0.0.0':
return mapping['HostPort']
else:
raise ValueError(
'No HostPort found for ContainerPort={} (all port mappings: {})'
.format(container_port, ports)) | Find host port from container port mappings. `ports` is a nested dictionary of the following structure: { '8500/tcp': [ {'HostIp': '0.0.0.0', 'HostPort': '32769'}, {'HostIp': '::', 'HostPort': '32770'}, ], '8501/tcp': [ {'HostIp': '0.0.0.0', 'HostPort': '32768'}, {'HostIp': '::', 'HostPort': '32771'}, ], } Args: ports: Dictionary of docker container port mapping. container_port: Corresponding container port you're looking for. Returns: A found host port. Raises: ValueError: No corresponding host port was found. |
166,376 | import datetime
import os
import time
from typing import Optional
from absl import logging
from apache_beam.utils import retry
from kubernetes import client as k8s_client
from kubernetes.client import rest
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
from tfx.utils import kube_utils
def _is_subdirectory(maybe_parent: str, maybe_child: str) -> bool:
paren = os.path.realpath(maybe_parent).split(os.path.sep)
child = os.path.realpath(maybe_child).split(os.path.sep)
return len(paren) <= len(child) and all(a == b for a, b in zip(paren, child)) | null |
166,377 | import datetime
import os
import time
from typing import Optional
from absl import logging
from apache_beam.utils import retry
from kubernetes import client as k8s_client
from kubernetes.client import rest
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
from tfx.utils import kube_utils
def _get_container_or_error(
    pod: k8s_client.V1Pod, container_name: str) -> k8s_client.V1Container:
  """Returns the container named `container_name` from the pod spec.

  Raises:
    ValueError: If the pod has no container with that name.
  """
  containers = pod.spec.containers
  matches = [c for c in containers if c.name == container_name]
  if matches:
    return matches[0]
  raise ValueError(
      'Unable to find {} container from the pod (found {}).'.format(
          container_name, [c.name for c in containers]))
166,378 | import datetime
import os
import time
from typing import Optional
from absl import logging
from apache_beam.utils import retry
from kubernetes import client as k8s_client
from kubernetes.client import rest
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
from tfx.utils import kube_utils
def _api_exception_retry_filter(exception: Exception):
"""Retry filter: retry only when the error is a Kubernetes rest.ApiException."""
return isinstance(exception, rest.ApiException) | null
166,379 | import datetime
import os
import time
from typing import Optional
from absl import logging
from apache_beam.utils import retry
from kubernetes import client as k8s_client
from kubernetes.client import rest
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
from tfx.utils import kube_utils
The provided code snippet includes necessary dependencies for implementing the `_convert_to_kube_env` function. Write a Python function `def _convert_to_kube_env( env: infra_validator_pb2.EnvVar) -> k8s_client.V1EnvVar` to solve the following problem:
Convert infra_validator_pb2.EnvVar to kubernetes.V1EnvVar.
Here is the function:
def _convert_to_kube_env(
env: infra_validator_pb2.EnvVar) -> k8s_client.V1EnvVar:
"""Convert infra_validator_pb2.EnvVar to kubernetes.V1EnvVar."""
if not env.name:
raise ValueError('EnvVar.name must be specified.')
# A value_from reference takes precedence over a literal value; within
# value_from only the secret_key_ref source is supported.
if env.HasField('value_from'):
if env.value_from.HasField('secret_key_ref'):
value_source = k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=env.value_from.secret_key_ref.name,
key=env.value_from.secret_key_ref.key))
return k8s_client.V1EnvVar(name=env.name, value_from=value_source)
else:
raise ValueError(f'Bad EnvVar: {env}')
else:
# Note that env.value can be empty.
return k8s_client.V1EnvVar(name=env.name, value=env.value) | Convert infra_validator_pb2.EnvVar to kubernetes.V1EnvVar.
166,380 | import datetime
import os
import time
from typing import Optional
from absl import logging
from apache_beam.utils import retry
from kubernetes import client as k8s_client
from kubernetes.client import rest
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
from tfx.utils import kube_utils
def _convert_to_resource_requirements(
resources: infra_validator_pb2.Resources
) -> k8s_client.V1ResourceRequirements:
"""Converts infra_validator_pb2.Resources to k8s V1ResourceRequirements."""
# Older kubernetes client versions do not have the `claims` field on
# V1ResourceRequirements, so feature-detect it before forwarding.
if hasattr(k8s_client.V1ResourceRequirements, 'claims'):
return k8s_client.V1ResourceRequirements(
requests=dict(resources.requests),
limits=dict(resources.limits),
claims=dict(resources.claims),
)
else:
return k8s_client.V1ResourceRequirements(
requests=dict(resources.requests),
limits=dict(resources.limits),
) | null
166,381 | import os
import tensorflow_data_validation as tfdv
from tfx.types import artifact
from tfx.types import artifact_utils
# File basenames used by the two statistics artifact storage formats; which
# one applies is decided by the artifact-version check below.
BINARY_PB_BASENAME = 'FeatureStats.pb'
TFRECORD_BASENAME = 'stats_tfrecord'
def load_statistics(stats_artifact: artifact.Artifact,
split: str) -> tfdv.DatasetListView:
"""Loads TFDV statistics for one split of a statistics artifact."""
stats_dir = artifact_utils.get_split_uri([stats_artifact], split)
# Artifacts older than _ARTIFACT_VERSION_FOR_STATS_UPDATE stored stats as a
# TFRecord file; newer ones store a binary proto.
if artifact_utils.is_artifact_version_older_than(
stats_artifact, artifact_utils._ARTIFACT_VERSION_FOR_STATS_UPDATE): # pylint: disable=protected-access
stats = tfdv.load_statistics(os.path.join(stats_dir, TFRECORD_BASENAME))
else:
stats = tfdv.load_stats_binary(os.path.join(stats_dir, BINARY_PB_BASENAME))
return tfdv.DatasetListView(stats) | null
166,382 | from typing import Optional
from tfx import types
from tfx.components.experimental.data_view import constants
from tfx.types import standard_artifacts
The provided code snippet includes necessary dependencies for implementing the `get_data_view_uri` function. Write a Python function `def get_data_view_uri(examples: types.Artifact) -> Optional[str]` to solve the following problem:
Returns the URI to the DataView attached to an Examples artifact. Or None, if not attached. Args: examples: an Examples artifact. Returns: The URI to the DataView or None.
Here is the function:
def get_data_view_uri(examples: types.Artifact) -> Optional[str]:
"""Returns the URI to the DataView attached to an Examples artifact.
Or None, if not attached.
Args:
examples: an Examples artifact.
Returns:
The URI to the DataView or None.
"""
assert examples.type is standard_artifacts.Examples, (
'examples must be of type standard_artifacts.Examples')
data_view_uri = examples.get_string_custom_property(
constants.DATA_VIEW_URI_PROPERTY_KEY)
# Falsy property values (e.g. the empty string returned for an unset
# property) are normalized to None.
return data_view_uri if data_view_uri else None | Returns the URI to the DataView attached to an Examples artifact. Or None, if not attached. Args: examples: an Examples artifact. Returns: The URI to the DataView or None.
166,383 | import os
from typing import Any, Dict, List
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tensorflow_data_validation.skew import feature_skew_detector
from tfx import types
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.proto import example_diff_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx_bsl.tfxio import record_based_tfxio
def _parse_example(serialized: bytes):
"""Deserializes a tf.train.Example proto from its serialized bytes."""
# TODO(b/227361696): Validate that data are examples.
ex = tf.train.Example()
ex.ParseFromString(serialized)
return ex | null
166,384 | import os
from typing import Any, Dict, List
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tensorflow_data_validation.skew import feature_skew_detector
from tfx import types
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_beam_executor
from tfx.proto import example_diff_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx_bsl.tfxio import record_based_tfxio
def _get_confusion_configs(
    config: example_diff_pb2.ExampleDiffConfig
) -> List[feature_skew_detector.ConfusionConfig]:
  """Extracts ConfusionConfigs from the paired-example-skew settings."""
  return [
      feature_skew_detector.ConfusionConfig(entry.feature_name)
      for entry in config.paired_example_skew.confusion_config
  ]
The provided code snippet includes necessary dependencies for implementing the `_config_to_kwargs` function. Write a Python function `def _config_to_kwargs(config: example_diff_pb2.ExampleDiffConfig)` to solve the following problem:
Convert ExampleDiffConfig to DetectFeatureSkewImpl kwargs.
Here is the function:
def _config_to_kwargs(config: example_diff_pb2.ExampleDiffConfig):
"""Convert ExampleDiffConfig to DetectFeatureSkewImpl kwargs."""
kwargs = {}
if not config.HasField('paired_example_skew'):
raise ValueError('ExampleDiffConfig missing required paired_example_skew.')
# Repeated proto fields are materialized as plain lists so downstream code
# receives ordinary Python containers rather than proto views.
kwargs['identifier_features'] = list(
config.paired_example_skew.identifier_features)
kwargs['features_to_ignore'] = list(
config.paired_example_skew.ignore_features)
kwargs['sample_size'] = config.paired_example_skew.skew_sample_size
kwargs['float_round_ndigits'] = config.paired_example_skew.float_round_ndigits
kwargs[
'allow_duplicate_identifiers'] = config.paired_example_skew.allow_duplicate_identifiers
# TODO(b/227361696): Add better unit tests here. This will require generating
# a new dataset for test purposes.
kwargs['confusion_configs'] = _get_confusion_configs(config)
return kwargs | Convert ExampleDiffConfig to DetectFeatureSkewImpl kwargs.
166,385 | import hashlib
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import logging
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.utils import import_utils
from tfx.utils import io_utils
def get_fn(exec_properties: Dict[str, Any], fn_name: str) -> Callable[..., Any]:
  """Loads and returns user-defined function.

  Resolution order: a packaged module path, then a module file (optionally
  with a function-name override stored under `fn_name`), then a fully
  qualified function path stored under `fn_name`.

  Raises:
    ValueError: If neither a module file nor a user function is supplied.
  """
  logging.info('udf_utils.get_fn %r %r', exec_properties, fn_name)
  module_path = exec_properties.get(_MODULE_PATH_KEY)
  module_file = exec_properties.get(_MODULE_FILE_KEY)
  fn_value = exec_properties.get(fn_name)
  if module_path:
    return import_utils.import_func_from_module(module_path, fn_name)
  if module_file:
    # A non-empty value under `fn_name` overrides the default function name.
    target_fn = exec_properties[fn_name] if fn_value else fn_name
    return import_utils.import_func_from_source(module_file, target_fn)
  if fn_value:
    # 'pkg.mod.fn' -> module 'pkg.mod', function 'fn'.
    *module_parts, leaf_name = fn_value.split('.')
    return import_utils.import_func_from_module('.'.join(module_parts),
                                                leaf_name)
  raise ValueError(
      'Neither module file or user function have been supplied in `exec_properties`.'
  )
The provided code snippet includes necessary dependencies for implementing the `try_get_fn` function. Write a Python function `def try_get_fn(exec_properties: Dict[str, Any], fn_name: str) -> Optional[Callable[..., Any]]` to solve the following problem:
Loads and returns user-defined function if exists.
Here is the function:
def try_get_fn(exec_properties: Dict[str, Any],
fn_name: str) -> Optional[Callable[..., Any]]:
"""Loads and returns the user-defined function, or None if unavailable."""
try:
return get_fn(exec_properties, fn_name)
except (ValueError, AttributeError):
# ValueError: module file or user function is unset.
# AttributeError: the function doesn't exist in the module.
return None | Loads and returns user-defined function if exists.
166,386 | import hashlib
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import logging
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.utils import import_utils
from tfx.utils import io_utils
The provided code snippet includes necessary dependencies for implementing the `should_package_user_modules` function. Write a Python function `def should_package_user_modules()` to solve the following problem:
Whether to package user modules in the current execution environment.
Here is the function:
def should_package_user_modules():
  """Whether to package user modules in the current execution environment."""
  # Escape hatch: any non-empty value of this env var disables packaging.
  return not os.environ.get('UNSUPPORTED_DO_NOT_PACKAGE_USER_MODULES')
166,387 | import hashlib
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import logging
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.utils import import_utils
from tfx.utils import io_utils
class UserModuleFilePipDependency(base_component._PipDependencyFuture): # pylint: disable=protected-access
"""Specification of a user module dependency."""
def __init__(self, component: base_component.BaseComponent,
module_file_key: str, module_path_key: str):
"""Stores the component and the exec_properties keys rewritten by resolve()."""
self.component = component
self.module_file_key = module_file_key
self.module_path_key = module_path_key
def resolve(self, pipeline_root: str):
"""Packages the component's module file as a wheel under pipeline_root.

Returns the wheel path, or None when packaging is skipped (no module
file, non-string module file, or no pipeline root).
"""
# Package the given user module file as a Python wheel.
module_file = self.component.spec.exec_properties[self.module_file_key]
# Perform validation on the given `module_file`.
if not module_file:
return None
elif not isinstance(module_file, str):
# TODO(b/187753042): Deprecate and remove usage of RuntimeParameters for
# `module_file` parameters and remove this code path.
logging.warning(
'Module file %r for component %s is not a path string; '
'skipping Python user module wheel packaging.', module_file,
self.component)
return None
elif not fileio.exists(module_file):
raise ValueError(f'Specified module file {module_file!r} '
f'for component {self.component} does not exist.')
# Perform validation on the `pipeline_root`.
if not pipeline_root:
logging.warning(
'No pipeline root provided; skipping Python user module '
'wheel packaging for component %s.', self.component)
return None
pipeline_root_exists = fileio.exists(pipeline_root)
if not pipeline_root_exists:
fileio.makedirs(pipeline_root)
# Perform packaging of the user module.
dist_file_path, user_module_path = package_user_module_file(
self.component.id, module_file, pipeline_root)
# Set the user module key to point to a module in this wheel, and clear the
# module path key before returning.
self.component.spec.exec_properties[self.module_path_key] = user_module_path
self.component.spec.exec_properties[self.module_file_key] = None
return dist_file_path
The provided code snippet includes necessary dependencies for implementing the `add_user_module_dependency` function. Write a Python function `def add_user_module_dependency(component: base_component.BaseComponent, module_file_key: str, module_path_key: str) -> None` to solve the following problem:
Adds a module file dependency to the current component.
Here is the function:
def add_user_module_dependency(component: base_component.BaseComponent,
module_file_key: str,
module_path_key: str) -> None:
"""Adds a module file dependency to the current component."""
# The dependency is a future: the wheel is only built when its resolve()
# is invoked with a pipeline root.
dependency = UserModuleFilePipDependency(component, module_file_key,
module_path_key)
component._add_pip_dependency(dependency) # pylint: disable=protected-access | Adds a module file dependency to the current component.
166,388 | import hashlib
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import logging
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.utils import import_utils
from tfx.utils import io_utils
# Name of the generated setup.py written into the ephemeral build directory.
_EPHEMERAL_SETUP_PY_FILE_NAME = '_tfx_generated_setup.py'
def _get_ephemeral_setup_py_contents(package_name: str, version_string: str,
module_names: List[str]):
"""Returns setup.py source text for an ephemeral user-code wheel package."""
return f"""import setuptools
setuptools.setup(
name={repr(package_name)},
version={repr(version_string)},
author='TFX User',
author_email='nobody@example.com',
description='Auto-generated TFX user code package.',
py_modules={repr(module_names)},
classifiers=[],
python_requires='>=3.6',
)
"""
def _get_version_hash(user_module_dir: str, source_files: List[str]) -> str:
"""Compute a version hash based on user module directory contents."""
source_files = sorted(source_files)
h = hashlib.sha256()
for source_file in source_files:
source_file_name_bytes = source_file.encode('utf-8')
h.update(struct.pack('>Q', len(source_file_name_bytes)))
h.update(source_file_name_bytes)
with open(os.path.join(user_module_dir, source_file), 'rb') as f:
file_contents = f.read()
h.update(struct.pack('>Q', len(file_contents)))
h.update(file_contents)
return h.hexdigest()
The provided code snippet includes necessary dependencies for implementing the `package_user_module_file` function. Write a Python function `def package_user_module_file(instance_name: str, module_path: str, pipeline_root: str) -> Tuple[str, str]` to solve the following problem:
Package the given user module file into a Python Wheel package. Args: instance_name: Name of the component instance, for creating a unique wheel package name. module_path: Path to the module file to be packaged. pipeline_root: Text Returns: dist_file_path: Path to the generated wheel file. user_module_path: Path for referencing the user module when stored as the _MODULE_PATH_KEY execution property. Format should be treated as opaque by the user. Raises: RuntimeError: When wheel building fails.
Here is the function:
def package_user_module_file(instance_name: str, module_path: str,
pipeline_root: str) -> Tuple[str, str]:
"""Package the given user module file into a Python Wheel package.
Args:
instance_name: Name of the component instance, for creating a unique wheel
package name.
module_path: Path to the module file to be packaged.
pipeline_root: Text
Returns:
dist_file_path: Path to the generated wheel file.
user_module_path: Path for referencing the user module when stored
as the _MODULE_PATH_KEY execution property. Format should be treated
as opaque by the user.
Raises:
RuntimeError: When wheel building fails.
"""
module_path = os.path.abspath(io_utils.ensure_local(module_path))
if not module_path.endswith('.py'):
raise ValueError(f'Module path {module_path!r} is not a ".py" file.')
if not os.path.exists(module_path):
raise ValueError(f'Module path {module_path!r} does not exist.')
user_module_dir, module_file_name = os.path.split(module_path)
user_module_name = re.sub(r'\.py$', '', module_file_name)
source_files = []
# Discover all Python source files in this directory for inclusion.
for file_name in os.listdir(user_module_dir):
if file_name.endswith('.py'):
source_files.append(file_name)
# The generated setup.py and any package __init__ are excluded from the
# published module list (but still hashed/copied with source_files above).
module_names = []
for file_name in source_files:
if file_name in (_EPHEMERAL_SETUP_PY_FILE_NAME, '__init__.py'):
continue
module_name = re.sub(r'\.py$', '', file_name)
module_names.append(module_name)
# Set up build directory.
build_dir = tempfile.mkdtemp()
for source_file in source_files:
shutil.copyfile(
os.path.join(user_module_dir, source_file),
os.path.join(build_dir, source_file))
# Generate an ephemeral wheel for this module.
logging.info(
'Generating ephemeral wheel package for %r (including modules: %s).',
module_path, module_names)
# The content hash doubles as the local version label ('0.0+<hash>') so
# identical sources produce an identically-versioned wheel.
version_hash = _get_version_hash(user_module_dir, source_files)
logging.info('User module package has hash fingerprint version %s.',
version_hash)
setup_py_path = os.path.join(build_dir, _EPHEMERAL_SETUP_PY_FILE_NAME)
with open(setup_py_path, 'w', encoding='utf-8') as f:
f.write(
_get_ephemeral_setup_py_contents(f'tfx-user-code-{instance_name}',
f'0.0+{version_hash}', module_names))
# Separate scratch dirs keep bdist intermediates apart from the final dist.
temp_dir = tempfile.mkdtemp()
dist_dir = tempfile.mkdtemp()
bdist_command = [
sys.executable, setup_py_path, 'bdist_wheel', '--bdist-dir', temp_dir,
'--dist-dir', dist_dir
]
logging.info('Executing: %s', bdist_command)
try:
subprocess.check_call(bdist_command, cwd=build_dir)
except subprocess.CalledProcessError as e:
raise RuntimeError('Failed to build wheel.') from e
dist_files = os.listdir(dist_dir)
if len(dist_files) != 1:
raise RuntimeError(f'Unexpectedly found {len(dist_files)} output files '
f'in wheel output directory {dist_dir}.')
build_dist_file_path = os.path.join(dist_dir, dist_files[0])
# Copy wheel file atomically to wheel staging directory.
dist_wheel_directory = os.path.join(pipeline_root, '_wheels')
dist_file_path = os.path.join(dist_wheel_directory, dist_files[0])
temp_dist_file_path = dist_file_path + '.tmp'
fileio.makedirs(dist_wheel_directory)
fileio.copy(build_dist_file_path, temp_dist_file_path, overwrite=True)
fileio.rename(temp_dist_file_path, dist_file_path, overwrite=True)
logging.info(
('Successfully built user code wheel distribution at %r; target user '
'module is %r.'), dist_file_path, user_module_name)
# Encode the user module key as a specification of a user module name within
# a packaged wheel path.
# '@' is the separator in the encoded key, so it must not appear in the name.
assert '@' not in user_module_name, ('Unexpected invalid module name: ' +
user_module_name)
user_module_path = f'{user_module_name}@{dist_file_path}'
logging.info('Full user module path is %r', user_module_path)
return dist_file_path, user_module_path | Package the given user module file into a Python Wheel package. Args: instance_name: Name of the component instance, for creating a unique wheel package name. module_path: Path to the module file to be packaged. pipeline_root: Text Returns: dist_file_path: Path to the generated wheel file. user_module_path: Path for referencing the user module when stored as the _MODULE_PATH_KEY execution property. Format should be treated as opaque by the user. Raises: RuntimeError: When wheel building fails.
166,389 | import hashlib
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import logging
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.utils import import_utils
from tfx.utils import io_utils
The provided code snippet includes necessary dependencies for implementing the `decode_user_module_key` function. Write a Python function `def decode_user_module_key(user_module_key: str) -> Tuple[str, List[str]]` to solve the following problem:
Decode the given user module key into module path and pip dependencies.
Here is the function:
def decode_user_module_key(user_module_key: str) -> Tuple[str, List[str]]:
  """Splits an encoded user module key into (module name, pip dependencies).

  A key of the form '<module>@<wheel path>' yields the module name plus a
  single-element dependency list; any other key (including empty/None) has
  no dependencies.
  """
  if user_module_key and '@' in user_module_key:
    module_name, wheel_path = user_module_key.split('@', maxsplit=1)
    return module_name, [wheel_path]
  return user_module_key, []
166,390 | import hashlib
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import logging
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.utils import import_utils
from tfx.utils import io_utils
The provided code snippet includes necessary dependencies for implementing the `install_to_temp_directory` function. Write a Python function `def install_to_temp_directory(pip_dependency: str, temp_dir: Optional[str] = None) -> str` to solve the following problem:
Install the given pip dependency specifier to a temporary directory. Args: pip_dependency: Path to a wheel file or a pip dependency specifier (e.g. "setuptools==18.0"). temp_dir: Path to temporary installation location (optional). Returns: Temporary directory where the package was installed, that should be added to the Python import path.
Here is the function:
def install_to_temp_directory(pip_dependency: str,
temp_dir: Optional[str] = None) -> str:
"""Install the given pip dependency specifier to a temporary directory.
Args:
pip_dependency: Path to a wheel file or a pip dependency specifier (e.g.
"setuptools==18.0").
temp_dir: Path to temporary installation location (optional).
Returns:
Temporary directory where the package was installed, that should be added
to the Python import path.
"""
logging.info('Installing %r to a temporary directory.', pip_dependency)
if not temp_dir:
temp_dir = tempfile.mkdtemp()
# `--target` installs into temp_dir without touching the active environment.
install_command = [
sys.executable, '-m', 'pip', 'install', '--target', temp_dir,
pip_dependency
]
logging.info('Executing: %s', install_command)
# check_call raises CalledProcessError on pip failure; not caught here.
subprocess.check_call(install_command)
logging.info('Successfully installed %r.', pip_dependency)
return temp_dir | Install the given pip dependency specifier to a temporary directory. Args: pip_dependency: Path to a wheel file or a pip dependency specifier (e.g. "setuptools==18.0"). temp_dir: Path to temporary installation location (optional). Returns: Temporary directory where the package was installed, that should be added to the Python import path.
166,391 | import json
import os
from typing import Dict, List, Optional, Tuple
from absl import logging
from tfx import types
from tfx.components.example_gen import utils as example_gen_utils
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
def get_payload_format(examples: types.Artifact) -> int:
  """Looks up the payload format recorded on an Examples artifact.

  Artifacts produced before TFX supported multiple payload formats carry no
  "payload_format" custom property; those are treated as tf.Example data.

  Args:
    examples: A standard_artifacts.Examples artifact.

  Returns:
    One of the enums in example_gen_pb2.PayloadFormat.
  """
  assert examples.type_name == standard_artifacts.Examples.TYPE_NAME, (
      'examples must be of type standard_artifacts.Examples')
  property_name = example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME
  if not examples.has_custom_property(property_name):
    # Legacy artifact: fall back to the pre-multi-format default.
    logging.warning('Examples artifact does not have %s custom property. '
                    'Falling back to %s',
                    property_name,
                    example_gen_pb2.PayloadFormat.Name(_DEFAULT_PAYLOAD_FORMAT))
    return _DEFAULT_PAYLOAD_FORMAT
  return example_gen_pb2.PayloadFormat.Value(
      examples.get_string_custom_property(property_name))
The provided code snippet includes necessary dependencies for implementing the `get_payload_format_string` function. Write a Python function `def get_payload_format_string(examples: types.Artifact) -> str` to solve the following problem:
Returns the payload format as a string.
Here is the function:
def get_payload_format_string(examples: types.Artifact) -> str:
  """Returns the name of the payload format recorded on `examples`."""
  payload_format = get_payload_format(examples)
  return example_gen_pb2.PayloadFormat.Name(payload_format)
166,392 | import json
import os
from typing import Dict, List, Optional, Tuple
from absl import logging
from tfx import types
from tfx.components.example_gen import utils as example_gen_utils
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
The provided code snippet includes necessary dependencies for implementing the `set_payload_format` function. Write a Python function `def set_payload_format(examples: types.Artifact, payload_format: int)` to solve the following problem:
Sets the payload format custom property for `examples`. Args: examples: A standard_artifacts.Examples artifact. payload_format: One of the enums in example_gen_pb2.PayloadFormat.
Here is the function:
def set_payload_format(examples: types.Artifact, payload_format: int):
  """Records `payload_format` on `examples` as a custom property.

  Args:
    examples: A standard_artifacts.Examples artifact.
    payload_format: One of the enums in example_gen_pb2.PayloadFormat.
  """
  assert examples.type_name == standard_artifacts.Examples.TYPE_NAME, (
      'examples must be of type standard_artifacts.Examples')
  # The property stores the enum's symbolic name, not its int value.
  format_name = example_gen_pb2.PayloadFormat.Name(payload_format)
  examples.set_string_custom_property(
      example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME, format_name)
166,393 | import json
import os
from typing import Dict, List, Optional, Tuple
from absl import logging
from tfx import types
from tfx.components.example_gen import utils as example_gen_utils
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
The provided code snippet includes necessary dependencies for implementing the `set_file_format` function. Write a Python function `def set_file_format(examples: types.Artifact, file_format: str)` to solve the following problem:
Sets the file format custom property for `examples`. Args: examples: A standard_artifacts.Examples artifact. file_format: One of the file format that tfx_bsl understands.
Here is the function:
def set_file_format(examples: types.Artifact, file_format: str):
  """Records `file_format` on `examples` as a custom property.

  Args:
    examples: A standard_artifacts.Examples artifact.
    file_format: One of the file formats that tfx_bsl understands.
  """
  assert examples.type_name == standard_artifacts.Examples.TYPE_NAME, (
      'examples must be of type standard_artifacts.Examples')
  property_name = example_gen_utils.FILE_FORMAT_PROPERTY_NAME
  examples.set_string_custom_property(property_name, file_format)
166,394 | import json
import os
from typing import Dict, List, Optional, Tuple
from absl import logging
from tfx import types
from tfx.components.example_gen import utils as example_gen_utils
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
CUSTOM_SPLIT_PATTERN_PROPERTY_NAME = 'custom_split_pattern'
The provided code snippet includes necessary dependencies for implementing the `get_custom_split_patterns_key_and_property` function. Write a Python function `def get_custom_split_patterns_key_and_property( split_to_pattern: Dict[str, str] ) -> Tuple[str, str]` to solve the following problem:
Get a custom property name and value encoding custom split patterns. Args: split_to_pattern: A dictionary mapping split names to file patterns. These patterns should be relative to the artifact's uri, which is expected to be an ancestor directory of split patterns. Returns: A tuple consisting of a property name and value appropriate for artifact.set_string_custom_property.
Here is the function:
def get_custom_split_patterns_key_and_property(
    split_to_pattern: Dict[str, str]
) -> Tuple[str, str]:
  """Encodes custom split patterns as a (property name, property value) pair.

  Args:
    split_to_pattern: Maps split names to file patterns. The patterns should
      be relative to the artifact's uri, which is expected to be an ancestor
      directory of the split patterns.

  Returns:
    A (name, value) tuple suitable for artifact.set_string_custom_property.
  """
  encoded_patterns = json.dumps(split_to_pattern)
  return CUSTOM_SPLIT_PATTERN_PROPERTY_NAME, encoded_patterns
166,395 | import logging
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import pyarrow as pa
import tensorflow as tf
from tfx.components.experimental.data_view import constants
from tfx.components.util import examples_utils
from tfx.proto import example_gen_pb2
from tfx.types import artifact
from tfx.types import standard_artifacts
from tfx_bsl.tfxio import dataset_options
from tfx_bsl.tfxio import parquet_tfxio
from tfx_bsl.tfxio import raw_tf_record
from tfx_bsl.tfxio import record_to_tensor_tfxio
from tfx_bsl.tfxio import tf_example_record
from tfx_bsl.tfxio import tf_sequence_example_record
from tfx_bsl.tfxio import tfxio
from tensorflow_metadata.proto.v0 import schema_pb2
def resolve_payload_format_and_data_view_uri(
    examples: List[artifact.Artifact]) -> Tuple[int, Optional[str]]:
  """Resolves the payload format and a DataView URI for given artifacts.

  This routine makes sure that the provided Examples artifacts are all of
  the same payload type, and if their payload type is FORMAT_PROTO, it resolves
  one DataView (if applicable) to be used to access the data in all the
  artifacts in a consistent way (i.e. the RecordBatches from those artifacts
  will have the same schema).

  Args:
    examples: A list of Examples artifacts.

  Returns:
    A pair. The first term is the payload format (a value in
    example_gen_pb2.PayloadFormat enum); the second term is the URI to the
    resolved DataView (could be None, if the examples are not FORMAT_PROTO,
    or they are all FORMAT_PROTO, but all do not have a DataView attached).

  Raises:
    ValueError: if not all artifacts are of the same payload format, or
    if they are all of FORMAT_PROTO but some (but not all) of them do not
    have a DataView attached.
  """
  assert examples, 'At least one Examples artifact is needed.'
  # NOTE(review): the uniform-payload-format check documented under Raises is
  # presumably enforced by _get_payload_format (defined elsewhere in this
  # module) -- confirm.
  payload_format = _get_payload_format(examples)
  if payload_format != example_gen_pb2.PayloadFormat.FORMAT_PROTO:
    # Only FORMAT_PROTO may have DataView attached.
    return payload_format, None

  # Each entry is None or, presumably, a (data_view_uri, create_time) pair --
  # see the max(...) keyed on pair[1] below.
  data_view_infos = []
  for examples_artifact in examples:
    data_view_infos.append(_get_data_view_info(examples_artifact))
  # All the artifacts do not have DataView attached -- this is allowed. The
  # caller may be requesting to read the data as raw string records.
  if all([i is None for i in data_view_infos]):
    return payload_format, None
  # All the artifacts have a DataView attached -- resolve to the latest
  # DataView (the one with the largest create time). This will guarantee that
  # the RecordBatch read from each artifact will share the same Arrow schema
  # (and thus Tensors fed to TF graphs, if applicable). The DataView will need
  # to guarantee backward compatibility with older spans. Usually the DataView
  # is a struct2tensor query, so such guarantee is provided by protobuf
  # (as long as the user follows the basic principles of making changes to
  # the proto).
  if all([i is not None for i in data_view_infos]):
    return payload_format, max(data_view_infos, key=lambda pair: pair[1])[0]
  # Mixed case: some artifacts have a DataView and some do not -- ambiguous,
  # so refuse to resolve.
  violating_artifacts = [
      e for e, i in zip(examples, data_view_infos) if i is None]
  raise ValueError(
      'Unable to resolve a DataView for the Examples Artifacts '
      'provided -- some Artifacts did not have DataView attached: {}'
      .format(violating_artifacts))
def get_file_format_and_patterns(
    examples: List[artifact.Artifact],
    split: str,
) -> Tuple[List[str], List[str]]:
  """Collects, per artifact, the file pattern and file format for `split`.

  Both returned lists are index-aligned with `examples`.
  """
  patterns = []
  formats = []
  for one_artifact in examples:
    split_patterns = examples_utils.get_split_file_patterns([one_artifact],
                                                            split)
    # Exactly one pattern per (artifact, split) pair is expected.
    assert len(split_patterns) == 1
    patterns.append(split_patterns[0])
    formats.append(examples_utils.get_file_format(one_artifact))
  return patterns, formats
def make_tfxio(
    file_pattern: OneOrMorePatterns,
    telemetry_descriptors: List[str],
    payload_format: int,
    data_view_uri: Optional[str] = None,
    schema: Optional[schema_pb2.Schema] = None,
    read_as_raw_records: bool = False,
    raw_record_column_name: Optional[str] = None,
    file_format: Optional[Union[int, List[int], str, List[str]]] = None
) -> tfxio.TFXIO:
  """Creates a TFXIO instance that reads `file_pattern`.

  Args:
    file_pattern: the file pattern for the TFXIO to access.
    telemetry_descriptors: A set of descriptors that identify the component that
      is instantiating the TFXIO. These will be used to construct the namespace
      to contain metrics for profiling and are therefore expected to be
      identifiers of the component itself and not individual instances of source
      use.
    payload_format: one of the enums from example_gen_pb2.PayloadFormat (may be
      in string or int form). If None, default to FORMAT_TF_EXAMPLE.
    data_view_uri: uri to a DataView artifact. A DataView is needed in order to
      create a TFXIO for certain payload formats.
    schema: TFMD schema. Note: although optional, some payload formats need a
      schema in order for all TFXIO interfaces (e.g. TensorAdapter()) to work.
      Unless you know what you are doing, always supply a schema.
    read_as_raw_records: If True, ignore the payload type of `examples`. Always
      use RawTfRecord TFXIO.
    raw_record_column_name: If provided, the arrow RecordBatch produced by the
      TFXIO will contain a string column of the given name, and the contents of
      that column will be the raw records. Note that not all TFXIO supports this
      option, and an error will be raised in that case. Required if
      read_as_raw_records == True.
    file_format: file format for each file_pattern. Only 'tfrecords_gzip' and
      'parquet' are supported for now.

  Returns:
    a TFXIO instance.
  """
  # Accept the payload format either as the enum's int value or its name.
  if not isinstance(payload_format, int):
    payload_format = example_gen_pb2.PayloadFormat.Value(payload_format)
  # file_format, when given, must mirror file_pattern's shape: both scalars,
  # or lists of equal length (element i pairing with pattern i).
  if file_format is not None:
    if type(file_format) is not type(file_pattern):
      raise ValueError(
          f'The type of file_pattern and file_formats should be the same.'
          f'Given: file_pattern={file_pattern}, file_format={file_format}')
    if isinstance(file_format, list):
      if len(file_format) != len(file_pattern):
        raise ValueError(
            f'The length of file_pattern and file_formats should be the same.'
            f'Given: file_pattern={file_pattern}, file_format={file_format}')
      else:
        file_format = [_file_format_from_string(item) for item in file_format]
        if any(item not in _SUPPORTED_FILE_FORMATS for item in file_format):
          raise NotImplementedError(f'{file_format} is not supported yet.')
    else:  # file_format is str type.
      file_format = _file_format_from_string(file_format)
      if file_format not in _SUPPORTED_FILE_FORMATS:
        raise NotImplementedError(f'{file_format} is not supported yet.')
  # Raw-record access bypasses the payload-format dispatch below entirely.
  if read_as_raw_records:
    assert raw_record_column_name is not None, (
        'read_as_raw_records is specified - '
        'must provide raw_record_column_name')
    return raw_tf_record.RawTfRecordTFXIO(
        file_pattern=file_pattern,
        raw_record_column_name=raw_record_column_name,
        telemetry_descriptors=telemetry_descriptors)
  if payload_format == example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE:
    return tf_example_record.TFExampleRecord(
        file_pattern=file_pattern,
        schema=schema,
        raw_record_column_name=raw_record_column_name,
        telemetry_descriptors=telemetry_descriptors)
  if (payload_format ==
      example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE):
    return tf_sequence_example_record.TFSequenceExampleRecord(
        file_pattern=file_pattern,
        schema=schema,
        raw_record_column_name=raw_record_column_name,
        telemetry_descriptors=telemetry_descriptors)
  if payload_format == example_gen_pb2.PayloadFormat.FORMAT_PROTO:
    # FORMAT_PROTO needs a saved decoder (the DataView) to parse records.
    assert data_view_uri is not None, (
        'Accessing FORMAT_PROTO requires a DataView to parse the proto.')
    return record_to_tensor_tfxio.TFRecordToTensorTFXIO(
        file_pattern=file_pattern,
        saved_decoder_path=data_view_uri,
        telemetry_descriptors=telemetry_descriptors,
        raw_record_column_name=raw_record_column_name)
  if payload_format == example_gen_pb2.PayloadFormat.FORMAT_PARQUET:
    return parquet_tfxio.ParquetTFXIO(
        file_pattern=file_pattern,
        schema=schema,
        telemetry_descriptors=telemetry_descriptors)
  raise NotImplementedError(
      'Unsupport payload format: {}'.format(payload_format))
The provided code snippet includes necessary dependencies for implementing the `get_split_tfxio` function. Write a Python function `def get_split_tfxio( examples: List[artifact.Artifact], split: str, telemetry_descriptors: List[str], schema: Optional[schema_pb2.Schema] = None, read_as_raw_records: bool = False, raw_record_column_name: Optional[str] = None, ) -> tfxio.TFXIO` to solve the following problem:
Returns a TFXIO for a single split. Args: examples: The Examples artifacts that the TFXIO is intended to access. split: The split to read. Must be a split contained in examples. telemetry_descriptors: A set of descriptors that identify the component that is instantiating the TFXIO. These will be used to construct the namespace to contain metrics for profiling and are therefore expected to be identifiers of the component itself and not individual instances of source use. schema: TFMD schema. Note that without a schema, some TFXIO interfaces in certain TFXIO implementations might not be available. read_as_raw_records: If True, ignore the payload type of `examples`. Always use RawTfRecord TFXIO. raw_record_column_name: If provided, the arrow RecordBatch produced by the TFXIO will contain a string column of the given name, and the contents of that column will be the raw records. Note that not all TFXIO supports this option, and an error will be raised in that case. Required if read_as_raw_records == True. Returns: A function that takes a file pattern as input and returns a TFXIO instance. Raises: NotImplementedError: when given an unsupported example payload type.
Here is the function:
def get_split_tfxio(
    examples: List[artifact.Artifact],
    split: str,
    telemetry_descriptors: List[str],
    schema: Optional[schema_pb2.Schema] = None,
    read_as_raw_records: bool = False,
    raw_record_column_name: Optional[str] = None,
) -> tfxio.TFXIO:
  """Builds a TFXIO that reads one split of the given Examples artifacts.

  Args:
    examples: The Examples artifacts that the TFXIO is intended to access.
    split: The split to read. Must be a split contained in `examples`.
    telemetry_descriptors: Identifiers of the instantiating component (not of
      individual source uses); used to construct the namespace containing
      profiling metrics.
    schema: TFMD schema. Without one, some TFXIO interfaces in certain TFXIO
      implementations might not be available.
    read_as_raw_records: If True, ignore the payload type of `examples` and
      always use the RawTfRecord TFXIO.
    raw_record_column_name: If provided, the arrow RecordBatches produced by
      the TFXIO will contain a string column of this name holding the raw
      records. Not every TFXIO supports this option (an error is raised in
      that case). Required when read_as_raw_records is True.

  Returns:
    A TFXIO instance for the requested split.

  Raises:
    NotImplementedError: when given an unsupported example payload type.
  """
  resolved_format, resolved_data_view_uri = (
      resolve_payload_format_and_data_view_uri(examples))
  split_patterns, split_formats = get_file_format_and_patterns(examples, split)
  logging.info('Reading from pattern %s for split %s', split_patterns, split)
  return make_tfxio(
      file_pattern=split_patterns,
      file_format=split_formats,
      telemetry_descriptors=telemetry_descriptors,
      payload_format=resolved_format,
      data_view_uri=resolved_data_view_uri,
      schema=schema,
      read_as_raw_records=read_as_raw_records,
      raw_record_column_name=raw_record_column_name,
  )
166,396 | import logging
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import pyarrow as pa
import tensorflow as tf
from tfx.components.experimental.data_view import constants
from tfx.components.util import examples_utils
from tfx.proto import example_gen_pb2
from tfx.types import artifact
from tfx.types import standard_artifacts
from tfx_bsl.tfxio import dataset_options
from tfx_bsl.tfxio import parquet_tfxio
from tfx_bsl.tfxio import raw_tf_record
from tfx_bsl.tfxio import record_to_tensor_tfxio
from tfx_bsl.tfxio import tf_example_record
from tfx_bsl.tfxio import tf_sequence_example_record
from tfx_bsl.tfxio import tfxio
from tensorflow_metadata.proto.v0 import schema_pb2
OneOrMorePatterns = Union[str, List[str]]
def resolve_payload_format_and_data_view_uri(
    examples: List[artifact.Artifact]) -> Tuple[int, Optional[str]]:
  """Resolves the payload format and a DataView URI for given artifacts.

  This routine makes sure that the provided Examples artifacts are all of
  the same payload type, and if their payload type is FORMAT_PROTO, it resolves
  one DataView (if applicable) to be used to access the data in all the
  artifacts in a consistent way (i.e. the RecordBatches from those artifacts
  will have the same schema).

  Args:
    examples: A list of Examples artifacts.

  Returns:
    A pair. The first term is the payload format (a value in
    example_gen_pb2.PayloadFormat enum); the second term is the URI to the
    resolved DataView (could be None, if the examples are not FORMAT_PROTO,
    or they are all FORMAT_PROTO, but all do not have a DataView attached).

  Raises:
    ValueError: if not all artifacts are of the same payload format, or
    if they are all of FORMAT_PROTO but some (but not all) of them do not
    have a DataView attached.
  """
  assert examples, 'At least one Examples artifact is needed.'
  # NOTE(review): the uniform-payload-format check documented under Raises is
  # presumably enforced by _get_payload_format (defined elsewhere in this
  # module) -- confirm.
  payload_format = _get_payload_format(examples)
  if payload_format != example_gen_pb2.PayloadFormat.FORMAT_PROTO:
    # Only FORMAT_PROTO may have DataView attached.
    return payload_format, None

  # Each entry is None or, presumably, a (data_view_uri, create_time) pair --
  # see the max(...) keyed on pair[1] below.
  data_view_infos = []
  for examples_artifact in examples:
    data_view_infos.append(_get_data_view_info(examples_artifact))
  # All the artifacts do not have DataView attached -- this is allowed. The
  # caller may be requesting to read the data as raw string records.
  if all([i is None for i in data_view_infos]):
    return payload_format, None
  # All the artifacts have a DataView attached -- resolve to the latest
  # DataView (the one with the largest create time). This will guarantee that
  # the RecordBatch read from each artifact will share the same Arrow schema
  # (and thus Tensors fed to TF graphs, if applicable). The DataView will need
  # to guarantee backward compatibility with older spans. Usually the DataView
  # is a struct2tensor query, so such guarantee is provided by protobuf
  # (as long as the user follows the basic principles of making changes to
  # the proto).
  if all([i is not None for i in data_view_infos]):
    return payload_format, max(data_view_infos, key=lambda pair: pair[1])[0]
  # Mixed case: some artifacts have a DataView and some do not -- ambiguous,
  # so refuse to resolve.
  violating_artifacts = [
      e for e, i in zip(examples, data_view_infos) if i is None]
  raise ValueError(
      'Unable to resolve a DataView for the Examples Artifacts '
      'provided -- some Artifacts did not have DataView attached: {}'
      .format(violating_artifacts))
def make_tfxio(
    file_pattern: OneOrMorePatterns,
    telemetry_descriptors: List[str],
    payload_format: int,
    data_view_uri: Optional[str] = None,
    schema: Optional[schema_pb2.Schema] = None,
    read_as_raw_records: bool = False,
    raw_record_column_name: Optional[str] = None,
    file_format: Optional[Union[int, List[int], str, List[str]]] = None
) -> tfxio.TFXIO:
  """Creates a TFXIO instance that reads `file_pattern`.

  Args:
    file_pattern: the file pattern for the TFXIO to access.
    telemetry_descriptors: A set of descriptors that identify the component that
      is instantiating the TFXIO. These will be used to construct the namespace
      to contain metrics for profiling and are therefore expected to be
      identifiers of the component itself and not individual instances of source
      use.
    payload_format: one of the enums from example_gen_pb2.PayloadFormat (may be
      in string or int form). If None, default to FORMAT_TF_EXAMPLE.
    data_view_uri: uri to a DataView artifact. A DataView is needed in order to
      create a TFXIO for certain payload formats.
    schema: TFMD schema. Note: although optional, some payload formats need a
      schema in order for all TFXIO interfaces (e.g. TensorAdapter()) to work.
      Unless you know what you are doing, always supply a schema.
    read_as_raw_records: If True, ignore the payload type of `examples`. Always
      use RawTfRecord TFXIO.
    raw_record_column_name: If provided, the arrow RecordBatch produced by the
      TFXIO will contain a string column of the given name, and the contents of
      that column will be the raw records. Note that not all TFXIO supports this
      option, and an error will be raised in that case. Required if
      read_as_raw_records == True.
    file_format: file format for each file_pattern. Only 'tfrecords_gzip' and
      'parquet' are supported for now.

  Returns:
    a TFXIO instance.
  """
  # Accept the payload format either as the enum's int value or its name.
  if not isinstance(payload_format, int):
    payload_format = example_gen_pb2.PayloadFormat.Value(payload_format)
  # file_format, when given, must mirror file_pattern's shape: both scalars,
  # or lists of equal length (element i pairing with pattern i).
  if file_format is not None:
    if type(file_format) is not type(file_pattern):
      raise ValueError(
          f'The type of file_pattern and file_formats should be the same.'
          f'Given: file_pattern={file_pattern}, file_format={file_format}')
    if isinstance(file_format, list):
      if len(file_format) != len(file_pattern):
        raise ValueError(
            f'The length of file_pattern and file_formats should be the same.'
            f'Given: file_pattern={file_pattern}, file_format={file_format}')
      else:
        file_format = [_file_format_from_string(item) for item in file_format]
        if any(item not in _SUPPORTED_FILE_FORMATS for item in file_format):
          raise NotImplementedError(f'{file_format} is not supported yet.')
    else:  # file_format is str type.
      file_format = _file_format_from_string(file_format)
      if file_format not in _SUPPORTED_FILE_FORMATS:
        raise NotImplementedError(f'{file_format} is not supported yet.')
  # Raw-record access bypasses the payload-format dispatch below entirely.
  if read_as_raw_records:
    assert raw_record_column_name is not None, (
        'read_as_raw_records is specified - '
        'must provide raw_record_column_name')
    return raw_tf_record.RawTfRecordTFXIO(
        file_pattern=file_pattern,
        raw_record_column_name=raw_record_column_name,
        telemetry_descriptors=telemetry_descriptors)
  if payload_format == example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE:
    return tf_example_record.TFExampleRecord(
        file_pattern=file_pattern,
        schema=schema,
        raw_record_column_name=raw_record_column_name,
        telemetry_descriptors=telemetry_descriptors)
  if (payload_format ==
      example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE):
    return tf_sequence_example_record.TFSequenceExampleRecord(
        file_pattern=file_pattern,
        schema=schema,
        raw_record_column_name=raw_record_column_name,
        telemetry_descriptors=telemetry_descriptors)
  if payload_format == example_gen_pb2.PayloadFormat.FORMAT_PROTO:
    # FORMAT_PROTO needs a saved decoder (the DataView) to parse records.
    assert data_view_uri is not None, (
        'Accessing FORMAT_PROTO requires a DataView to parse the proto.')
    return record_to_tensor_tfxio.TFRecordToTensorTFXIO(
        file_pattern=file_pattern,
        saved_decoder_path=data_view_uri,
        telemetry_descriptors=telemetry_descriptors,
        raw_record_column_name=raw_record_column_name)
  if payload_format == example_gen_pb2.PayloadFormat.FORMAT_PARQUET:
    return parquet_tfxio.ParquetTFXIO(
        file_pattern=file_pattern,
        schema=schema,
        telemetry_descriptors=telemetry_descriptors)
  raise NotImplementedError(
      'Unsupport payload format: {}'.format(payload_format))
The provided code snippet includes necessary dependencies for implementing the `get_tfxio_factory_from_artifact` function. Write a Python function `def get_tfxio_factory_from_artifact( examples: List[artifact.Artifact], telemetry_descriptors: List[str], schema: Optional[schema_pb2.Schema] = None, read_as_raw_records: bool = False, raw_record_column_name: Optional[str] = None ) -> Callable[[OneOrMorePatterns], tfxio.TFXIO]` to solve the following problem:
Returns a factory function that creates a proper TFXIO. Args: examples: The Examples artifacts that the TFXIO is intended to access. telemetry_descriptors: A set of descriptors that identify the component that is instantiating the TFXIO. These will be used to construct the namespace to contain metrics for profiling and are therefore expected to be identifiers of the component itself and not individual instances of source use. schema: TFMD schema. Note that without a schema, some TFXIO interfaces in certain TFXIO implementations might not be available. read_as_raw_records: If True, ignore the payload type of `examples`. Always use RawTfRecord TFXIO. raw_record_column_name: If provided, the arrow RecordBatch produced by the TFXIO will contain a string column of the given name, and the contents of that column will be the raw records. Note that not all TFXIO supports this option, and an error will be raised in that case. Required if read_as_raw_records == True. Returns: A function that takes a file pattern as input and returns a TFXIO instance. Raises: NotImplementedError: when given an unsupported example payload type.
Here is the function:
def get_tfxio_factory_from_artifact(
    examples: List[artifact.Artifact],
    telemetry_descriptors: List[str],
    schema: Optional[schema_pb2.Schema] = None,
    read_as_raw_records: bool = False,
    raw_record_column_name: Optional[str] = None
) -> Callable[[OneOrMorePatterns], tfxio.TFXIO]:
  """Returns a factory function that creates a proper TFXIO.

  The payload format and DataView are resolved once, eagerly, from `examples`;
  the returned factory then only needs a file pattern.

  Args:
    examples: The Examples artifacts that the TFXIO is intended to access.
    telemetry_descriptors: Identifiers of the instantiating component (not of
      individual source uses); used to construct the namespace containing
      profiling metrics.
    schema: TFMD schema. Without one, some TFXIO interfaces in certain TFXIO
      implementations might not be available.
    read_as_raw_records: If True, ignore the payload type of `examples` and
      always use the RawTfRecord TFXIO.
    raw_record_column_name: If provided, the arrow RecordBatches produced by
      the TFXIO will contain a string column of this name holding the raw
      records. Not every TFXIO supports this option (an error is raised in
      that case). Required when read_as_raw_records is True.

  Returns:
    A function that takes a file pattern as input and returns a TFXIO
    instance.

  Raises:
    NotImplementedError: when given an unsupported example payload type.
  """
  payload_format, data_view_uri = resolve_payload_format_and_data_view_uri(
      examples)

  def _factory(file_pattern: OneOrMorePatterns) -> tfxio.TFXIO:
    return make_tfxio(
        file_pattern=file_pattern,
        telemetry_descriptors=telemetry_descriptors,
        payload_format=payload_format,
        data_view_uri=data_view_uri,
        schema=schema,
        read_as_raw_records=read_as_raw_records,
        raw_record_column_name=raw_record_column_name)

  return _factory
166,397 | from tfx import types
The provided code snippet includes necessary dependencies for implementing the `is_model_blessed` function. Write a Python function `def is_model_blessed(model_blessing: types.Artifact) -> bool` to solve the following problem:
Returns whether model is blessed by upstream ModelValidator. Args: model_blessing: model blessing artifact from model_validator. Returns: True if the model is blessed by validator.
Here is the function:
def is_model_blessed(model_blessing: types.Artifact) -> bool:
  """Checks the 'blessed' custom property set by the upstream ModelValidator.

  Args:
    model_blessing: model blessing artifact from model_validator.

  Returns:
    True iff the artifact records blessed == 1.
  """
  blessed_value = model_blessing.get_int_custom_property('blessed')
  return blessed_value == 1
166,398 | from tfx import types
The provided code snippet includes necessary dependencies for implementing the `is_infra_validated` function. Write a Python function `def is_infra_validated(infra_blessing: types.Artifact) -> bool` to solve the following problem:
Returns whether model is infra blessed by upstream InfraValidator. Args: infra_blessing: A `InfraBlessing` artifact from infra validator. Returns: Whether model is infra validated or not.
Here is the function:
def is_infra_validated(infra_blessing: types.Artifact) -> bool:
  """Checks whether the model was infra-blessed by the upstream InfraValidator.

  Args:
    infra_blessing: An `InfraBlessing` artifact produced by infra validator.

  Returns:
    True iff the artifact's 'blessed' custom property equals 1.
  """
  blessed_flag = infra_blessing.get_int_custom_property('blessed')
  return blessed_flag == 1
166,399 | from typing import Any, Dict, List, Optional
import apache_beam as beam
from apache_beam.io.gcp import bigquery
from apache_beam.options import value_provider
import tensorflow as tf
from tfx.utils import telemetry_utils
The provided code snippet includes necessary dependencies for implementing the `row_to_example` function. Write a Python function `def row_to_example( # pylint: disable=invalid-name field_to_type: Dict[str, str], field_name_to_data: Dict[str, Any]) -> tf.train.Example` to solve the following problem:
Convert bigquery result row to tf example. Args: field_to_type: The name of the field to its type from BigQuery. field_name_to_data: The data need to be converted from BigQuery that contains field name and data. Returns: A tf.train.Example that converted from the BigQuery row. Note that BOOLEAN type in BigQuery result will be converted to int in tf.train.Example. Raises: RuntimeError: If the data type is not supported to be converted. Only INTEGER, BOOLEAN, FLOAT, STRING is supported now.
Here is the function:
def row_to_example(  # pylint: disable=invalid-name
    field_to_type: Dict[str, str],
    field_name_to_data: Dict[str, Any]) -> tf.train.Example:
  """Converts one BigQuery result row into a tf.train.Example.

  Args:
    field_to_type: Mapping from BigQuery field name to its BigQuery type name.
    field_name_to_data: Mapping from field name to the row's value for that
      field. Values may be scalars or repeated (lists); a None value produces
      an empty feature.

  Returns:
    A tf.train.Example with one feature per input field. Note that BOOLEAN
    values are encoded as int64, matching BigQuery's int-compatible encoding.

  Raises:
    RuntimeError: If a field's type is not one of INTEGER, BOOLEAN, FLOAT or
      STRING.
  """
  features = {}
  for name, raw_value in field_name_to_data.items():
    bq_type = field_to_type[name]
    if raw_value is None:
      # NULL columns become an empty (kind-less) feature.
      features[name] = tf.train.Feature()
      continue
    values = raw_value if isinstance(raw_value, list) else [raw_value]
    if bq_type in ('INTEGER', 'BOOLEAN'):
      features[name] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=values))
    elif bq_type == 'FLOAT':
      features[name] = tf.train.Feature(
          float_list=tf.train.FloatList(value=values))
    elif bq_type == 'STRING':
      encoded = [tf.compat.as_bytes(item) for item in values]
      features[name] = tf.train.Feature(
          bytes_list=tf.train.BytesList(value=encoded))
    else:
      # TODO(jyzhao): support more types.
      raise RuntimeError(
          'BigQuery column "{}" has non-supported type {}.'.format(
              name, bq_type))
  return tf.train.Example(features=tf.train.Features(feature=features))
166,400 | import json
from typing import Any, Dict, Optional
import apache_beam as beam
from google.cloud import bigquery
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.extensions.google_cloud_big_query import utils
class _BigQueryConverter:
  """Helper that converts BigQuery result rows into tf.train.Example protos."""

  def __init__(self, query: str, project_id: Optional[str] = None):
    """Builds the field-name -> BigQuery-type map for `query`.

    Args:
      query: The query statement used to derive the schema.
      project_id: Optional GCP project ID under which the schema probe runs.
        Defaults to the project configured in the local gcloud environment.
    """
    client = bigquery.Client(project=project_id)
    # Dummy query to get the type information for each field.
    query_job = client.query('SELECT * FROM ({}) LIMIT 0'.format(query))
    schema = query_job.result().schema
    self._type_map = {field.name: field.field_type for field in schema}

  def RowToExample(self, instance: Dict[str, Any]) -> tf.train.Example:
    """Converts a single BigQuery result row to a tf.train.Example."""
    return utils.row_to_example(self._type_map, instance)
The provided code snippet includes necessary dependencies for implementing the `_BigQueryToExample` function. Write a Python function `def _BigQueryToExample(pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str) -> beam.pvalue.PCollection` to solve the following problem:
Read from BigQuery and transform to TF examples. Args: pipeline: beam pipeline. exec_properties: A dict of execution properties. split_pattern: Split.pattern in Input config, a BigQuery sql string. Returns: PCollection of TF examples.
Here is the function:
def _BigQueryToExample(pipeline: beam.Pipeline, exec_properties: Dict[str, Any],
                       split_pattern: str) -> beam.pvalue.PCollection:
  """Reads rows from BigQuery and converts them to tf.train.Example.

  Args:
    pipeline: Beam pipeline to attach the read to.
    exec_properties: Execution properties. '_beam_pipeline_args' supplies the
      GCP project; 'custom_config' (optional JSON string) carries extra read
      options for BigQuery.
    split_pattern: Split.pattern from the input config -- a BigQuery SQL query.

  Returns:
    A PCollection of tf.train.Example protos.
  """
  project = utils.parse_gcp_project(exec_properties['_beam_pipeline_args'])
  converter = _BigQueryConverter(split_pattern, project)
  custom_config_json = exec_properties.get('custom_config')
  big_query_custom_config = (
      json.loads(custom_config_json) if custom_config_json else None)
  examples = (
      pipeline
      | 'QueryTable' >> utils.ReadFromBigQuery(
          query=split_pattern,
          big_query_custom_config=big_query_custom_config,
      )
      | 'ToTFExample' >> beam.Map(converter.RowToExample))
  return examples
166,401 | from typing import Any, Dict, Iterable, List, Set, Tuple
import apache_beam as beam
from google.cloud import bigquery
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.extensions.google_cloud_big_query import utils
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.proto import elwc_config_pb2
from tfx.proto import example_gen_pb2
from google.protobuf import json_format
from tensorflow_serving.apis import input_pb2
class _RowToContextFeatureAndExample(beam.DoFn):
  """Splits a BigQuery row into (serialized context feature, example) pairs."""

  def __init__(self, type_map: Dict[str, str],
               context_feature_fields: Set[str]):
    self._type_map = type_map
    self._context_feature_fields = context_feature_fields

  def process(
      self, instance: Dict[str, Any]
  ) -> Iterable[Tuple[bytes, tf.train.Example]]:
    # Partition the row into context fields vs. per-example fields.
    context_fields = self._context_feature_fields
    context_part = {
        name: value
        for name, value in instance.items()
        if name in context_fields
    }
    example_part = {
        name: value
        for name, value in instance.items()
        if name not in context_fields
    }
    context_proto = utils.row_to_example(self._type_map, context_part)
    # Deterministic serialization so identical contexts group to the same key.
    context_key = context_proto.SerializeToString(deterministic=True)
    example_proto = utils.row_to_example(self._type_map, example_part)
    yield (context_key, example_proto)
def _ConvertContextAndExamplesToElwc(
    context_feature_and_examples: Tuple[bytes,
                                        List[tf.train.Example]]) -> bytes:
  """Serializes a (context, examples) pair into an ExampleListWithContext."""
  serialized_context, example_list = context_feature_and_examples
  context_proto = tf.train.Example()
  context_proto.ParseFromString(serialized_context)
  elwc = input_pb2.ExampleListWithContext(
      context=context_proto, examples=example_list)
  return elwc.SerializeToString(deterministic=True)
The provided code snippet includes necessary dependencies for implementing the `_BigQueryToElwc` function. Write a Python function `def _BigQueryToElwc(pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str) -> beam.pvalue.PCollection` to solve the following problem:
Read from BigQuery and transform to ExampleListWithContext. When a field has no value in BigQuery, a feature with no value will be generated in the tf.train.Features. This behavior is consistent with BigQueryExampleGen. Args: pipeline: beam pipeline. exec_properties: A dict of execution properties. split_pattern: Split.pattern in Input config, a BigQuery sql string. Returns: PCollection of ExampleListWithContext. Raises: RuntimeError: Context features must be included in the queried result.
Here is the function:
def _BigQueryToElwc(pipeline: beam.Pipeline, exec_properties: Dict[str, Any],
                    split_pattern: str) -> beam.pvalue.PCollection:
  """Reads from BigQuery and transforms rows to ExampleListWithContext.

  When a field has no value in BigQuery, a feature with no value is generated
  in the tf.train.Features. This behavior is consistent with
  BigQueryExampleGen.

  Args:
    pipeline: Beam pipeline.
    exec_properties: Execution properties. 'custom_config' must carry an
      ElwcConfig naming the context feature fields; '_beam_pipeline_args'
      supplies the GCP project.
    split_pattern: Split.pattern in the input config -- a BigQuery SQL string.

  Returns:
    A PCollection of serialized ExampleListWithContext protos.

  Raises:
    RuntimeError: If any configured context feature field is missing from the
      queried result.
  """
  project = utils.parse_gcp_project(exec_properties['_beam_pipeline_args'])
  custom_config = example_gen_pb2.CustomConfig()
  json_format.Parse(exec_properties['custom_config'], custom_config)
  elwc_config = elwc_config_pb2.ElwcConfig()
  custom_config.custom_config.Unpack(elwc_config)

  # Probe the query schema only (LIMIT 0 returns field metadata, no rows).
  client = bigquery.Client(project=project)
  query_job = client.query('SELECT * FROM ({}) LIMIT 0'.format(split_pattern))
  schema = query_job.result().schema
  type_map = {field.name: field.field_type for field in schema}
  field_names = set(type_map)
  context_feature_fields = set(elwc_config.context_feature_fields)
  # Check whether the query contains necessary context fields.
  if not context_feature_fields.issubset(field_names):
    raise RuntimeError('Context feature fields are missing from the query.')

  return (
      pipeline
      | 'ReadFromBigQuery' >> utils.ReadFromBigQuery(query=split_pattern)
      | 'RowToContextFeatureAndExample' >> beam.ParDo(
          _RowToContextFeatureAndExample(type_map, context_feature_fields))
      | 'CombineByContext' >> beam.CombinePerKey(
          beam.combiners.ToListCombineFn())
      | 'ConvertContextAndExamplesToElwc' >> beam.Map(
          _ConvertContextAndExamplesToElwc))
166,402 | from typing import Any, Callable, Dict
from tfx.dsl.component.experimental import container_component
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import base_component
from tfx.extensions.experimental.kfp_compatibility.proto import kfp_component_spec_pb2
from tfx.types import standard_artifacts
import yaml
from google.protobuf import json_format
def _convert_target_fields_to_kv_pair(parsed_dict: Dict[str, Any]) -> None:
"""Converts in place specific string fields to key value pairs of {constantValue: [Text]} for proto3 compatibility.
Args:
parsed_dict: dictionary obtained from parsing a Kubeflow component spec.
This argument is modified in place.
Returns:
None
"""
conversion_string_paths = [
['implementation', 'container', 'command'],
['implementation', 'container', 'args'],
]
for path in conversion_string_paths:
parsed_dict_location = parsed_dict
for label in path:
parsed_dict_location = parsed_dict_location.get(label, {})
if isinstance(parsed_dict_location, list):
for ind, value in enumerate(parsed_dict_location):
if isinstance(value, str):
parsed_dict_location[ind] = {'constantValue': value}
def _get_command_line_argument_type(
    command: kfp_component_spec_pb2.StringOrPlaceholder
) -> placeholders.CommandlineArgumentType:
  """Maps a KFP container command entry to its TFX placeholder equivalent.

  Args:
    command: StringOrPlaceholder which encodes a container command.

  Returns:
    A value suitable for passing to create_container_component.

  Raises:
    ValueError: If none of the recognized oneof fields is set on `command`.
  """
  if command.HasField('constantValue'):
    return command.constantValue
  # Remaining oneof fields each map to a placeholder wrapper class.
  placeholder_by_field = (
      ('inputValue', placeholders.InputValuePlaceholder),
      ('inputPath', placeholders.InputUriPlaceholder),
      ('outputPath', placeholders.OutputUriPlaceholder),
  )
  for field_name, placeholder_cls in placeholder_by_field:
    if command.HasField(field_name):
      return placeholder_cls(getattr(command, field_name))
  raise ValueError('Unrecognized command %s' % command)
The provided code snippet includes necessary dependencies for implementing the `load_kfp_yaml_container_component` function. Write a Python function `def load_kfp_yaml_container_component( path: str) -> Callable[..., base_component.BaseComponent]` to solve the following problem:
Creates a container-based component from a Kubeflow component spec. See https://www.kubeflow.org/docs/pipelines/reference/component-spec/ Example: component = load_kfp_yaml_container_component( "kfp_pipelines_root/components/datasets/Chicago_Taxi_Trips/component.yaml" ) Args: path: local file path of a Kubeflow Pipelines component YAML file. Returns: Container component that can be instantiated in a TFX pipeline.
Here is the function:
def load_kfp_yaml_container_component(
    path: str) -> Callable[..., base_component.BaseComponent]:
  """Creates a container-based component from a Kubeflow component spec.

  See
  https://www.kubeflow.org/docs/pipelines/reference/component-spec/

  Example:
    component = load_kfp_yaml_container_component(
      "kfp_pipelines_root/components/datasets/Chicago_Taxi_Trips/component.yaml"
    )

  Args:
    path: local file path of a Kubeflow Pipelines component YAML file.

  Returns:
    Container component that can be instantiated in a TFX pipeline.
  """
  with open(path) as component_file:
    parsed_spec = yaml.load(component_file, Loader=yaml.SafeLoader)
  _convert_target_fields_to_kv_pair(parsed_spec)
  component_spec = json_format.ParseDict(
      parsed_spec, kfp_component_spec_pb2.ComponentSpec())
  container = component_spec.implementation.container
  # Command and args are concatenated into a single command list.
  command = [
      _get_command_line_argument_type(entry)
      for entry in list(container.command) + list(container.args)
  ]
  # TODO(ericlege): Support classname to class translation in inputs.type
  inputs = {
      item.name: standard_artifacts.String for item in component_spec.inputs
  }
  outputs = {
      item.name: standard_artifacts.String for item in component_spec.outputs
  }
  return container_component.create_container_component(
      name=component_spec.name,
      image=container.image,
      command=command,
      inputs=inputs,
      outputs=outputs,
      parameters={},
  )
166,403 | import datetime
import json
import os
from typing import Any, Dict, List
from absl import logging
from tfx import types
from tfx.components.tuner import executor as tuner_executor
from tfx.dsl.components.base import base_executor
from tfx.extensions.google_cloud_ai_platform import constants
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor
from tfx.types import standard_component_specs
from tfx.utils import doc_controls
from tfx.utils import json_utils
from tfx.utils import name_utils
import multiprocessing
The provided code snippet includes necessary dependencies for implementing the `_need_chief_oracle` function. Write a Python function `def _need_chief_oracle(exec_properties: Dict[str, Any]) -> bool` to solve the following problem:
Returns True if the Tuner instance requires a chief oracle.
Here is the function:
def _need_chief_oracle(exec_properties: Dict[str, Any]) -> bool:
"""Returns True if the Tuner instance requires a chief oracle."""
# TODO(b/160902662): Skip chief oracle for CloudTuner that does not require
# chief oracle for distributed tuning (it is a no-op,
# because it simply forwards to the AI Platform Optimizer
# service).
del exec_properties
return True | Returns True if the Tuner instance requires a chief oracle. |
166,404 | import time
from typing import Any, Dict, List, Optional
from absl import logging
from googleapiclient import discovery
from tfx import types
from tfx.extensions.google_cloud_ai_platform import prediction_clients
from tfx.extensions.google_cloud_ai_platform import training_clients
from tfx.utils import version_utils
_POLLING_INTERVAL_IN_SECONDS = 30
The provided code snippet includes necessary dependencies for implementing the `_wait_for_operation` function. Write a Python function `def _wait_for_operation(api: discovery.Resource, operation: Dict[str, Any], method_name: str) -> Dict[str, Any]` to solve the following problem:
Wait for a long running operation. Args: api: Google API client resource. operation: The operation to wait for. method_name: Operation method name for logging. Returns: Operation completion status. Raises: RuntimeError: If the operation completed with an error.
Here is the function:
def _wait_for_operation(api: discovery.Resource, operation: Dict[str, Any],
method_name: str) -> Dict[str, Any]:
"""Wait for a long running operation.
Args:
api: Google API client resource.
operation: The operation to wait for.
method_name: Operation method name for logging.
Returns:
Operation completion status.
Raises:
RuntimeError: If the operation completed with an error.
"""
status_resc = api.projects().operations().get(name=operation['name'])
while not status_resc.execute().get('done'):
time.sleep(_POLLING_INTERVAL_IN_SECONDS)
logging.info('Method %s still being executed...', method_name)
result = status_resc.execute()
if result.get('error'):
# The operation completed with an error.
raise RuntimeError('Failed to execute {}: {}'.format(
method_name, result['error']))
return result | Wait for a long running operation. Args: api: Google API client resource. operation: The operation to wait for. method_name: Operation method name for logging. Returns: Operation completion status. Raises: RuntimeError: If the operation completed with an error. |
166,405 | import time
from typing import Any, Dict, List, Optional
from absl import logging
from googleapiclient import discovery
from tfx import types
from tfx.extensions.google_cloud_ai_platform import prediction_clients
from tfx.extensions.google_cloud_ai_platform import training_clients
from tfx.utils import version_utils
def _launch_cloud_training(project: str,
                           training_job: Dict[str, Any],
                           enable_vertex: Optional[bool] = False,
                           vertex_region: Optional[str] = None) -> None:
  """Launches and monitors a Cloud custom training job.

  Args:
    project: The GCP project under which the training job will be executed.
    training_job: Training job argument for AI Platform training job. See
      https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.customJobs#CustomJob
      for detailed schema for the Vertex CustomJob. See
      https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs for the
      detailed schema for CAIP Job.
    enable_vertex: Whether to enable Vertex or not.
    vertex_region: Region for endpoint in Vertex training.

  Raises:
    RuntimeError: if the Google Cloud AI Platform training job failed/cancelled.
    ConnectionError: if the status polling of the training job failed due to
      connection issue.
  """
  # TODO(b/185159702): Migrate all training jobs to Vertex and remove the
  # enable_vertex switch.
  client = training_clients.get_job_client(enable_vertex, vertex_region)
  # Configure and launch AI Platform training job.
  client.launch_job(project, training_job)
  # Wait for Cloud Training job to finish
  response = client.get_job()
  # retry_count counts *consecutive* failed polls; it resets to 0 on every
  # successful GET below.
  retry_count = 0
  job_id = client.get_job_name()
  # Monitors the long-running operation by polling the job state periodically,
  # and retries the polling when a transient connectivity issue is encountered.
  #
  # Long-running operation monitoring:
  #   The possible states of "get job" response can be found at
  #   https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#State
  #   where SUCCEEDED/FAILED/CANCELLED are considered to be final states.
  #   The following logic will keep polling the state of the job until the job
  #   enters a final state.
  #
  # During the polling, if a connection error was encountered, the GET request
  # will be retried by recreating the Python API client to refresh the lifecycle
  # of the connection being used. See
  # https://github.com/googleapis/google-api-python-client/issues/218
  # for a detailed description of the problem. If the error persists for
  # _CONNECTION_ERROR_RETRY_LIMIT consecutive attempts, the function will raise
  # ConnectionError.
  while client.get_job_state(response) not in client.JOB_STATES_COMPLETED:
    time.sleep(_POLLING_INTERVAL_IN_SECONDS)
    try:
      response = client.get_job()
      retry_count = 0
    # Handle transient connection error.
    except ConnectionError as err:
      if retry_count < _CONNECTION_ERROR_RETRY_LIMIT:
        retry_count += 1
        logging.warning(
            'ConnectionError (%s) encountered when polling job: %s. Trying to '
            'recreate the API client.', err, job_id)
        # Recreate the Python API client.
        client.create_client()
      else:
        # Retry budget exhausted: surface the original ConnectionError.
        logging.error('Request failed after %s retries.',
                      _CONNECTION_ERROR_RETRY_LIMIT)
        raise
  # Completed, but possibly in a FAILED or CANCELLED terminal state.
  if client.get_job_state(response) in client.JOB_STATES_FAILED:
    err_msg = 'Job \'{}\' did not succeed. Detailed response {}.'.format(
        client.get_job_name(), response)
    logging.error(err_msg)
    raise RuntimeError(err_msg)
  # Cloud training complete
  logging.info('Job \'%s\' successful.', client.get_job_name())
The provided code snippet includes necessary dependencies for implementing the `start_cloud_training` function. Write a Python function `def start_cloud_training( input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any], executor_class_path: str, job_args: Dict[str, Any], job_id: Optional[str], job_labels: Optional[Dict[str, Any]] = None, # AI Platform only. enable_vertex: Optional[bool] = False, vertex_region: Optional[str] = None)` to solve the following problem:
Start a trainer job on AI Platform (AIP). This is done by forwarding the inputs/outputs/exec_properties to the tfx.scripts.run_executor module on a AI Platform training job interpreter. Args: input_dict: Passthrough input dict for tfx.components.Trainer.executor. output_dict: Passthrough input dict for tfx.components.Trainer.executor. exec_properties: Passthrough input dict for tfx.components.Trainer.executor. executor_class_path: class path for TFX core default trainer. job_args: Training argument for AI Platform training job. 'pythonModule', 'pythonVersion' and 'runtimeVersion' will be inferred. For the full set of parameters supported by Vertex AI CustomJob, refer to https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.customJobs#CustomJob For the full set of parameters supported by Google Cloud AI Platform (CAIP) TrainingInput, refer to https://cloud.google.com/ml-engine/docs/tensorflow/training-jobs#configuring_the_job job_id: Job ID for AI Platform Training job. If not supplied, system-determined unique ID is given. Refer to https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#resource-job. In Vertex AI, the job_id corresponds to the display name, a unique ID is always given to the created job. job_labels: Labels for AI Platform training job. enable_vertex: Whether to enable Vertex or not. vertex_region: Region for endpoint in Vertex training. Returns: None
Here is the function:
def start_cloud_training(
    input_dict: Dict[str, List[types.Artifact]],
    output_dict: Dict[str, List[types.Artifact]],
    exec_properties: Dict[str, Any],
    executor_class_path: str,
    job_args: Dict[str, Any],
    job_id: Optional[str],
    job_labels: Optional[Dict[str, Any]] = None,  # AI Platform only.
    enable_vertex: Optional[bool] = False,
    vertex_region: Optional[str] = None):
  """Starts a trainer job on AI Platform (AIP).

  Forwards the inputs/outputs/exec_properties to the tfx.scripts.run_executor
  module on an AI Platform training job interpreter.

  Args:
    input_dict: Passthrough input dict for tfx.components.Trainer.executor.
    output_dict: Passthrough input dict for tfx.components.Trainer.executor.
    exec_properties: Passthrough input dict for tfx.components.Trainer.executor.
    executor_class_path: Class path for the TFX core default trainer.
    job_args: Training argument for the AI Platform training job.
      'pythonModule', 'pythonVersion' and 'runtimeVersion' will be inferred.
      For the full set of parameters supported by Vertex AI CustomJob, refer to
      https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.customJobs#CustomJob
      For the full set of parameters supported by Google Cloud AI Platform
      (CAIP) TrainingInput, refer to
      https://cloud.google.com/ml-engine/docs/tensorflow/training-jobs#configuring_the_job
    job_id: Job ID for the AI Platform Training job. If not supplied, a
      system-determined unique ID is given. Refer to
      https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#resource-job.
      In Vertex AI, the job_id corresponds to the display name; a unique ID is
      always given to the created job.
    job_labels: Labels for the AI Platform training job.
    enable_vertex: Whether to enable Vertex or not.
    vertex_region: Region for endpoint in Vertex training.

  Returns:
    None
  """
  # 'project' was stowed away in job_args by the caller; it must be extracted
  # before the remaining args are turned into the job spec.
  project = job_args.pop('project')
  job_client = training_clients.get_job_client(enable_vertex, vertex_region)
  job_spec = job_client.create_training_job(input_dict, output_dict,
                                            exec_properties,
                                            executor_class_path, job_args,
                                            job_id, job_labels)
  _launch_cloud_training(
      project=project,
      training_job=job_spec,
      enable_vertex=enable_vertex,
      vertex_region=vertex_region)
166,406 | import time
from typing import Any, Dict, List, Optional
from absl import logging
from googleapiclient import discovery
from tfx import types
from tfx.extensions.google_cloud_ai_platform import prediction_clients
from tfx.extensions.google_cloud_ai_platform import training_clients
from tfx.utils import version_utils
_DEFAULT_API_VERSION = 'v1'
The provided code snippet includes necessary dependencies for implementing the `get_service_name_and_api_version` function. Write a Python function `def get_service_name_and_api_version( ai_platform_serving_args: Dict[str, Any])` to solve the following problem:
Gets service name and api version from ai_platform_serving_args. Args: ai_platform_serving_args: Dictionary containing arguments for pushing to AI Platform. Returns: Service name and API version.
Here is the function:
def get_service_name_and_api_version(
    ai_platform_serving_args: Dict[str, Any]):  # -> Tuple[Text, Text]
  """Gets service name and api version from ai_platform_serving_args.

  Args:
    ai_platform_serving_args: Dictionary containing arguments for pushing to
      AI Platform. Currently unused; kept for interface compatibility.

  Returns:
    Service name and API version -- always ('ml', _DEFAULT_API_VERSION).
  """
  del ai_platform_serving_args  # Unused.
  return 'ml', _DEFAULT_API_VERSION
166,407 | import time
from typing import Any, Dict, List, Optional
from absl import logging
from googleapiclient import discovery
from tfx import types
from tfx.extensions.google_cloud_ai_platform import prediction_clients
from tfx.extensions.google_cloud_ai_platform import training_clients
from tfx.utils import version_utils
The provided code snippet includes necessary dependencies for implementing the `create_model_for_aip_prediction_if_not_exist` function. Write a Python function `def create_model_for_aip_prediction_if_not_exist( labels: Dict[str, str], ai_platform_serving_args: Dict[str, Any], api: Optional[discovery.Resource] = None, enable_vertex: Optional[bool] = False) -> bool` to solve the following problem:
Creates a new CAIP model or Vertex endpoint for serving with AI Platform if not exists. Args: labels: The dict of labels that will be attached to this CAIP job or Vertex endpoint. ai_platform_serving_args: Dictionary containing arguments for pushing to AI Platform. api: (CAIP only, required) Google API client resource. enable_vertex: Whether to enable Vertex or not. Returns: Whether a new CAIP model or Vertex endpoint is created. Raises: RuntimeError if creation failed.
Here is the function:
def create_model_for_aip_prediction_if_not_exist(
    labels: Dict[str, str],
    ai_platform_serving_args: Dict[str, Any],
    api: Optional[discovery.Resource] = None,
    enable_vertex: Optional[bool] = False) -> bool:
  """Creates a new CAIP model or Vertex endpoint for serving if none exists.

  Args:
    labels: The dict of labels that will be attached to this CAIP job or
      Vertex endpoint.
    ai_platform_serving_args: Dictionary containing arguments for pushing to
      AI Platform.
    api: (CAIP only, required) Google API client resource.
    enable_vertex: Whether to enable Vertex or not.

  Returns:
    Whether a new CAIP model or Vertex endpoint is created.

  Raises:
    RuntimeError if creation failed.
  """
  prediction_client = prediction_clients.get_prediction_client(
      api=api, enable_vertex=enable_vertex)
  return prediction_client.create_model_for_aip_prediction_if_not_exist(
      labels=labels, ai_platform_serving_args=ai_platform_serving_args)
166,408 | import time
from typing import Any, Dict, List, Optional
from absl import logging
from googleapiclient import discovery
from tfx import types
from tfx.extensions.google_cloud_ai_platform import prediction_clients
from tfx.extensions.google_cloud_ai_platform import training_clients
from tfx.utils import version_utils
The provided code snippet includes necessary dependencies for implementing the `deploy_model_for_aip_prediction` function. Write a Python function `def deploy_model_for_aip_prediction( serving_path: str, model_version_name: str, ai_platform_serving_args: Dict[str, Any], labels: Dict[str, str], api: Optional[discovery.Resource] = None, serving_container_image_uri: Optional[str] = None, endpoint_region: Optional[str] = None, skip_model_endpoint_creation: Optional[bool] = False, set_default: Optional[bool] = True, enable_vertex: Optional[bool] = False) -> str` to solve the following problem:
Deploys a model for serving with AI Platform. Args: serving_path: The path to the model. Must be a GCS URI. model_version_name: Model version for CAIP model being deployed, or model name for the Vertex model being deployed. Must be different from what is currently being served. ai_platform_serving_args: Dictionary containing arguments for pushing to AI Platform. The full set of parameters supported can be found at, for CAIP: https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version. for Vertex: https://googleapis.dev/python/aiplatform/latest/aiplatform.html?highlight=deploy#google.cloud.aiplatform.Model.deploy Most keys are forwarded as-is, but following keys are handled specially. For CAIP: - name: this must be empty (and will be filled by pusher). - deployment_uri: this must be empty (and will be filled by pusher). - python_version: when left empty, this will be filled by python version of the environment being used. - runtime_version: when left empty, this will be filled by TensorFlow version from the environment. - labels: a list of job labels will be merged with user's input. For Vertex: - endpoint_name: Name of the endpoint. - traffic_percentage: Desired traffic to newly deployed model. Forwarded as-is if specified. If not specified, it is set to 100 if set_default_version is True, or set to 0 otherwise. - labels: a list of job labels will be merged with user's input. labels: The dict of labels that will be attached to this CAIP job or Vertex endpoint. They are merged with optional labels from `ai_platform_serving_args`. api: (CAIP only, required) Google API client resource. serving_container_image_uri: (Vertex only, required) The path to the serving container image URI. Container registry for prediction is available at: https://gcr.io/cloud-aiplatform/prediction. endpoint_region: (Vertex only, required) Region for Vertex endpoint. 
For available regions, please see https://cloud.google.com/vertex-ai/docs/general/locations skip_model_endpoint_creation: If true, the method assumes CAIP model or Vertex endpoint already exists in AI platform, therefore skipping its creation. set_default: Whether set the newly deployed CAIP model version or Vertex model as the default. enable_vertex: Whether to enable Vertex or not. Returns: For Vertex, the resource name of the deployed model. Raises: RuntimeError: if an error is encountered when trying to push.
Here is the function:
def deploy_model_for_aip_prediction(
    serving_path: str,
    model_version_name: str,
    ai_platform_serving_args: Dict[str, Any],
    labels: Dict[str, str],
    api: Optional[discovery.Resource] = None,
    serving_container_image_uri: Optional[str] = None,
    endpoint_region: Optional[str] = None,
    skip_model_endpoint_creation: Optional[bool] = False,
    set_default: Optional[bool] = True,
    enable_vertex: Optional[bool] = False) -> str:
  """Deploys a model for serving with AI Platform (CAIP or Vertex).

  Args:
    serving_path: The path to the model. Must be a GCS URI.
    model_version_name: Model version for CAIP model being deployed, or model
      name for the Vertex model being deployed. Must be different from what
      is currently being served.
    ai_platform_serving_args: Dictionary containing arguments for pushing to
      AI Platform. For the full set of supported keys see, for CAIP:
      https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version
      and for Vertex the `google.cloud.aiplatform.Model.deploy` docs. Most
      keys are forwarded as-is; a few (e.g. `name`, `deployment_uri`,
      `python_version`, `runtime_version`, `labels`, `endpoint_name`,
      `traffic_percentage`) are filled in or merged by the pusher.
    labels: The dict of labels that will be attached to this CAIP job or
      Vertex endpoint. They are merged with optional labels from
      `ai_platform_serving_args`.
    api: (CAIP only, required) Google API client resource.
    serving_container_image_uri: (Vertex only, required) The path to the
      serving container image URI.
    endpoint_region: (Vertex only, required) Region for the Vertex endpoint.
    skip_model_endpoint_creation: If true, assumes the CAIP model or Vertex
      endpoint already exists in AI Platform and skips its creation.
    set_default: Whether to set the newly deployed CAIP model version or
      Vertex model as the default.
    enable_vertex: Whether to enable Vertex or not.

  Returns:
    For Vertex, the resource name of the deployed model.

  Raises:
    RuntimeError: if an error is encountered when trying to push.
  """
  client = prediction_clients.get_prediction_client(
      api=api, enable_vertex=enable_vertex)
  # Arguments shared by both the CAIP and the Vertex deploy paths.
  deploy_kwargs = dict(
      serving_path=serving_path,
      model_version_name=model_version_name,
      ai_platform_serving_args=ai_platform_serving_args,
      labels=labels,
      skip_model_endpoint_creation=skip_model_endpoint_creation,
      set_default=set_default)
  if enable_vertex:
    # Vertex deployments additionally require a serving container image and
    # an endpoint region.
    deploy_kwargs.update(
        serving_container_image_uri=serving_container_image_uri,
        endpoint_region=endpoint_region)
  return client.deploy_model(**deploy_kwargs)
For available regions, please see https://cloud.google.com/vertex-ai/docs/general/locations skip_model_endpoint_creation: If true, the method assumes CAIP model or Vertex endpoint already exists in AI platform, therefore skipping its creation. set_default: Whether set the newly deployed CAIP model version or Vertex model as the default. enable_vertex: Whether to enable Vertex or not. Returns: For Vertex, the resource name of the deployed model. Raises: RuntimeError: if an error is encountered when trying to push. |
166,409 | import time
from typing import Any, Dict, List, Optional
from absl import logging
from googleapiclient import discovery
from tfx import types
from tfx.extensions.google_cloud_ai_platform import prediction_clients
from tfx.extensions.google_cloud_ai_platform import training_clients
from tfx.utils import version_utils
The provided code snippet includes necessary dependencies for implementing the `delete_model_from_aip_if_exists` function. Write a Python function `def delete_model_from_aip_if_exists( ai_platform_serving_args: Dict[str, Any], api: Optional[discovery.Resource] = None, model_version_name: Optional[str] = None, delete_model_endpoint: Optional[bool] = False, enable_vertex: Optional[bool] = False, ) -> None` to solve the following problem:
Deletes a model version from Google Cloud AI Platform if version exists. Args: ai_platform_serving_args: Dictionary containing arguments for pushing to AI Platform. For the full set of parameters supported, refer to https://cloud.google.com/ml-engine/reference/rest/v1/projects.models api: (CAIP only, required) Google API client resource. model_version_name: Model version for CAIP model being deployed, or model name for the Vertex model to be deleted. Required if delete_model_endpoint is False, otherwise not needed. delete_model_endpoint: Whether CAIP model or Vertex endpoint should be deleted. enable_vertex: Whether to enable Vertex or not. Raises: RuntimeError: if an error is encountered when trying to delete.
Here is the function:
def delete_model_from_aip_if_exists(
    ai_platform_serving_args: Dict[str, Any],
    api: Optional[discovery.Resource] = None,
    model_version_name: Optional[str] = None,
    delete_model_endpoint: Optional[bool] = False,
    enable_vertex: Optional[bool] = False,
) -> None:
  """Deletes a model version from Google Cloud AI Platform if it exists.

  Args:
    ai_platform_serving_args: Dictionary containing arguments for pushing to
      AI Platform. For the full set of parameters supported, refer to
      https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
    api: (CAIP only, required) Google API client resource.
    model_version_name: Model version for the CAIP model, or model name for
      the Vertex model, to be deleted. Required if `delete_model_endpoint`
      is False, otherwise not needed.
    delete_model_endpoint: Whether the CAIP model or Vertex endpoint itself
      should be deleted.
    enable_vertex: Whether to enable Vertex or not.

  Raises:
    RuntimeError: if an error is encountered when trying to delete.
  """
  # The deletion logic lives in the client; this is a thin dispatch wrapper.
  prediction_client = prediction_clients.get_prediction_client(
      api=api, enable_vertex=enable_vertex)
  prediction_client.delete_model_from_aip_if_exists(
      ai_platform_serving_args=ai_platform_serving_args,
      model_version_name=model_version_name,
      delete_model_endpoint=delete_model_endpoint)
166,410 | import abc
import sys
import time
from typing import Any, Dict, Optional, Union
from absl import logging
from google.cloud import aiplatform
from googleapiclient import discovery
from googleapiclient import errors
import tensorflow as tf
_TF_COMPATIBILITY_OVERRIDE = {
# Generally, runtimeVersion should be same as <major>.<minor> of currently
# installed tensorflow version, with certain compatibility hacks since
# some TensorFlow runtime versions are not explicitly supported by
# CAIP pusher. See:
# https://cloud.google.com/ai-platform/prediction/docs/runtime-version-list
'2.0': '1.15',
# TODO(b/168249383) Update this once CAIP model support TF 2.13 runtime.
'2.13': '2.11',
}
The provided code snippet includes necessary dependencies for implementing the `_get_tf_runtime_version` function. Write a Python function `def _get_tf_runtime_version(tf_version: str) -> str` to solve the following problem:
Returns the tensorflow runtime version used in Cloud AI Platform. This is only used for prediction service. Args: tf_version: version string returned from `tf.__version__`. Returns: same major.minor version of installed tensorflow, except when overridden by _TF_COMPATIBILITY_OVERRIDE.
Here is the function:
def _get_tf_runtime_version(tf_version: str) -> str:
  """Returns the TensorFlow runtime version used in Cloud AI Platform.

  This is only used for the prediction service.

  Args:
    tf_version: version string returned from `tf.__version__`.

  Returns:
    The `major.minor` part of the installed TensorFlow version, except when
    overridden by `_TF_COMPATIBILITY_OVERRIDE`.
  """
  # Keep only `major.minor`; patch-level versions are not meaningful to CAIP.
  major_minor = '.'.join(tf_version.split('.')[:2])
  override = _TF_COMPATIBILITY_OVERRIDE.get(major_minor)
  return override if override else major_minor
166,411 | import json
import os
import re
from typing import List
from absl import logging
from packaging import version
from tfx.types import artifact as artifact_lib
from ml_metadata.proto import metadata_store_pb2
_Artifact = artifact_lib.Artifact
def get_split_uris(artifact_list: List[_Artifact], split: str) -> List[str]:
  """Get the uris of Artifacts with matching split from given list.

  Args:
    artifact_list: A list of Artifact objects.
    split: Name of split.

  Returns:
    A list of uris, one per Artifact in `artifact_list`, for the given split.

  Raises:
    ValueError: If any artifact in `artifact_list` lacks the requested split.
  """
  uris = []
  for item in artifact_list:
    if split not in decode_split_names(item.split_names):
      continue
    # TODO(b/182526033): deprecate old split format.
    if is_artifact_version_older_than(
        item, _ARTIFACT_VERSION_FOR_SPLIT_UPDATE
    ):
      # Legacy layout: the split directory is the bare split name.
      uris.append(os.path.join(item.uri, split))
    else:
      uris.append(os.path.join(item.uri, f'Split-{split}'))
  if len(uris) != len(artifact_list):
    raise ValueError(
        f'Split does not exist over all example artifacts: {split}'
    )
  return uris
The provided code snippet includes necessary dependencies for implementing the `get_split_uri` function. Write a Python function `def get_split_uri(artifact_list: List[_Artifact], split: str) -> str` to solve the following problem:
Get the uri of Artifact with matching split from given list. Args: artifact_list: A list of Artifact objects whose length must be one. split: Name of split. Returns: The uri of Artifact object in artifact_list with matching split. Raises: ValueError: If number with matching split in artifact_list is not one.
Here is the function:
def get_split_uri(artifact_list: List[_Artifact], split: str) -> str:
  """Get the uri of the single Artifact with matching split from given list.

  Args:
    artifact_list: A list of Artifact objects whose length must be one.
    split: Name of split.

  Returns:
    The uri of the Artifact object in `artifact_list` with matching split.

  Raises:
    ValueError: If the number of matching uris is not exactly one.
  """
  artifact_split_uris = get_split_uris(artifact_list, split)
  # Early return on the expected single-match case; anything else is an error.
  if len(artifact_split_uris) == 1:
    return artifact_split_uris[0]
  raise ValueError(
      f'Expected exactly one artifact with split {repr(split)}, but found '
      f'matching artifacts {artifact_split_uris}.'
  )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.