id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
166,412 | import json
import os
import re
from typing import List
from absl import logging
from packaging import version
from tfx.types import artifact as artifact_lib
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `encode_split_names` function. Write a Python function `def encode_split_names(splits: List[str]) -> str` to solve the following problem:
Get the encoded representation of a list of split names.
Here is the function:
def encode_split_names(splits: List[str]) -> str:
  """Get the encoded representation of a list of split names.

  Each split name is validated (unless it is a RuntimeParameter, which is
  passed through as a string) and the resulting list is JSON-encoded.
  """
  encoded_splits = []
  for name in splits:
    # TODO(b/146759051): Remove workaround for RuntimeParameter object once
    # this bug is clarified.
    if name.__class__.__name__ == 'RuntimeParameter':
      logging.warning(
          'RuntimeParameter provided for split name: this functionality may '
          'not be supported in the future.'
      )
      name = str(name)
      # Intentionally ignore split format check to pass through the template
      # for now. This behavior is very fragile and should be fixed (see
      # b/146759051).
    elif re.match('^([A-Za-z0-9][A-Za-z0-9_-]*)?$', name) is None:
      # TODO(ccy): Disallow empty split names once the importer removes split
      # as a property for all artifacts.
      raise ValueError(
          'Split names are expected to be alphanumeric (allowing dashes and '
          'underscores, provided they are not the first character); got '
          f'{repr(name)} instead.'
      )
    encoded_splits.append(name)
  return json.dumps(encoded_splits)
166,413 | import typing
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Set, Type, cast
from tfx.dsl.placeholder import placeholder as ph
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import channel
The provided code snippet includes necessary dependencies for implementing the `get_individual_channels` function. Write a Python function `def get_individual_channels( input_channel: channel.BaseChannel) -> List[channel.Channel]` to solve the following problem:
Converts BaseChannel into a list of Channels.
Here is the function:
def get_individual_channels(
    input_channel: channel.BaseChannel) -> List[channel.Channel]:
  """Converts BaseChannel into a list of Channels."""
  # A plain Channel is already individual; wrap it in a singleton list.
  if isinstance(input_channel, channel.Channel):
    return [input_channel]
  # A UnionChannel is flattened to its plain-Channel members only.
  if isinstance(input_channel, channel.UnionChannel):
    union_channel = cast(channel.UnionChannel, input_channel)
    return [
        member for member in union_channel.channels
        if isinstance(member, channel.Channel)
    ]
  raise NotImplementedError(
      f'Unsupported Channel type: {type(input_channel)}')
166,414 | import typing
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Set, Type, cast
from tfx.dsl.placeholder import placeholder as ph
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import channel
The provided code snippet includes necessary dependencies for implementing the `union` function. Write a Python function `def union(channels: Iterable[channel.BaseChannel]) -> channel.UnionChannel` to solve the following problem:
Returns the union of channels. All channels should have the same artifact type, otherwise an error would be raised. Returned channel deduplicates the inputs so each artifact is guaranteed to be present at most once. `union()` does NOT guarantee any ordering of artifacts for the consumer component. Args: channels: An iterable of BaseChannels. Returns: A BaseChannel that represents the union of channels.
Here is the function:
def union(channels: Iterable[channel.BaseChannel]) -> channel.UnionChannel:
  """Returns the union of channels.

  Every input channel must carry the same artifact type, otherwise an error
  is raised. The returned channel deduplicates the inputs, so each artifact
  is guaranteed to appear at most once. Note that `union()` makes no promise
  about the ordering of artifacts seen by the consumer component.

  Args:
    channels: An iterable of BaseChannels.

  Returns:
    A BaseChannel that represents the union of channels.
  """
  return channel.UnionChannel(channels)
166,415 | import typing
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Set, Type, cast
from tfx.dsl.placeholder import placeholder as ph
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import channel
The provided code snippet includes necessary dependencies for implementing the `external_pipeline_artifact_query` function. Write a Python function `def external_pipeline_artifact_query( artifact_type: Type[artifact.Artifact], *, owner: str, pipeline_name: str, producer_component_id: str, output_key: str, pipeline_run_id: str = '', ) -> channel.ExternalPipelineChannel` to solve the following problem:
Helper function to construct a query to get artifacts from an external pipeline. Args: artifact_type: Subclass of Artifact for this channel. owner: Owner of the pipeline. pipeline_name: Name of the pipeline the artifacts belong to. producer_component_id: Id of the component that produces the artifacts. output_key: The output key under which the producer component produces the artifacts in this Channel. pipeline_run_id: (Optional) Pipeline run id the artifacts belong to. Returns: channel.ExternalPipelineChannel instance. Raises: ValueError, if owner or pipeline_name is missing.
Here is the function:
def external_pipeline_artifact_query(
    artifact_type: Type[artifact.Artifact],
    *,
    owner: str,
    pipeline_name: str,
    producer_component_id: str,
    output_key: str,
    pipeline_run_id: str = '',
) -> channel.ExternalPipelineChannel:
  """Helper function to construct a query to get artifacts from an external pipeline.

  Args:
    artifact_type: Subclass of Artifact for this channel.
    owner: Owner of the pipeline.
    pipeline_name: Name of the pipeline the artifacts belong to.
    producer_component_id: Id of the component that produces the artifacts.
    output_key: The output key under which the producer component emits the
      artifacts in this Channel.
    pipeline_run_id: (Optional) Pipeline run id the artifacts belong to.

  Returns:
    channel.ExternalPipelineChannel instance.

  Raises:
    ValueError, if owner or pipeline_name is missing.
  """
  # Both identifiers are required to address the external pipeline.
  if not (owner and pipeline_name):
    raise ValueError('owner or pipeline_name is missing.')
  return channel.ExternalPipelineChannel(
      artifact_type=artifact_type,
      owner=owner,
      pipeline_name=pipeline_name,
      producer_component_id=producer_component_id,
      output_key=output_key,
      pipeline_run_id=pipeline_run_id,
  )
166,416 | import typing
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Set, Type, cast
from tfx.dsl.placeholder import placeholder as ph
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import channel
The provided code snippet includes necessary dependencies for implementing the `unwrap_simple_channel_placeholder` function. Write a Python function `def unwrap_simple_channel_placeholder( placeholder: ph.Placeholder, ) -> channel.Channel` to solve the following problem:
Unwraps a `x.future()[0].value` placeholder and returns its `x`. Args: placeholder: A placeholder expression. Returns: The (only) channel involved in the expression. Raises: ValueError: If the input placeholder is anything more complex than `some_channel.future()[0].value`, and in particular if it involves multiple channels, arithmetic operations or input/output artifacts.
Here is the function:
def unwrap_simple_channel_placeholder(
    placeholder: ph.Placeholder,
) -> channel.Channel:
  """Unwraps a `x.future()[0].value` placeholder and returns its `x`.

  Args:
    placeholder: A placeholder expression.

  Returns:
    The (only) channel involved in the expression.

  Raises:
    ValueError: If the input placeholder is anything more complex than
      `some_channel.future()[0].value`, and in particular if it involves
      multiple channels, arithmetic operations or input/output artifacts.
  """
  # Validate that it's the right shape.
  outer_ph = placeholder.encode()
  # Walk the encoded proto down the exact path that
  # `somechannel.future()[0].value` produces. Accessing absent sub-message
  # fields on a proto yields default (empty) messages rather than raising,
  # which is what the HasField() check below relies on.
  index_op = outer_ph.operator.artifact_value_op.expression.operator.index_op
  cwp = index_op.expression.placeholder
  if (
      # This catches the case where we've been navigating down non-existent
      # proto paths above and been getting default messages all along. If this
      # sub-message is present, then the whole chain was correct.
      not index_op.expression.HasField('placeholder')
      # ChannelWrappedPlaceholder uses INPUT_ARTIFACT for some reason, and has
      # no key when encoded with encode().
      or cwp.type != placeholder_pb2.Placeholder.Type.INPUT_ARTIFACT
      or cwp.key
      # For the `[0]` part of the desired shape.
      or index_op.index != 0
  ):
    raise ValueError(
        'Expected placeholder of shape somechannel.future()[0].value, but got'
        f' {placeholder}.'
    )
  # Now that we know there's only one channel inside, we can just extract it:
  return next(
      p.channel
      for p in placeholder.traverse()
      if isinstance(p, ph.ChannelWrappedPlaceholder)
  )
166,417 | import copy
import inspect
import itertools
from typing import Any, Dict, List, Mapping, Optional, Type, cast
from tfx.dsl.component.experimental.json_compat import check_strict_json_compat
from tfx.dsl.placeholder import placeholder
from tfx.types import artifact
from tfx.types import channel
from tfx.types.system_executions import SystemExecution
from tfx.utils import json_utils
from tfx.utils import proto_utils
from google.protobuf import message
def _is_runtime_param(data: Any) -> bool:
return data.__class__.__name__ == 'RuntimeParameter'
def _put_default_dict(dict_data: Dict[str, Any]) -> None:
"""Helper function to replace RuntimeParameter with its default value."""
for k, v in dict_data.items():
if isinstance(v, dict):
_put_default_dict(v)
elif isinstance(v, list):
_put_default_list(v)
elif v.__class__.__name__ == 'RuntimeParameter':
# Currently supporting int, float, bool, Text
ptype = v.ptype
dict_data[k] = ptype.__new__(ptype)
def _put_default_list(list_data: List[Any]) -> None:
"""Helper function to replace RuntimeParameter with its default value."""
for index, item in enumerate(list_data):
if isinstance(item, dict):
_put_default_dict(item)
elif isinstance(item, list):
_put_default_list(item)
elif item.__class__.__name__ == 'RuntimeParameter':
# Currently supporting int, float, bool, Text
ptype = item.ptype
list_data[index] = ptype.__new__(ptype)
The provided code snippet includes necessary dependencies for implementing the `_make_default` function. Write a Python function `def _make_default(data: Any) -> Any` to solve the following problem:
Replaces RuntimeParameter by its ptype's default. Args: data: an object possibly containing RuntimeParameter. Returns: A version of input data where RuntimeParameters are replaced with the default values of their ptype.
Here is the function:
def _make_default(data: Any) -> Any:
  """Replaces RuntimeParameter by its ptype's default.

  Args:
    data: an object possibly containing RuntimeParameter.

  Returns:
    A version of input data where RuntimeParameters are replaced with
    the default values of their ptype.
  """
  if isinstance(data, dict):
    # Deep-copy first so the caller's structure is never mutated.
    defaulted = copy.deepcopy(data)
    _put_default_dict(defaulted)
    return defaulted
  if isinstance(data, list):
    defaulted = copy.deepcopy(data)
    _put_default_list(defaulted)
    return defaulted
  if _is_runtime_param(data):
    return data.ptype.__new__(data.ptype)
  # Anything else passes through untouched.
  return data
166,418 | from __future__ import annotations
import abc
import copy
import dataclasses
import inspect
import json
import textwrap
from typing import Any, Dict, Generic, Iterable, List, Optional, Sequence, Set, Type, TypeVar, Union, cast
from absl import logging
from tfx.dsl.placeholder import artifact_placeholder
from tfx.types import artifact_utils
from tfx.types.artifact import Artifact
from tfx.utils import deprecation_utils
from tfx.utils import doc_controls
from tfx.utils import json_utils
import typing_extensions
from google.protobuf import json_format
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
class Artifact(json_utils.Jsonable):
"""TFX artifact used for orchestration.
This is used for type-checking and inter-component communication. Currently,
it wraps a tuple of (ml_metadata.proto.Artifact,
ml_metadata.proto.ArtifactType) with additional property accessors for
internal state.
A user may create a subclass of Artifact and override the TYPE_NAME property
with the type for this artifact subclass. Users of the subclass may then omit
the "type_name" field when construction the object.
A user may specify artifact type-specific properties for an Artifact subclass
by overriding the PROPERTIES dictionary, as detailed below.
Note: the behavior of this class is experimental, without backwards
compatibility guarantees, and may change in upcoming releases.
"""
# String artifact type name used to identify the type in ML Metadata
# database. Must be overridden by subclass.
#
# Example usage:
#
# TYPE_NAME = 'MyTypeName'
TYPE_NAME: Optional[str] = None
# An MLMD type annotations from tfx.v1.dsl.standard_annotations.
#
# Example usage:
#
# ```python
# from tfx import v1 as tfx
# TYPE_ANNOTATION = tfx.dsl.standard_annotations.Dataset
# ````
TYPE_ANNOTATION: Optional[Type[SystemArtifact]] = None
# Optional dictionary of property name strings as keys and `Property`
# objects as values, used to specify the artifact type's properties.
# Subsequently, this artifact property may be accessed as Python attributes
# of the artifact object.
#
# Example usage:
#
# PROPERTIES = {
# 'span': Property(type=PropertyType.INT),
# # Comma separated of splits for an artifact. Empty string means artifact
# # has no split.
# 'split_names': Property(type=PropertyType.STRING),
# }
#
# Subsequently, these properties can be stored and accessed as
# `myartifact.span` and `myartifact.split_name`, respectively.
PROPERTIES: Optional[Dict[str, Property]] = None
# Initialization flag to support setattr / getattr behavior.
_initialized = False
def __init__(
    self,
    mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None):
  """Construct an instance of Artifact.

  Used by TFX internal implementation: create an empty Artifact with
  type_name and optional split info specified. The remaining info will be
  filled in during compiling and running time. The Artifact should be
  transparent to end users and should not be initiated directly by pipeline
  users.

  Args:
    mlmd_artifact_type: Proto message defining the underlying ArtifactType.
      Optional and intended for internal use.

  Raises:
    ValueError: If the type argument is missing/invalid for the base class,
      or supplied for a subclass (which declares its type via TYPE_NAME).
  """
  if self.__class__ == Artifact:
    # The generic base class carries no TYPE_NAME, so its type must be
    # supplied explicitly as a proto.
    if not mlmd_artifact_type:
      raise ValueError(
          'The "mlmd_artifact_type" argument must be passed to specify a '
          'type for this Artifact.')
    if not isinstance(mlmd_artifact_type, metadata_store_pb2.ArtifactType):
      raise ValueError(
          'The "mlmd_artifact_type" argument must be an instance of the '
          'proto message ml_metadata.proto.metadata_store_pb2.ArtifactType.')
  else:
    # Subclasses derive their type from TYPE_NAME / PROPERTIES instead.
    if mlmd_artifact_type:
      raise ValueError(
          'The "mlmd_artifact_type" argument must not be passed for '
          'Artifact subclass %s.' % self.__class__)
    mlmd_artifact_type = self._get_artifact_type()
  # MLMD artifact type proto object.
  self._artifact_type = mlmd_artifact_type
  # Underlying MLMD artifact proto object.
  self._artifact = metadata_store_pb2.Artifact()
  # When list/dict JSON or proto value properties are read, it is possible
  # they will be modified without knowledge of this class. Therefore,
  # deserialized values need to be cached here and reserialized into the
  # metadata proto when requested.
  self._cached_modifiable_properties = {}
  self._cached_modifiable_custom_properties = {}
  # Initialization flag to prevent recursive getattr / setattr errors.
  self._initialized = True
# NOTE(review): takes `cls` — presumably decorated with @classmethod in the
# original source; decorators appear to have been stripped in this dump.
def _get_artifact_type(cls):
  """Returns the MLMD ArtifactType proto for this class, building it lazily.

  The constructed proto is cached on the class as `_MLMD_ARTIFACT_TYPE`; a
  deep copy is returned so callers cannot mutate the cached instance.
  """
  existing_artifact_type = getattr(cls, '_MLMD_ARTIFACT_TYPE', None)
  # Rebuild when absent, or when TYPE_NAME disagrees with the cached proto
  # (e.g. a cache inherited from a parent class).
  if (not existing_artifact_type) or (cls.TYPE_NAME !=
                                      existing_artifact_type.name):
    type_name = cls.TYPE_NAME
    if not (type_name and isinstance(type_name, str)):
      raise ValueError(
          ('The Artifact subclass %s must override the TYPE_NAME attribute '
           'with a string type name identifier (got %r instead).') %
          (cls, type_name))
    artifact_type = metadata_store_pb2.ArtifactType()
    artifact_type.name = type_name
    # Populate ML Metadata artifact properties dictionary.
    if cls.PROPERTIES:
      # Perform validation on PROPERTIES dictionary.
      if not isinstance(cls.PROPERTIES, dict):
        raise ValueError(
            'Artifact subclass %s.PROPERTIES is not a dictionary.' % cls)
      for key, value in cls.PROPERTIES.items():
        if not (isinstance(key,
                           (str, bytes)) and isinstance(value, Property)):
          raise ValueError(
              ('Artifact subclass %s.PROPERTIES dictionary must have keys of '
               'type string and values of type artifact.Property.') % cls)
      for key, value in cls.PROPERTIES.items():
        artifact_type.properties[key] = value.mlmd_type()
    # Populate ML Metadata artifact type field: `base_type`.
    type_annotation_cls = cls.TYPE_ANNOTATION
    if type_annotation_cls:
      if not issubclass(type_annotation_cls, SystemArtifact):
        raise ValueError(
            'TYPE_ANNOTATION %s is not a subclass of SystemArtifact.' %
            type_annotation_cls)
      if type_annotation_cls.MLMD_SYSTEM_BASE_TYPE:
        artifact_type.base_type = type_annotation_cls.MLMD_SYSTEM_BASE_TYPE
    cls._MLMD_ARTIFACT_TYPE = artifact_type
  # Deep copy so callers can't mutate the class-level cached proto.
  return copy.deepcopy(cls._MLMD_ARTIFACT_TYPE)
def __getattr__(self, name: str) -> Any:
  """Custom __getattr__ to allow access to artifact properties.

  Only invoked when normal attribute lookup fails, so regular instance
  attributes take precedence over MLMD-typed properties. Missing property
  values return the type's zero value (or None for STRUCT/PROTO) without
  mutating the underlying proto.
  """
  if name == '_artifact_type':
    # Prevent infinite recursion when used with copy.deepcopy().
    raise AttributeError()
  if name not in self._artifact_type.properties:
    raise AttributeError('Artifact has no property %r.' % name)
  property_mlmd_type = self._artifact_type.properties[name]
  if property_mlmd_type == metadata_store_pb2.STRING:
    if name not in self._artifact.properties:
      # Avoid populating empty property protobuf with the [] operator.
      return ''
    return self._artifact.properties[name].string_value
  elif property_mlmd_type == metadata_store_pb2.INT:
    if name not in self._artifact.properties:
      # Avoid populating empty property protobuf with the [] operator.
      return 0
    return self._artifact.properties[name].int_value
  elif property_mlmd_type == metadata_store_pb2.DOUBLE:
    if name not in self._artifact.properties:
      # Avoid populating empty property protobuf with the [] operator.
      return 0.0
    return self._artifact.properties[name].double_value
  elif property_mlmd_type == metadata_store_pb2.BOOLEAN:
    if name not in self._artifact.properties:
      # Avoid populating empty property protobuf with the [] operator.
      return False
    return self._artifact.properties[name].bool_value
  elif property_mlmd_type == metadata_store_pb2.STRUCT:
    if name not in self._artifact.properties:
      # Avoid populating empty property protobuf with the [] operator.
      return None
    if name in self._cached_modifiable_properties:
      return self._cached_modifiable_properties[name]
    value = _decode_struct_value(self._artifact.properties[name].struct_value)
    # We must cache the decoded lists or dictionaries returned here so that
    # if their recursive contents are modified, the Metadata proto message
    # can be updated to reflect this.
    if isinstance(value, (dict, list)):
      self._cached_modifiable_properties[name] = value
    return value
  elif property_mlmd_type == metadata_store_pb2.PROTO:
    if name not in self._artifact.properties:
      # Avoid populating empty property protobuf with the [] operator.
      return None
    if name in self._cached_modifiable_properties:
      return self._cached_modifiable_properties[name]
    value = proto_utils.unpack_proto_any(
        self._artifact.properties[name].proto_value)
    # We must cache the protobuf message here so that if its contents are
    # modified, the Metadata proto message can be updated to reflect this.
    self._cached_modifiable_properties[name] = value
    return value
  else:
    raise ValueError(
        'Unknown MLMD type %r for property %r.' % (property_mlmd_type, name)
    )
def __setattr__(self, name: str) -> None if False else None:  # placeholder
def set_mlmd_artifact(self, artifact: metadata_store_pb2.Artifact):
  """Replace the MLMD artifact object on this artifact."""
  if not isinstance(artifact, metadata_store_pb2.Artifact):
    raise ValueError(
        ('Expected instance of metadata_store_pb2.Artifact, got %s '
         'instead.') % (artifact,))
  self._artifact = artifact
  # Drop cached deserialized STRUCT/PROTO values: they belong to the replaced
  # proto and would otherwise be flushed into the new one later.
  self._cached_modifiable_properties = {}
  self._cached_modifiable_custom_properties = {}
def set_mlmd_artifact_type(self,
                           artifact_type: metadata_store_pb2.ArtifactType):
  """Set entire ArtifactType in this object."""
  if not isinstance(artifact_type, metadata_store_pb2.ArtifactType):
    raise ValueError(
        ('Expected instance of metadata_store_pb2.ArtifactType, got %s '
         'instead.') % (artifact_type,))
  self._artifact_type = artifact_type
  # Keep the artifact proto's type_id in sync with the new type.
  self._artifact.type_id = artifact_type.id
def __repr__(self):
  # Debug representation showing both the artifact proto and its type proto.
  return 'Artifact(artifact: {}, artifact_type: {})'.format(
      str(self.mlmd_artifact), str(self._artifact_type))
def to_json_dict(self) -> Dict[str, Any]:
  """Serializes the artifact proto, its type, and class identity to JSON-ables."""
  return {
      'artifact':
          json.loads(
              json_format.MessageToJson(
                  message=self.mlmd_artifact,
                  preserving_proto_field_name=True)),
      'artifact_type':
          json.loads(
              json_format.MessageToJson(
                  message=self._artifact_type,
                  preserving_proto_field_name=True)),
      '__artifact_class_module__':
          self.__class__.__module__,
      '__artifact_class_name__':
          self.__class__.__name__,
  }
# NOTE(review): takes `cls` — presumably a @classmethod in the original
# source; decorators appear stripped in this dump. `importlib` is used below
# but not visible among this chunk's imports — presumably imported at module
# top; verify.
def from_json_dict(cls, dict_data: Dict[str, Any]) -> Any:
  """Deserializes an Artifact from the dict produced by to_json_dict."""
  module_name = dict_data['__artifact_class_module__']
  class_name = dict_data['__artifact_class_name__']
  artifact = metadata_store_pb2.Artifact()
  artifact_type = metadata_store_pb2.ArtifactType()
  json_format.Parse(json.dumps(dict_data['artifact']), artifact)
  json_format.Parse(json.dumps(dict_data['artifact_type']), artifact_type)
  # First, try to resolve the specific class used for the artifact; if this
  # is not possible, use a generic artifact.Artifact object.
  result = None
  try:
    artifact_cls = getattr(importlib.import_module(module_name), class_name)
    # If the artifact type is the base Artifact class, do not construct the
    # object here since that constructor requires the mlmd_artifact_type
    # argument.
    if artifact_cls != Artifact:
      result = artifact_cls()
  except (AttributeError, ImportError, ValueError):
    logging.warning((
        'Could not load artifact class %s.%s; using fallback deserialization '
        'for the relevant artifact. Please make sure that any artifact '
        'classes can be imported within your container or environment.'),
        module_name, class_name)
  if not result:
    # Fallback: a generic Artifact carrying the deserialized type proto.
    result = Artifact(mlmd_artifact_type=artifact_type)
  result.set_mlmd_artifact_type(artifact_type)
  result.set_mlmd_artifact(artifact)
  return result
# Read-only properties.
# NOTE(review): the paired same-name defs below (uri/uri, id/id,
# type_id/type_id) strongly suggest @property / @x.setter decorators in the
# original source that were stripped by this dump — confirm upstream.
def type(self):
  """Type of the artifact."""
  return self.__class__
def type_name(self):
  """Type name of the underlying mlmd artifact."""
  return self._artifact_type.name
def artifact_type(self):
  """Type of the underlying mlmd artifact."""
  return self._artifact_type
def mlmd_artifact(self):
  """Underlying mlmd artifact."""
  # Update the Metadata proto message to reflect the contents of any
  # possibly-modified JSON value properties, which may be dicts or lists
  # modifiable by the user.
  for cache_map, target_proto_properties in [
      (self._cached_modifiable_properties, self._artifact.properties),
      (self._cached_modifiable_custom_properties,
       self._artifact.custom_properties)
  ]:
    for key, cached_value in cache_map.items():
      if cached_value is None:
        # A cached None means the value was cleared; drop the proto entry.
        if key in target_proto_properties:
          del target_proto_properties[key]
      elif isinstance(cached_value, message.Message):
        target_proto_properties[key].proto_value.Pack(cached_value)
      else:
        struct_value = _encode_struct_value(cached_value)
        target_proto_properties[key].struct_value.CopyFrom(struct_value)
  return self._artifact
# Settable properties for all artifact types.
def uri(self) -> str:
  """Artifact URI."""
  return self._artifact.uri
def uri(self, uri: str):
  """Setter for artifact URI."""
  self._artifact.uri = uri
def id(self) -> int:
  """Id of the underlying mlmd artifact."""
  return self._artifact.id
def id(self, artifact_id: int):
  """Set id of underlying artifact."""
  self._artifact.id = artifact_id
def type_id(self) -> int:
  """Type id of the underlying mlmd artifact."""
  return self._artifact.type_id
def type_id(self, type_id: int):
  """Set id of underlying artifact type."""
  self._artifact.type_id = type_id
# System-managed properties for all artifact types. Will be deprecated soon
# in favor of a unified getter / setter interface and MLMD context.
#
# TODO(b/135056715): Rely on MLMD context for pipeline grouping for
# artifacts once it's ready.
#
# The following system properties are used:
# - name: The name of the artifact, used to differentiate same type of
# artifact produced by the same component (in a subsequent change, this
# information will move to the associated ML Metadata Event object).
# - state: The state of an artifact; can be one of PENDING, PUBLISHED,
# MISSING, DELETING, DELETED (in a subsequent change, this information
# will move to a top-level ML Metadata Artifact attribute).
# - pipeline_name: The name of the pipeline that produces the artifact (in
# a subsequent change, this information will move to an associated ML
# Metadata Context attribute).
# - producer_component: The name of the component that produces the
# artifact (in a subsequent change, this information will move to the
# associated ML Metadata Event object).
def _get_system_property(self, key: str) -> str:
  """Reads a system-managed string property from its legacy or custom slot."""
  if (key in self._artifact_type.properties and
      key in self._artifact.properties):
    # Legacy artifact types which have explicitly defined system properties.
    return self._artifact.properties[key].string_value
  elif key in self._artifact.custom_properties:
    return self._artifact.custom_properties[key].string_value
  else:
    # Do not call __getitem__ on properties or custom_properties if key is
    # missing, so that property mapping is not mutated.
    return ''
def _set_system_property(self, key: str, value: str):
  """Writes a system-managed property; new value always goes to custom slot."""
  if (key in self._artifact_type.properties and
      key in self._artifact.properties):
    # Clear non-custom property in legacy artifact types.
    del self._artifact.properties[key]
  self._artifact.custom_properties[key].string_value = value
# NOTE(review): paired same-name defs below suggest stripped @property /
# @x.setter decorators in the original source — confirm upstream.
def name(self) -> str:
  """Name of the underlying mlmd artifact."""
  return self._get_system_property('name')
def name(self, name: str):
  """Set name of the underlying artifact."""
  self._set_system_property('name', name)
  # Also mirror the name onto the MLMD artifact proto itself.
  self._artifact.name = name
def state(self) -> str:
  """State of the underlying mlmd artifact."""
  # Backward compatibility behavior; for unknown artifact state string we
  # use UNKNOWN and 'state' custom property.
  if self._artifact.state not in _MLMD_TO_TFX_ARTIFACT_STATE:
    return self._get_system_property('state')
  return _MLMD_TO_TFX_ARTIFACT_STATE[self._artifact.state]
def state(self, state: str):
  """Set state of the underlying artifact."""
  if state not in _TFX_TO_MLMD_ARTIFACT_STATE:
    # Backward compatibility behavior; for unknown artifact state string we
    # use UNKNOWN and 'state' custom property.
    self._artifact.state = MlmdArtifactState.UNKNOWN
    self._set_system_property('state', state)
  else:
    self._artifact.state = _TFX_TO_MLMD_ARTIFACT_STATE[state]
def pipeline_name(self) -> str:
  """Name of the pipeline that produce the artifact."""
  return self._get_system_property('pipeline_name')
def pipeline_name(self, pipeline_name: str):
  """Set name of the pipeline that produce the artifact."""
  self._set_system_property('pipeline_name', pipeline_name)
def producer_component(self) -> str:
  """Producer component of the artifact."""
  return self._get_system_property('producer_component')
def producer_component(self, producer_component: str):
  """Set producer component of the artifact."""
  self._set_system_property('producer_component', producer_component)
# LINT.IfChange
def is_external(self) -> bool:
  """Returns true if the artifact is external."""
  return self.get_int_custom_property('is_external') == 1
def is_external(self, is_external: bool):
  """Sets if the artifact is external."""
  self.set_int_custom_property('is_external', is_external)
# LINT.ThenChange(<Internal source code>)
# Custom property accessors.
def set_string_custom_property(self, key: str, value: str):
  """Set a custom property of string type."""
  self._artifact.custom_properties[key].string_value = value

def set_int_custom_property(self, key: str, value: int):
  """Set a custom property of int type."""
  # builtins.int() coerces int-like values (e.g. numpy integers) to a
  # plain Python int before storing.
  self._artifact.custom_properties[key].int_value = builtins.int(value)

def set_float_custom_property(self, key: str, value: float):
  """Sets a custom property of float type."""
  self._artifact.custom_properties[key].double_value = builtins.float(value)

def set_bool_custom_property(self, key: str, value: bool):
  """Sets a custom property of bool type."""
  self._artifact.custom_properties[key].bool_value = value

def set_json_value_custom_property(self, key: str, value: JsonValueType):
  """Sets a custom property of JSON type."""
  # The value is only cached here; it is presumably serialized into the
  # proto when the artifact is written out -- in-place edits to mutable
  # values remain visible until then (see get_json_value_custom_property).
  self._cached_modifiable_custom_properties[key] = value

def set_proto_custom_property(self, key: str, value: message.Message):
  """Sets a custom property of proto type."""
  # Cached like JSON values so later mutations of the message are captured.
  self._cached_modifiable_custom_properties[key] = value
def has_property(self, key: str) -> bool:
  """Returns True if `key` exists as a stored or pending (cached) property."""
  if key in self._artifact.properties:
    return True
  return key in self._cached_modifiable_properties
def has_custom_property(self, key: str) -> bool:
  """Returns True if `key` exists as a stored or pending custom property."""
  if key in self._artifact.custom_properties:
    return True
  return key in self._cached_modifiable_custom_properties
def get_string_custom_property(self, key: str) -> str:
  """Get a custom property of string type."""
  if key not in self._artifact.custom_properties:
    return ''
  # A string stored via the JSON pathway takes precedence over the proto's
  # string_value field.
  json_value = self.get_json_value_custom_property(key)
  if isinstance(json_value, str):
    return json_value
  return self._artifact.custom_properties[key].string_value

def get_int_custom_property(self, key: str) -> int:
  """Get a custom property of int type."""
  if key not in self._artifact.custom_properties:
    return 0
  # A JSON-stored number surfaces as float (see the isinstance check) and
  # is truncated to int here.
  json_value = self.get_json_value_custom_property(key)
  if isinstance(json_value, float):
    return int(json_value)
  return self._artifact.custom_properties[key].int_value

# TODO(b/179215351): Standardize type name into one of float and double.
def get_float_custom_property(self, key: str) -> float:
  """Gets a custom property of float type."""
  if key not in self._artifact.custom_properties:
    return 0.0
  json_value = self.get_json_value_custom_property(key)
  if isinstance(json_value, float):
    return json_value
  return self._artifact.custom_properties[key].double_value

def get_bool_custom_property(self, key: str) -> bool:
  """Get a custom property of bool type."""
  if key not in self._artifact.custom_properties:
    return False
  json_value = self.get_json_value_custom_property(key)
  if isinstance(json_value, bool):
    return json_value
  return self._artifact.custom_properties[key].bool_value
def get_custom_property(
    self, key: str
) -> Optional[Union[int, float, str, bool, JsonValueType]]:
  """Gets the custom property stored under `key`.

  A JSON-backed value (including pending, not-yet-serialized ones) takes
  precedence; otherwise the first set scalar field of the MLMD `Value`
  proto is returned.

  Args:
    key: Name of the custom property.

  Returns:
    The stored value, or None if the property does not exist.
  """
  if key not in self._artifact.custom_properties:
    return None
  json_value = self.get_json_value_custom_property(key)
  # BUGFIX: use an explicit None check. Falsy JSON values (False, 0.0, '',
  # [], {}) are legitimate stored values; the previous truthiness test
  # skipped them, and since a struct-backed property sets none of the
  # scalar proto fields below, such values were wrongly reported as None.
  if json_value is not None:
    return json_value
  mlmd_value = self._artifact.custom_properties[key]
  if mlmd_value.HasField('int_value'):
    return mlmd_value.int_value
  elif mlmd_value.HasField('double_value'):
    return mlmd_value.double_value
  elif mlmd_value.HasField('string_value'):
    return mlmd_value.string_value
  elif mlmd_value.HasField('bool_value'):
    return mlmd_value.bool_value
  # No JSON value and no scalar field set: treat as absent.
  return None
def get_json_value_custom_property(self, key: str) -> JsonValueType:
  """Get a custom property of JSON type.

  Returns None when the key is absent or not backed by a struct value.
  """
  # Pending (not yet serialized) values take precedence over the proto.
  if key in self._cached_modifiable_custom_properties:
    return self._cached_modifiable_custom_properties[key]
  if (key not in self._artifact.custom_properties or
      not self._artifact.custom_properties[key].HasField('struct_value')):
    return None
  value = _decode_struct_value(
      self._artifact.custom_properties[key].struct_value)
  # We must cache the decoded lists or dictionaries returned here so that
  # if their recursive contents are modified, the Metadata proto message
  # can be updated to reflect this.
  if isinstance(value, (dict, list)):
    self._cached_modifiable_custom_properties[key] = value
  return value
def get_proto_custom_property(self, key: str) -> Optional[message.Message]:
  """Get a custom property of proto type.

  Returns None when the key is absent or not backed by a proto value.
  """
  # Pending (not yet serialized) values take precedence over the proto.
  if key in self._cached_modifiable_custom_properties:
    return self._cached_modifiable_custom_properties[key]
  if (key not in self._artifact.custom_properties or
      not self._artifact.custom_properties[key].HasField('proto_value')):
    return None
  value = proto_utils.unpack_proto_any(
      self._artifact.custom_properties[key].proto_value)
  # We must cache the protobuf message here so that if its contents are
  # modified, the Metadata proto message can be updated to reflect this.
  if isinstance(value, message.Message):
    self._cached_modifiable_custom_properties[key] = value
  return value
def copy_from(self, other: 'Artifact'):
  """Set uri, properties and custom properties from a given Artifact.

  Both artifacts must share the same artifact type class. Existing
  properties and custom properties on `self` are cleared before copying,
  and the cached modifiable values are deep-copied so later mutations on
  `other` do not leak into `self`.
  """
  assert self.type is other.type, (
      'Unable to set properties from an artifact of different type: {} vs {}'
      .format(self.type_name, other.type_name))
  self.uri = other.uri
  # Only propagate the type id when the source artifact's type proto has its
  # `id` field set (i.e. the type has been registered with MLMD).
  if other.artifact_type.HasField('id'):
    self.type_id = other.artifact_type.id
  self._artifact.properties.clear()
  self._artifact.properties.MergeFrom(other._artifact.properties)  # pylint: disable=protected-access
  self._artifact.custom_properties.clear()
  self._artifact.custom_properties.MergeFrom(
      other._artifact.custom_properties)  # pylint: disable=protected-access
  self._cached_modifiable_properties = copy.deepcopy(
      other._cached_modifiable_properties)  # pylint: disable=protected-access
  self._cached_modifiable_custom_properties = copy.deepcopy(
      other._cached_modifiable_custom_properties)  # pylint: disable=protected-access
def _is_artifact_type(value: Any):
return inspect.isclass(value) and issubclass(value, Artifact) | null |
166,419 | from __future__ import annotations
import abc
import copy
import dataclasses
import inspect
import json
import textwrap
from typing import Any, Dict, Generic, Iterable, List, Optional, Sequence, Set, Type, TypeVar, Union, cast
from absl import logging
from tfx.dsl.placeholder import artifact_placeholder
from tfx.types import artifact_utils
from tfx.types.artifact import Artifact
from tfx.utils import deprecation_utils
from tfx.utils import doc_controls
from tfx.utils import json_utils
import typing_extensions
from google.protobuf import json_format
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
_EXEC_PROPERTY_CLASSES = (int, float, str, bool, message.Message, list, dict)


def _is_property_dict(value: Any):
  """Returns True iff `value` is a dict with str keys and supported exec-property values."""
  if not isinstance(value, dict):
    return False
  return all(
      isinstance(key, str) and isinstance(val, _EXEC_PROPERTY_CLASSES)
      for key, val in value.items()
  )
166,420 | from __future__ import annotations
import abc
import copy
import dataclasses
import inspect
import json
import textwrap
from typing import Any, Dict, Generic, Iterable, List, Optional, Sequence, Set, Type, TypeVar, Union, cast
from absl import logging
from tfx.dsl.placeholder import artifact_placeholder
from tfx.types import artifact_utils
from tfx.types.artifact import Artifact
from tfx.utils import deprecation_utils
from tfx.utils import doc_controls
from tfx.utils import json_utils
import typing_extensions
from google.protobuf import json_format
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
class BaseChannel(abc.ABC, Generic[_AT]):
  """An abstraction for component (BaseNode) artifact inputs.

  `BaseChannel` is often interchangeably used with the term 'channel' (not
  capital `Channel` which points to the legacy class name).

  Component takes artifact inputs distinguished by each "input key". For
  example:

      trainer = Trainer(
          examples=example_gen.outputs['examples'])
                   ^^^^^^^^ input key
                            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ channel

  Here "examples" is the input key of the `Examples` artifact type.
  `example_gen.outputs['examples']` is a channel. Typically a single channel
  refers to a *list of `Artifact` of a homogeneous type*. Since channel is a
  declarative abstraction it is not strictly bound to the actual artifact, but
  is more of an *input selector*.

  The most commonly used channel type is an `OutputChannel` (in the form of
  `component.outputs["key"]`, which selects the artifact produced by the
  component in the same pipeline run (in synchronous execution mode; more
  information on OutputChannel docstring), and is typically a single artifact.

  Attributes:
    type: The artifact type class that the Channel takes.
    is_optional: If this channel is optional (e.g. may trigger components at
      run time if there are no artifacts in the channel). None if not
      explicitly set.
  """

  def __init__(self, type: Type[_AT]):  # pylint: disable=redefined-builtin
    if not _is_artifact_type(type):
      raise ValueError(
          'Argument "type" of BaseChannel constructor must be a subclass of '
          f'tfx.Artifact (got {type}).')
    self._artifact_type = type
    self._input_trigger = None
    self._original_channel = None
    self._is_optional = None

  # NOTE(review): this reads as a @property getter; the decorator appears to
  # have been stripped during extraction -- confirm against the original file.
  def is_optional(self) -> Optional[bool]:
    """If this is an "optional" channel. Changes Pipeline *runtime* behavior."""
    return self._is_optional

  # TODO(kmonte): Update this to Self once we're on 3.11 everywhere
  def as_optional(self) -> typing_extensions.Self:
    """Creates an optional version of self.

    By default component input channels are considered required, meaning
    if the channel does not contain at least 1 artifact, the component
    will be skipped. Making channel optional disables this requirement and
    allows components to be executed with no artifacts from this channel.

    Returns:
      A copy of self which is optional.
    """
    new_channel = copy.copy(self)
    new_channel._is_optional = True  # pylint: disable=protected-access
    return new_channel

  # NOTE(review): presumably a @property / @type.setter pair originally.
  def type(self) -> Type[_AT]:  # pylint: disable=redefined-builtin
    return self._artifact_type

  def type(self, value: Type[_AT]):  # pylint: disable=redefined-builtin
    self._set_type(value)

  def _set_type(self, value: Type[_AT]):
    # Subclasses that support re-typing override this hook.
    raise NotImplementedError('Cannot change artifact type.')

  def get_data_dependent_node_ids(self) -> Set[str]:
    """Get data dependent nodes of this channel.

    Currently only the `OutputChannel` directly imposes the data dependency,
    but other channels can also indirectly have a data dependency if they
    depend on the OutputChannel. Use this abstract method to define transitive
    data dependency.

    Returns:
      A set of data-dependent node IDs.
    """
    # NOTE(review): no body besides the docstring (implicitly returns None);
    # likely an @abc.abstractmethod whose decorator was stripped -- confirm.

  def type_name(self):
    """Name of the artifact type class that Channel takes."""
    return self.type.TYPE_NAME

  def input_trigger(self) -> _InputTrigger:  # pylint: disable=g-missing-from-attributes
    return self._input_trigger

  def _with_input_trigger(self, input_trigger: _InputTrigger):
    """Creates shallow-copied channel with new annotations."""
    # Save a copy of the original object.
    self._original_channel = self
    result = copy.copy(self)
    result._input_trigger = input_trigger  # pylint: disable=protected-access
    return result

  def no_trigger(self):
    return self._with_input_trigger(NoTrigger())

  def trigger_by_property(self, *property_keys: str):
    return self._with_input_trigger(TriggerByProperty(property_keys))

  def future(self) -> ChannelWrappedPlaceholder:
    # Wraps the channel as a placeholder (e.g. for use in conditionals).
    return ChannelWrappedPlaceholder(self)

  def __eq__(self, other):
    # Identity-based equality: two channels compare equal only when they are
    # the same object.
    return self is other

  def __hash__(self):
    return hash(id(self))
class UnionChannel(BaseChannel):
  """Union of multiple channels with the same type.

  Prefer to use union() to create UnionChannel.

  Currently future() method is only supported for the Channel class, so
  conditional does not yet work with channel union.
  """

  def __init__(self, channels: Iterable[BaseChannel]):
    channels = list(channels)
    if not channels:
      raise ValueError('At least one input channel expected.')
    self.channels = []
    # Flatten nested unions so self.channels contains only leaf channels.
    for channel in channels:
      if isinstance(channel, UnionChannel):
        self.channels.extend(cast(UnionChannel, channel).channels)
      elif isinstance(channel, BaseChannel):
        self.channels.append(channel)
      else:
        raise ValueError('Unexpected channel type: %s.' % channel.type_name)
    super().__init__(type=channels[0].type)
    # Every member channel must share the artifact type of the first one.
    for channel in self.channels:
      if channel.type != self.type:
        raise TypeError(
            'Unioned channels must have the same type. Expected %s (got %s).' %
            (self.type, channel.type))

  def get_data_dependent_node_ids(self) -> Set[str]:
    # Union of the data-dependent node ids of all member channels.
    if self.channels:
      return set.union(
          *[chan.get_data_dependent_node_ids() for chan in self.channels])
    return set()
The provided code snippet includes necessary dependencies for implementing the `union` function. Write a Python function `def union(input_channels: Iterable[BaseChannel]) -> UnionChannel` to solve the following problem:
Convenient method to combine multiple input channels into union channel.
Here is the function:
def union(input_channels: Iterable[BaseChannel]) -> UnionChannel:
  """Combines several input channels of the same type into one UnionChannel."""
  merged = UnionChannel(input_channels)
  return merged
166,421 | import argparse
import base64
import json
from typing import List, Tuple
import absl
from absl import app
from absl.flags import argparse_flags
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.utils import import_utils
The provided code snippet includes necessary dependencies for implementing the `_run_executor` function. Write a Python function `def _run_executor(args, pipeline_args) -> None` to solve the following problem:
r"""Select a particular executor and run it based on name. # pylint: disable=line-too-long _run_executor() is used to invoke a class subclassing tfx.dsl.components.base.base_executor.BaseExecutor. This function can be used for both invoking the executor on remote environments as well as for unit testing of executors. How to invoke an executor as standalone: # TODO(b/132958430): Create utility script to generate arguments for run_executor.py First, the input data needs to be prepared. An easy way to generate the test data is to fully run the pipeline once. This will generate the data to be used for testing as well as log the artifacts to be used as input parameters. In each executed component, three log entries will be generated similar to the below: ``` [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,116] {base_executor.py:72} INFO - Starting Executor execution. [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:74} INFO - Inputs for Executor is: {"input_base": [{"artifact": {"id": "1", "typeId": "1", "uri": "/usr/local/google/home/khaas/taxi/data/simple", "properties": {"split": {"stringValue": ""}, "state": {"stringValue": "published"}, "span": {"intValue": "1"}, "type_name": {"stringValue": "ExternalPath"}}}, "artifact_type": {"id": "1", "name": "ExternalPath", "properties": {"span": "INT", "name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING"}}}]} [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:76} INFO - Outputs for Executor is: {"examples": [{"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/train/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "train"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": 
"STRING", "span": "INT"}}}, {"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/eval/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "eval"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}]} [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:78} INFO - Execution properties for Executor is: {"output": "{ \"splitConfig\": {\"splits\": [{\"name\": \"train\", \"hashBuckets\": 2}, {\"name\": \"eval\",\"hashBuckets\": 1}]}}"} ``` Each of these map directly to the input parameters expected by run_executor(): ``` python scripts/run_executor.py \ --executor_class_path=tfx.components.example_gen.csv_example_gen.executor.Executor \ --inputs={"input_base": [{"artifact": {"id": "1", "typeId": "1", "uri": "/usr/local/google/home/khaas/taxi/data/simple", "properties": {"split": {"stringValue": ""}, "state": {"stringValue": "published"}, "span": {"intValue": "1"}, "type_name": {"stringValue": "ExternalPath"}}}, "artifact_type": {"id": "1", "name": "ExternalPath", "properties": {"span": "INT", "name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING"}}}]} \ --outputs={"examples": [{"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/train/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "train"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}, {"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/eval/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "eval"}, 
"span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}]} \ --exec-properties={"output": "{ \"splitConfig\": {\"splits\": [{\"name\": \"train\", \"hashBuckets\": 2}, {\"name\": \"eval\",\"hashBuckets\": 1}]}}"} ``` # pylint: disable=line-too-long Args: args: - inputs: The input artifacts for this execution, serialized as JSON. - outputs: The output artifacts to be generated by this execution, serialized as JSON. - exec_properties: The execution properties to be used by this execution, serialized as JSON. pipeline_args: Optional parameter that maps to the optional_pipeline_args parameter in the pipeline, which provides additional configuration options for apache-beam and tensorflow.logging. Returns: None Raises: None
Here is the function:
def _run_executor(args, pipeline_args) -> None:
  """Imports the executor named by `--executor_class_path` and runs it.

  The executor class (a subclass of
  tfx.dsl.components.base.base_executor.BaseExecutor) is loaded by path, its
  inputs, outputs and execution properties are deserialized from JSON
  (optionally base64-encoded), and its `Do()` method is invoked. This is used
  both for running executors on remote environments and for unit testing
  them; a fully-run pipeline logs the exact inputs/outputs/exec_properties
  JSON payloads that can be replayed here.

  Args:
    args: Parsed command-line namespace carrying `executor_class_path`,
      `temp_directory_path`, the (possibly base64-encoded) `inputs`,
      `outputs` and `exec_properties` JSON payloads, and
      `write_outputs_stdout`.
    pipeline_args: Extra flags forwarded to the executor context (e.g.
      apache-beam pipeline args for Beam-based executors, or
      tensorflow.logging configuration).

  Returns:
    None. If `args.write_outputs_stdout` is set, the serialized outputs are
    printed as the last line of stdout (consumed via xcom by Airflow).
  """
  absl.logging.set_verbosity(absl.logging.INFO)

  def _payload(raw, raw_base64):
    # Exactly one of the pair is provided (enforced by the argument parser's
    # mutually exclusive groups).
    return raw or base64.b64decode(raw_base64)

  inputs_str = _payload(args.inputs, args.inputs_base64)
  outputs_str = _payload(args.outputs, args.outputs_base64)
  exec_properties_str = _payload(args.exec_properties,
                                 args.exec_properties_base64)

  inputs = artifact_utils.parse_artifact_dict(inputs_str)
  outputs = artifact_utils.parse_artifact_dict(outputs_str)
  exec_properties = json.loads(exec_properties_str)
  absl.logging.info(
      'Executor {} do: inputs: {}, outputs: {}, exec_properties: {}'.format(
          args.executor_class_path, inputs, outputs, exec_properties))

  executor_cls = import_utils.import_class_by_path(args.executor_class_path)
  # Beam-based executors take beam pipeline args; plain executors take the
  # same flags as generic extra_flags.
  if issubclass(executor_cls, base_beam_executor.BaseBeamExecutor):
    executor_context = base_beam_executor.BaseBeamExecutor.Context(
        beam_pipeline_args=pipeline_args,
        tmp_dir=args.temp_directory_path,
        unique_id='')
  else:
    executor_context = base_executor.BaseExecutor.Context(
        extra_flags=pipeline_args,
        tmp_dir=args.temp_directory_path,
        unique_id='')
  executor = executor_cls(executor_context)

  absl.logging.info('Starting executor')
  executor.Do(inputs, outputs, exec_properties)

  # The last line of stdout will be pushed to xcom by Airflow.
  if args.write_outputs_stdout:
    print(artifact_utils.jsonify_artifact_dict(outputs))
print(artifact_utils.jsonify_artifact_dict(outputs)) | r"""Select a particular executor and run it based on name. # pylint: disable=line-too-long _run_executor() is used to invoke a class subclassing tfx.dsl.components.base.base_executor.BaseExecutor. This function can be used for both invoking the executor on remote environments as well as for unit testing of executors. How to invoke an executor as standalone: # TODO(b/132958430): Create utility script to generate arguments for run_executor.py First, the input data needs to be prepared. An easy way to generate the test data is to fully run the pipeline once. This will generate the data to be used for testing as well as log the artifacts to be used as input parameters. In each executed component, three log entries will be generated similar to the below: ``` [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,116] {base_executor.py:72} INFO - Starting Executor execution. [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:74} INFO - Inputs for Executor is: {"input_base": [{"artifact": {"id": "1", "typeId": "1", "uri": "/usr/local/google/home/khaas/taxi/data/simple", "properties": {"split": {"stringValue": ""}, "state": {"stringValue": "published"}, "span": {"intValue": "1"}, "type_name": {"stringValue": "ExternalPath"}}}, "artifact_type": {"id": "1", "name": "ExternalPath", "properties": {"span": "INT", "name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING"}}}]} [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:76} INFO - Outputs for Executor is: {"examples": [{"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/train/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "train"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", 
"type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}, {"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/eval/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "eval"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}]} [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:78} INFO - Execution properties for Executor is: {"output": "{ \"splitConfig\": {\"splits\": [{\"name\": \"train\", \"hashBuckets\": 2}, {\"name\": \"eval\",\"hashBuckets\": 1}]}}"} ``` Each of these map directly to the input parameters expected by run_executor(): ``` python scripts/run_executor.py \ --executor_class_path=tfx.components.example_gen.csv_example_gen.executor.Executor \ --inputs={"input_base": [{"artifact": {"id": "1", "typeId": "1", "uri": "/usr/local/google/home/khaas/taxi/data/simple", "properties": {"split": {"stringValue": ""}, "state": {"stringValue": "published"}, "span": {"intValue": "1"}, "type_name": {"stringValue": "ExternalPath"}}}, "artifact_type": {"id": "1", "name": "ExternalPath", "properties": {"span": "INT", "name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING"}}}]} \ --outputs={"examples": [{"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/train/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "train"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}, {"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/eval/", "properties": {"type_name": {"stringValue": 
"ExamplesPath"}, "split": {"stringValue": "eval"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}]} \ --exec-properties={"output": "{ \"splitConfig\": {\"splits\": [{\"name\": \"train\", \"hashBuckets\": 2}, {\"name\": \"eval\",\"hashBuckets\": 1}]}}"} ``` # pylint: disable=line-too-long Args: args: - inputs: The input artifacts for this execution, serialized as JSON. - outputs: The output artifacts to be generated by this execution, serialized as JSON. - exec_properties: The execution properties to be used by this execution, serialized as JSON. pipeline_args: Optional parameter that maps to the optional_pipeline_args parameter in the pipeline, which provides additional configuration options for apache-beam and tensorflow.logging. Returns: None Raises: None |
166,422 | import argparse
import base64
import json
from typing import List, Tuple
import absl
from absl import app
from absl.flags import argparse_flags
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.utils import import_utils
The provided code snippet includes necessary dependencies for implementing the `parse_flags` function. Write a Python function `def parse_flags(argv: List[str]) -> Tuple[argparse.Namespace, List[str]]` to solve the following problem:
Parses command line arguments. # pylint: disable=line-too-long Args: argv: Unparsed arguments for run_executor.py --executor_class_path: Python class of executor in format of <module>.<class>. --temp_directory_path: Common temp directory path for executors. --inputs: JSON serialized dict of input artifacts. If the input needs to be base64-encoded, use --inputs-base64 instead. --inputs-base64: base64-encoded JSON serialized dict of input artifacts. If the input is not base64-encoded, use --inputs instead. --outputs: JSON serialized dict of output artifacts. If the output needs to be base64-encoded, use --outputs-base64 instead. --outputs-base64: base64-encoded JSON serialized dict of output artifacts. If the output is not base64-encoded, use --outputs instead. --exec_properties: JSON serialized dict of (non artifact) execution properties. If the execution properties need to be base64-encoded, use --exec_properties-base64 instead. --exec_properties-base64: base64-encoded JSON serialized dict of (non artifact) execution properties. If the execution properties are not base64-encoded, use --exec_properties instead. --write_outputs_stdout: Write outputs to last line of stdout, which will be pushed to xcom in Airflow. Please ignore by other users or orchestrators. # pylint: disable=line-too-long Returns: None Raises: None
Here is the function:
def parse_flags(argv: List[str]) -> Tuple[argparse.Namespace, List[str]]:
  """Parses run_executor.py command line arguments.

  Each of the inputs / outputs / exec-properties payloads may be supplied
  either as plain JSON (e.g. --inputs) or as base64-encoded JSON (e.g.
  --inputs-base64); the two spellings of each flag are mutually exclusive and
  exactly one of each pair is required.

  Flags:
    --executor_class_path: Python class of executor in format of
      <module>.<class>. Required.
    --temp_directory_path: Common temp directory path for executors.
    --inputs / --inputs-base64: JSON serialized dict of input artifacts.
    --outputs / --outputs-base64: JSON serialized dict of output artifacts.
    --exec-properties / --exec-properties-base64: JSON serialized dict of
      (non artifact) execution properties.
    --write-outputs-stdout: Write outputs to the last line of stdout, which
      will be pushed to xcom in Airflow; other users or orchestrators should
      ignore it.

  Args:
    argv: Unparsed command line arguments.

  Returns:
    The (namespace, remaining_args) tuple produced by
    `ArgumentParser.parse_known_args`.
  """
  flag_parser = argparse_flags.ArgumentParser()
  flag_parser.add_argument(
      '--executor_class_path',
      type=str,
      required=True,
      help='Python class of executor in format of <module>.<class>.')
  flag_parser.add_argument(
      '--temp_directory_path',
      type=str,
      help='common temp directory path for executors')

  def add_json_or_base64_pair(flag: str, plain_help: str, encoded_help: str):
    # Every payload flag has a plain-JSON form and a base64 form; exactly one
    # of the two must be given.
    group = flag_parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--' + flag, type=str, help=plain_help)
    group.add_argument('--' + flag + '-base64', type=str, help=encoded_help)

  add_json_or_base64_pair(
      'inputs',
      'json serialized dict of input artifacts.',
      'base64 encoded json serialized dict of input artifacts.')
  add_json_or_base64_pair(
      'outputs',
      'json serialized dict of output artifacts.',
      'base64 encoded json serialized dict of output artifacts.')
  add_json_or_base64_pair(
      'exec-properties',
      'json serialized dict of (non artifact) execution properties.',
      'base64 encoded dict of (non artifact) execution properties.')
  flag_parser.add_argument(
      '--write-outputs-stdout',
      dest='write_outputs_stdout',
      action='store_true',
      help='Write outputs to last line of stdout, which will '
      'be pushed to xcom in Airflow. Please ignore by other users or '
      'orchestrators.')
  return flag_parser.parse_known_args(argv)
166,423 | import os
import sys
from typing import List, Optional
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.components.base import base_executor
from tfx.types import channel_utils
from tfx.types.artifact import PropertyType
from tfx.utils import import_utils
from tfx.utils import proto_utils
from google.protobuf import message
PropertyType = artifact_property.PropertyType
The provided code snippet includes necessary dependencies for implementing the `run_component` function. Write a Python function `def run_component(full_component_class_name: str, temp_directory_path: Optional[str] = None, beam_pipeline_args: Optional[List[str]] = None, **arguments)` to solve the following problem:
r"""Loads a component, instantiates it with arguments and runs its executor. The component class is instantiated, so the component code is executed, not just the executor code. To pass artifact URI, use <input_name>_uri argument name. To pass artifact property, use <input_name>_<property> argument name. Protobuf property values can be passed as JSON-serialized protobufs. # pylint: disable=line-too-long Example:: # When run as a script: python3 scripts/run_component.py \ --full-component-class-name tfx.components.StatisticsGen \ --examples-uri gs://my_bucket/chicago_taxi_simple/CsvExamplesGen/examples/1/ \ --examples-split-names '["train", "eval"]' \ --output-uri gs://my_bucket/chicago_taxi_simple/StatisticsGen/output/1/ # When run as a function: run_component( full_component_class_name='tfx.components.StatisticsGen', examples_uri='gs://my_bucket/chicago_taxi_simple/CsvExamplesGen/sxamples/1/', examples_split_names='["train", "eval"]', output_uri='gs://my_bucket/chicago_taxi_simple/StatisticsGen/output/1/', ) Args: full_component_class_name: The component class name including module name. temp_directory_path: Optional. Temporary directory path for the executor. beam_pipeline_args: Optional. Arguments to pass to the Beam pipeline. **arguments: Key-value pairs with component arguments.
Here is the function:
def run_component(full_component_class_name: str,
                  temp_directory_path: Optional[str] = None,
                  beam_pipeline_args: Optional[List[str]] = None,
                  **arguments):
  r"""Loads a component, instantiates it with arguments and runs its executor.

  The component class is instantiated, so the component code is executed,
  not just the executor code.

  To pass artifact URI, use <input_name>_uri argument name.
  To pass artifact property, use <input_name>_<property> argument name.
  Protobuf property values can be passed as JSON-serialized protobufs.

  # pylint: disable=line-too-long
  Example::

    # When run as a script:
    python3 scripts/run_component.py \
      --full-component-class-name tfx.components.StatisticsGen \
      --examples-uri gs://my_bucket/chicago_taxi_simple/CsvExamplesGen/examples/1/ \
      --examples-split-names '["train", "eval"]' \
      --output-uri gs://my_bucket/chicago_taxi_simple/StatisticsGen/output/1/

    # When run as a function:
    run_component(
      full_component_class_name='tfx.components.StatisticsGen',
      examples_uri='gs://my_bucket/chicago_taxi_simple/CsvExamplesGen/examples/1/',
      examples_split_names='["train", "eval"]',
      output_uri='gs://my_bucket/chicago_taxi_simple/StatisticsGen/output/1/',
    )

  Args:
    full_component_class_name: The component class name including module name.
    temp_directory_path: Optional. Temporary directory path for the executor.
    beam_pipeline_args: Optional. Arguments to pass to the Beam pipeline.
    **arguments: Key-value pairs with component arguments.
  """
  component_class = import_utils.import_class_by_path(full_component_class_name)

  # Convert the raw (typically string/JSON) exec-property values into the
  # types declared by the component's spec: proto messages, ints and floats
  # are coerced; anything else is passed through unchanged.
  component_arguments = {}
  for name, execution_param in component_class.SPEC_CLASS.PARAMETERS.items():
    argument_value = arguments.get(name, None)
    if argument_value is None:
      continue
    param_type = execution_param.type
    if (isinstance(param_type, type) and
        issubclass(param_type, message.Message)):
      # Protobuf-typed parameters arrive as JSON-serialized protos.
      argument_value_obj = param_type()
      proto_utils.json_to_proto(argument_value, argument_value_obj)
    elif param_type is int:
      argument_value_obj = int(argument_value)
    elif param_type is float:
      argument_value_obj = float(argument_value)
    else:
      argument_value_obj = argument_value
    component_arguments[name] = argument_value_obj

  # Build a single-artifact channel for every input whose URI was supplied
  # via <input_name>_uri (or the legacy <input_name>_path spelling).
  for input_name, channel_param in component_class.SPEC_CLASS.INPUTS.items():
    uri = (arguments.get(input_name + '_uri') or
           arguments.get(input_name + '_path'))
    if uri:
      artifact = channel_param.type()
      artifact.uri = uri
      # Setting the artifact properties from <input_name>_<property>
      # arguments, coercing string values to the declared property type.
      for property_name, property_spec in (channel_param.type.PROPERTIES or
                                           {}).items():
        property_arg_name = input_name + '_' + property_name
        if property_arg_name in arguments:
          property_value = arguments[property_arg_name]
          if property_spec.type == PropertyType.INT:
            property_value = int(property_value)
          if property_spec.type == PropertyType.FLOAT:
            property_value = float(property_value)
          setattr(artifact, property_name, property_value)
      component_arguments[input_name] = channel_utils.as_channel([artifact])

  # Instantiating the component runs the component code itself (not just the
  # executor) and resolves its input/output channels and exec properties.
  component_instance = component_class(**component_arguments)

  input_dict = channel_utils.unwrap_channel_dict(component_instance.inputs)
  output_dict = channel_utils.unwrap_channel_dict(component_instance.outputs)
  exec_properties = component_instance.exec_properties

  # Generating paths for output artifacts: every artifact in an output
  # channel receives the URI supplied for that output (an empty channel gets
  # a fresh artifact first).
  for output_name, channel_param in component_class.SPEC_CLASS.OUTPUTS.items():
    uri = (arguments.get('output_' + output_name + '_uri') or
           arguments.get(output_name + '_uri') or
           arguments.get(output_name + '_path'))
    if uri:
      artifacts = output_dict[output_name]
      if not artifacts:
        artifacts.append(channel_param.type())
      for artifact in artifacts:
        artifact.uri = uri

  # Beam-based executors take beam_pipeline_args via a dedicated context
  # field; all other executors receive them as generic extra_flags.
  if issubclass(component_instance.executor_spec.executor_class,
                base_beam_executor.BaseBeamExecutor):
    executor_context = base_beam_executor.BaseBeamExecutor.Context(
        beam_pipeline_args=beam_pipeline_args,
        tmp_dir=temp_directory_path,
        unique_id='',
    )
  else:
    executor_context = base_executor.BaseExecutor.Context(
        extra_flags=beam_pipeline_args,
        tmp_dir=temp_directory_path,
        unique_id='',
    )
  executor = component_instance.executor_spec.executor_class(executor_context)
  executor.Do(
      input_dict=input_dict,
      output_dict=output_dict,
      exec_properties=exec_properties,
  )

  # Writing out the output artifact properties: for every
  # <output_name>_<property>_path argument, dump the (stringified) property
  # value of each artifact in that output channel to the given file.
  for output_name, channel_param in component_class.SPEC_CLASS.OUTPUTS.items():
    for property_name in channel_param.type.PROPERTIES or []:
      property_path_arg_name = output_name + '_' + property_name + '_path'
      property_path = arguments.get(property_path_arg_name)
      if property_path:
        artifacts = output_dict[output_name]
        for artifact in artifacts:
          property_value = getattr(artifact, property_name)
          os.makedirs(os.path.dirname(property_path), exist_ok=True)
          with open(property_path, 'w') as f:
            f.write(str(property_value))
166,424 | from typing import Dict, List, Optional
from tfx import types
from tfx.dsl.input_resolution import resolver_op
from tfx.dsl.input_resolution.ops import ops_utils
from tfx.dsl.input_resolution.ops import training_range_op
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.utils import typing_utils
The provided code snippet includes necessary dependencies for implementing the `_validate_input_dict` function. Write a Python function `def _validate_input_dict(input_dict: typing_utils.ArtifactMultiMap)` to solve the following problem:
Checks that the input_dict is properly formatted.
Here is the function:
def _validate_input_dict(input_dict: typing_utils.ArtifactMultiMap):
  """Checks that the input_dict is properly formatted.

  Raises:
    exceptions.SkipSignal: If the dict is empty or either required key maps
      to an empty artifact list.
    Whatever ops_utils.validate_input_dict raises for unexpected/missing keys.
  """
  # Nothing to resolve if there is no input at all.
  if not input_dict:
    raise exceptions.SkipSignal()
  required_keys = {ops_utils.MODEL_KEY, ops_utils.EXAMPLES_KEY}
  ops_utils.validate_input_dict(input_dict, required_keys, requires_all=True)
  # Both keys must map to at least one artifact.
  if not (
      input_dict[ops_utils.MODEL_KEY] and input_dict[ops_utils.EXAMPLES_KEY]
  ):
    raise exceptions.SkipSignal()
166,425 | import collections
from typing import List, Iterable, Tuple
from tfx.dsl.input_resolution import resolver_op
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `_get_neighbor_artifact_pairs` function. Write a Python function `def _get_neighbor_artifact_pairs( events: List[metadata_store_pb2.Event], ) -> Iterable[Tuple[int, int]]` to solve the following problem:
Gets artifact_id pair of neighbors from the list of Events. Artifact a and b is considered neighbor if there exist events e1 and e2 s.t. (e1.artifact_id = a) AND (e2.artifact_id = b) AND (e1.execution_id = e2.execution_id) Args: events: A list of MLMD Events. Yields: Edge as a tuple (artifact_id_1, artifact_id_2).
Here is the function:
def _get_neighbor_artifact_pairs(
    events: List[metadata_store_pb2.Event],
) -> Iterable[Tuple[int, int]]:
  """Gets artifact_id pairs of neighbor artifacts from the list of Events.

  Artifacts a and b are considered neighbors if some execution touches both,
  i.e. there exist events e1 and e2 such that (e1.artifact_id = a) AND
  (e2.artifact_id = b) AND (e1.execution_id = e2.execution_id).

  Args:
    events: A list of MLMD Events.

  Yields:
    Each undirected neighbor edge exactly once, as a tuple
    (smaller_artifact_id, larger_artifact_id).
  """
  # Index the bipartite artifact <-> execution relation in both directions.
  executions_of_artifact = collections.defaultdict(set)
  artifacts_of_execution = collections.defaultdict(set)
  for event in events:
    executions_of_artifact[event.artifact_id].add(event.execution_id)
    artifacts_of_execution[event.execution_id].add(event.artifact_id)

  for artifact_id, execution_ids in executions_of_artifact.items():
    # Collect every artifact sharing at least one execution with artifact_id.
    neighbors = set()
    for execution_id in execution_ids:
      neighbors |= artifacts_of_execution[execution_id]
    for neighbor_id in neighbors:
      # Only emit one direction of each edge, and never self edges.
      if artifact_id < neighbor_id:
        yield artifact_id, neighbor_id
166,426 | import collections
from typing import List, Iterable, Tuple
from tfx.dsl.input_resolution import resolver_op
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `_find_disjoint_sets` function. Write a Python function `def _find_disjoint_sets( verts: Iterable[int], edges: Iterable[Tuple[int, int]] ) -> List[List[int]]` to solve the following problem:
Finds disjoint sets.
Here is the function:
def _find_disjoint_sets(
    verts: Iterable[int], edges: Iterable[Tuple[int, int]]
) -> List[List[int]]:
  """Finds disjoint sets (union-find) of `verts` connected by `edges`.

  Args:
    verts: Vertex ids. Must be re-iterable (it is traversed twice).
    edges: Pairs of vertex ids; each pair merges the two vertices' sets.

  Returns:
    The disjoint sets as lists of vertex ids. Both the inner lists and the
    outer list are sorted ascending (each set is sorted, and sets are ordered
    by their smallest member).
  """
  parents = {a: a for a in verts}

  def find(a: int) -> int:
    # Iterative find with full path compression. The previous recursive
    # implementation could exceed Python's recursion limit on long parent
    # chains (e.g. a path graph whose edges are merged in descending order).
    root = a
    while parents[root] != root:
      root = parents[root]
    # Second pass: point every node on the walked path directly at the root.
    while parents[a] != root:
      parents[a], a = root, parents[a]
    return root

  def union(a: int, b: int):
    x, y = find(a), find(b)
    if x != y:
      # Union in a direction that smaller number node becomes the parent
      # node. By result, the root node of each disjoint set will be the one
      # with the smallest number.
      parents[max(x, y)] = min(x, y)

  for a, b in edges:
    union(a, b)

  # Python dict "order is guaranteed to be insertion order" from python 3.7
  # (https://docs.python.org/3/library/stdtypes.html#dict).
  # As it loops over the sorted node number, and since the root node of each
  # disjoint set is the one with the smallest node number, both the inner and
  # the outer lists of the result would be sorted.
  disjoint_sets = {}
  for a in sorted(verts):
    disjoint_sets.setdefault(find(a), []).append(a)
  return list(disjoint_sets.values())
166,427 | from typing import Dict, Type, Union
from tfx.dsl.components.common import resolver
from tfx.dsl.input_resolution import resolver_op
from tfx.dsl.input_resolution.ops import all_spans_op
from tfx.dsl.input_resolution.ops import consecutive_spans_op
from tfx.dsl.input_resolution.ops import equal_property_values_op
from tfx.dsl.input_resolution.ops import exclude_spans_op
from tfx.dsl.input_resolution.ops import graph_traversal_op
from tfx.dsl.input_resolution.ops import group_by_lineage_op
from tfx.dsl.input_resolution.ops import latest_create_time_op
from tfx.dsl.input_resolution.ops import latest_pipeline_run_outputs_op as latest_pipeline_run_op
from tfx.dsl.input_resolution.ops import latest_policy_model_op
from tfx.dsl.input_resolution.ops import latest_span_op
from tfx.dsl.input_resolution.ops import latest_version_op
from tfx.dsl.input_resolution.ops import paired_spans_op
from tfx.dsl.input_resolution.ops import shuffle_op
from tfx.dsl.input_resolution.ops import siblings_op
from tfx.dsl.input_resolution.ops import skip_if_empty_op
from tfx.dsl.input_resolution.ops import skip_if_less_than_n_spans_op
from tfx.dsl.input_resolution.ops import slice_op
from tfx.dsl.input_resolution.ops import sliding_window_op
from tfx.dsl.input_resolution.ops import span_driven_evaluator_inputs_op as evaluator_op
from tfx.dsl.input_resolution.ops import static_span_range_op
from tfx.dsl.input_resolution.ops import training_range_op
from tfx.dsl.input_resolution.ops import unnest_op
from tfx.dsl.input_resolution.strategies import conditional_strategy
from tfx.dsl.input_resolution.strategies import latest_artifact_strategy
from tfx.dsl.input_resolution.strategies import latest_blessed_model_strategy
from tfx.dsl.input_resolution.strategies import span_range_strategy
from tfx.utils import name_utils
from tfx.utils import typing_utils
# Union of everything that can be registered: resolver ops and strategies.
_OpTypes = Union[_ResolverOpType, _ResolverStrategyType]


def _register_op(cls: _ResolverOpType) -> None:
  """Registers a ResolverOp class under its class path and canonical name."""
  class_path = name_utils.get_full_name(cls, strict_check=False)
  if class_path in _OPS_BY_CLASSPATH:
    raise ValueError(f'Duplicated class path {class_path} while registering.')
  _OPS_BY_CLASSPATH[class_path] = cls
  if cls.canonical_name in _OPS_BY_NAME:
    raise ValueError(f'Duplicated name {cls.canonical_name} while registering.')
  _OPS_BY_NAME[cls.canonical_name] = cls


# Register every built-in resolver op. NOTE(review): these bare names are
# presumably module-level aliases of the classes in the imported *_op
# modules, defined elsewhere in this module — confirm.
_register_op(AllSpans)
_register_op(ConsecutiveSpans)
_register_op(EqualPropertyValues)
_register_op(ExcludeSpans)
_register_op(GraphTraversal)
_register_op(GroupByDisjointLineage)
_register_op(GroupByPivot)
_register_op(LatestCreateTime)
_register_op(LatestPipelineRunOutputs)
_register_op(LatestPolicyModel)
_register_op(LatestSpan)
_register_op(LatestVersion)
_register_op(PairedSpans)
_register_op(Shuffle)
_register_op(Siblings)
_register_op(SkipIfEmpty)
_register_op(SkipIfLessThanNSpans)
_register_op(Slice)
_register_op(SlidingWindow)
_register_op(SpanDrivenEvaluatorInputs)
_register_op(StaticSpanRange)
_register_op(TrainingRange)
_register_op(Unnest)


def _register_strategy(cls: _ResolverStrategyType) -> None:
  """Registers a ResolverStrategy class under its class path.

  Unlike ops, strategies are not registered under a canonical name.
  """
  class_path = name_utils.get_full_name(cls, strict_check=False)
  if class_path in _OPS_BY_CLASSPATH:
    raise ValueError(f'Duplicated class path {class_path} while registering.')
  _OPS_BY_CLASSPATH[class_path] = cls


# Register the built-in resolver strategies.
_register_strategy(conditional_strategy.ConditionalStrategy)
_register_strategy(latest_artifact_strategy.LatestArtifactStrategy)
_register_strategy(latest_blessed_model_strategy.LatestBlessedModelStrategy)
_register_strategy(span_range_strategy.SpanRangeStrategy)


def testonly_register(cls: _OpTypes) -> _OpTypes:
  """Registers an op or strategy class for tests.

  Returns the class unchanged, so it can be used as a class decorator.
  """
  if issubclass(cls, resolver_op.ResolverOp):
    _register_op(cls)
  else:
    _register_strategy(cls)
  return cls
166,428 | import functools
from typing import Dict, List, Optional, Sequence, Set
from tfx import types
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `get_valid_artifact_states_filter_query` function. Write a Python function `def get_valid_artifact_states_filter_query( valid_artifact_states: Sequence['metadata_store_pb2.Artifact.State'], ) -> str` to solve the following problem:
Returns a filter query for valid artifact states.
Here is the function:
def get_valid_artifact_states_filter_query(
    valid_artifact_states: Sequence['metadata_store_pb2.Artifact.State'],
) -> str:
  """Returns an MLMD filter query matching the given artifact states."""
  # q.to_sql_string renders the state sequence as a filter-query list literal.
  sql_state_list = q.to_sql_string(valid_artifact_states)
  return f'state IN {sql_state_list}'
166,429 | import functools
from typing import Dict, List, Optional, Sequence, Set
from tfx import types
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `validate_argument` function. Write a Python function `def validate_argument( argument_name: str, argument_value: int, min_value: Optional[int] = None, max_value: Optional[int] = None, )` to solve the following problem:
Validates that the argument is >= a min value and/or <= a max value.
Here is the function:
def validate_argument(
    argument_name: str,
    argument_value: int,
    min_value: Optional[int] = None,
    max_value: Optional[int] = None,
):
  """Validates that the argument is >= a min value and/or <= a max value.

  Args:
    argument_name: Name of the argument, used only in the error message.
    argument_value: The value to check.
    min_value: Inclusive lower bound, or None for no lower bound.
    max_value: Inclusive upper bound, or None for no upper bound.

  Raises:
    exceptions.InvalidArgument: If the value violates either bound (the lower
      bound is checked first).
  """
  bound_checks = (
      (min_value, '>=', lambda value, bound: value < bound),
      (max_value, '<=', lambda value, bound: value > bound),
  )
  for bound, symbol, violates in bound_checks:
    if bound is not None and violates(argument_value, bound):
      raise exceptions.InvalidArgument(
          f'{argument_name} must be {symbol} {bound} but was set to'
          f' {argument_value}.'
      )
166,430 | import functools
from typing import Dict, List, Optional, Sequence, Set
from tfx import types
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `get_valid_artifacts` function. Write a Python function `def get_valid_artifacts( artifacts: Sequence[types.Artifact], property_types: Dict[str, types.artifact.PropertyType], ) -> List[types.Artifact]` to solve the following problem:
Returns artifacts that have the required property names and types.
Here is the function:
def get_valid_artifacts(
    artifacts: Sequence[types.Artifact],
    property_types: Dict[str, types.artifact.PropertyType],
) -> List[types.Artifact]:
  """Returns artifacts that have the required property names and types."""

  def has_required_properties(candidate: types.Artifact) -> bool:
    # Artifacts without a property schema can never satisfy the requirements.
    declared = candidate.PROPERTIES
    if declared is None:
      return False
    return all(
        name in declared and declared[name].type == required_type
        for name, required_type in property_types.items()
    )

  return [a for a in artifacts if has_required_properties(a)]
166,431 | import functools
from typing import Dict, List, Optional, Sequence, Set
from tfx import types
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `filter_artifacts_by_span` function. Write a Python function `def filter_artifacts_by_span( artifacts: List[types.Artifact], span_descending: bool, n: int = 1, skip_last_n: int = 0, keep_all_versions: bool = False, min_span: Optional[int] = None, version_sort_keys: Sequence[str] = (), ) -> List[types.Artifact]` to solve the following problem:
Filters artifacts by their "span" PROPERTY. This should only be used a shared utility for LatestSpan and ConsecutiveSpans. Args: artifacts: The list of Artifacts to filter. span_descending: If true, then the artifacts will be sorted by span in descending order. Else, they will be sorted in ascending order by span. Set to true for LatestSpan, and set to false for ConsecutiveSpans. n: The number of spans to return. If n <= 0, then n is set to the total number of unique spans. skip_last_n: Number of largest spans to skip. For example, if the spans are [1, 2, 3] and skip_last_n=1, then only spans [1, 2] will be considered. keep_all_versions: If true, all versions of the n spans are returned. Else, only the latest version is returned. min_span: Minimum span before which no span will be considered. version_sort_keys: List of string artifact attributes to sort or filter the versions witin the spans, applied in order of specification. Nested keys can use '.' separator for e.g. 'mlmd_artifact.create_time_since_epoch'. The default key is version number, create time and id in the same order. Returns: The filtered artifacts.
Here is the function:
def filter_artifacts_by_span(
    artifacts: List[types.Artifact],
    span_descending: bool,
    n: int = 1,
    skip_last_n: int = 0,
    keep_all_versions: bool = False,
    min_span: Optional[int] = None,
    version_sort_keys: Sequence[str] = (),
) -> List[types.Artifact]:
  """Filters artifacts by their "span" PROPERTY.

  This should only be used as a shared utility for LatestSpan and
  ConsecutiveSpans.

  Args:
    artifacts: The list of Artifacts to filter.
    span_descending: If true, spans are considered in descending order when
      picking the top n (LatestSpan); if false, in ascending order
      (ConsecutiveSpans). The returned list is always ordered by ascending
      span.
    n: The number of spans to return. If n <= 0, all qualifying spans are
      kept.
    skip_last_n: Number of largest spans to skip. For example, if the spans
      are [1, 2, 3] and skip_last_n=1, then only spans [1, 2] are considered.
    keep_all_versions: If true, all versions of the selected spans are
      returned; otherwise only the latest version per span.
    min_span: Minimum span; smaller spans are discarded before any other
      selection.
    version_sort_keys: Artifact attribute paths used to order versions within
      a span, applied in the given order. Nested attributes use a '.'
      separator, e.g. 'mlmd_artifact.create_time_since_epoch'. When empty,
      versions are ordered by (version, create time, id).

  Returns:
    The filtered artifacts, ordered by ascending span.
  """
  if not artifacts:
    return []

  # Select the qualifying spans: drop those below min_span, drop the
  # skip_last_n largest, then take the first n in the requested direction.
  candidate_spans = sorted({a.span for a in artifacts})
  if min_span is not None:
    candidate_spans = [s for s in candidate_spans if s >= min_span]
  if skip_last_n:
    candidate_spans = candidate_spans[:-skip_last_n]
  if span_descending:
    candidate_spans.reverse()
  if n > 0:
    candidate_spans = candidate_spans[:n]
  if not candidate_spans:
    return []

  by_span = {}
  for art in artifacts:
    by_span.setdefault(art.span, []).append(art)

  if version_sort_keys:
    def version_key(art):
      # Resolve each possibly-nested attribute path, e.g.
      # 'mlmd_artifact.create_time_since_epoch' becomes
      # getattr(getattr(art, 'mlmd_artifact'), 'create_time_since_epoch').
      return tuple(
          functools.reduce(getattr, path.split('.'), art)
          for path in version_sort_keys
      )
  else:
    def version_key(art):
      # span_descending applies only to span selection; versions are always
      # ordered ascending. The latest version is the largest version number,
      # with ties broken by create time and then id.
      return (
          art.version,
          art.mlmd_artifact.create_time_since_epoch,
          art.id,
      )

  selected = []
  for span in sorted(candidate_spans):
    versions = by_span[span]
    if keep_all_versions:
      selected.extend(sorted(versions, key=version_key))
    else:
      selected.append(max(versions, key=version_key))
  return selected
166,432 | import functools
from typing import Dict, List, Optional, Sequence, Set
from tfx import types
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `sort_artifact_dict` function. Write a Python function `def sort_artifact_dict( artifacts_by_key: Dict[str, List[types.Artifact]] ) -> Dict[str, List[types.Artifact]]` to solve the following problem:
Sorts the artifact dict values by (creation time, id).
Here is the function:
def sort_artifact_dict(
    artifacts_by_key: Dict[str, List[types.Artifact]]
) -> Dict[str, List[types.Artifact]]:
  """Sorts each value list of the artifact dict by (creation time, id).

  The input dict is updated in place and also returned.
  """
  # If the user wants to sort Examples artifacts by span/version, they can
  # call the all_spans(...) canned resolver functions instead.
  creation_order = lambda a: (a.mlmd_artifact.create_time_since_epoch, a.id)
  for key in list(artifacts_by_key):
    artifacts_by_key[key] = sorted(artifacts_by_key[key], key=creation_order)
  return artifacts_by_key
166,433 | from typing import Any, List, Sequence
from tfx import types
from tfx.dsl.input_resolution import resolver_op
from tfx.dsl.input_resolution.ops import ops_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import event_lib
from tfx.types import artifact_utils
from ml_metadata.proto import metadata_store_pb2
from ml_metadata.tools.mlmd_resolver import metadata_resolver
The provided code snippet includes necessary dependencies for implementing the `_validate_input_list` function. Write a Python function `def _validate_input_list( input_list: Sequence[types.Artifact], ) -> types.Artifact` to solve the following problem:
Checks that input_list contains only a single Model, and returns it.
Here is the function:
def _validate_input_list(
    input_list: Sequence[types.Artifact],
) -> types.Artifact:
  """Checks that input_list contains only a single Model, and returns it.

  Args:
    input_list: Candidate artifacts; expected to hold exactly one Model.

  Returns:
    The single Model artifact.

  Raises:
    exceptions.InvalidArgument: If more than one artifact was given, or the
      first artifact is not a Model.
  """
  # NOTE(review): an empty input_list raises IndexError here (input_list[0])
  # rather than InvalidArgument -- confirm callers guarantee a non-empty list.
  if (
      len(input_list) > 1
      or input_list[0].TYPE_NAME != ops_utils.MODEL_TYPE_NAME
  ):
    raise exceptions.InvalidArgument(
        'The input_list for TrainingRange expects only a single Model artifact.'
    )
  return input_list[0] | Checks that input_list contains only a single Model, and returns it.
166,434 | from typing import Any, List, Sequence
from tfx import types
from tfx.dsl.input_resolution import resolver_op
from tfx.dsl.input_resolution.ops import ops_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import event_lib
from tfx.types import artifact_utils
from ml_metadata.proto import metadata_store_pb2
from ml_metadata.tools.mlmd_resolver import metadata_resolver
The provided code snippet includes necessary dependencies for implementing the `training_range` function. Write a Python function `def training_range( store: Any, model: types.Artifact, use_transformed_examples: bool = False ) -> List[types.Artifact]` to solve the following problem:
ContainsTrainingRange implementation, for shared use across ResolverOps. Returns the Examples artifact the Model was trained on. Note that only the standard TFleX Model and Examples artifacts are supported. Args: store: The MetadataStore. model: The Model artifact whose trained Examples to return. use_transformed_examples: Whether to return the materialized Examples produced by the Transform component. Should only be used if a Model was trained on materialized transformed Examples produced by a Transform. Defaults to False. Returns: List of Examples artifacts if found, else empty list. We intentionally don't raise SkipSignal, such that the caller can decide to raise it or not.
Here is the function:
def training_range(
    store: Any, model: types.Artifact, use_transformed_examples: bool = False
) -> List[types.Artifact]:
  """ContainsTrainingRange implementation, for shared use across ResolverOps.

  Returns the Examples artifact the Model was trained on.

  Note that only the standard TFleX Model and Examples artifacts are supported.

  Args:
    store: The MetadataStore.
    model: The Model artifact whose trained Examples to return.
    use_transformed_examples: Whether to return the materialized Examples
      produced by the Transform component. Should only be used if a Model was
      trained on materialized transformed Examples produced by a Transform.
      Defaults to False.

  Returns:
    List of Examples artifacts if found, else empty list. We intentionally don't
    raise SkipSignal, such that the caller can decide to raise it or not.
  """
  # In MLMD, an Examples and Model are related by:
  #
  #           Event 1             Event 2
  # Examples ------> Execution ------> Model
  #
  # or, in the case where a Transform component materializes Examples:
  #
  #           Event 1             Event 2             Event 3
  # Examples ------> Execution ------> Examples -----> Execution ------> Model
  #
  # For a single Model, there may be many parent Examples it was trained on.
  # TODO(kshivvy): Support querying multiple Model ids at once, to reduce the
  # number of round trip MLMD queries. This will be useful for resolving inputs
  # of a span driven evaluator.
  # Get all upstream Examples artifacts associated with the Model.
  mlmd_resolver = metadata_resolver.MetadataResolver(store)
  upstream_examples_dict = mlmd_resolver.get_upstream_artifacts_by_artifact_ids(
      artifact_ids=[model.id],
      # In MLMD, artifacts are 2 hops away. Because we are considering
      # Example -> (transformed) Examples -> Model, we set max_num_hops to 4.
      max_num_hops=4,
      filter_query=f'type="{ops_utils.EXAMPLES_TYPE_NAME}"',
  )
  if not upstream_examples_dict:
    return []
  upstream_examples = upstream_examples_dict[model.id]
  if not upstream_examples:
    return []
  # Get the sets of artifact IDs for Examples produced by Transform and by
  # ExampleGen.
  all_examples_ids = {a.id for a in upstream_examples}
  transformed_examples_ids = set()
  for event in store.get_events_by_artifact_ids(all_examples_ids):
    if event_lib.is_valid_output_event(
        event, expected_output_key=ops_utils.TRANSFORMED_EXAMPLES_KEY
    ):
      transformed_examples_ids.add(event.artifact_id)
  # We intentionally do set subtraction instead of filtering by the output_key
  # "examples", in case the Examples artifact is produced by a custom
  # component.
  examples_ids = all_examples_ids - transformed_examples_ids
  mlmd_artifacts = []
  for artifact in upstream_examples:
    # Only consider Examples artifacts that are marked LIVE. This excludes
    # garbage collected artifacts (which are marked as DELETED).
    if artifact.state != metadata_store_pb2.Artifact.State.LIVE:
      continue
    elif use_transformed_examples and artifact.id in transformed_examples_ids:
      mlmd_artifacts.append(artifact)
    elif not use_transformed_examples and artifact.id in examples_ids:
      mlmd_artifacts.append(artifact)
  if not mlmd_artifacts:
    return []
  # Find the ArtifactType associated with the artifacts.
  # NOTE(review): assumes every selected Examples artifact shares the same
  # ArtifactType (equal type_id); only the first one's type is fetched --
  # confirm this invariant holds for custom components.
  artifact_type = store.get_artifact_types_by_id([mlmd_artifacts[0].type_id])[0]
  # Return the sorted, serialized Examples.
  artifacts = artifact_utils.deserialize_artifacts(
      artifact_type, mlmd_artifacts
  )
return sorted(
artifacts, key=lambda a: (a.mlmd_artifact.create_time_since_epoch, a.id)
) | ContainsTrainingRange implementation, for shared use across ResolverOps. Returns the Examples artifact the Model was trained on. Note that only the standard TFleX Model and Examples artifacts are supported. Args: store: The MetadataStore. model: The Model artifact whose trained Examples to return. use_transformed_examples: Whether to return the materialized Examples produced by the Transform component. Should only be used if a Model was trained on materialized transformed Examples produced by a Transform. Defaults to False. Returns: List of Examples artifacts if found, else empty list. We intentionally don't raise SkipSignal, such that the caller can decide to raise it or not. |
166,435 | from __future__ import annotations
import abc
from typing import Any, Generic, Literal, Mapping, Optional, Sequence, Set, Type, TypeVar, Union
import attr
from tfx import types
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import json_utils
from tfx.utils import typing_utils
import ml_metadata as mlmd
class Node:
  """Base node of the resolver expression graph.

  Equality and hashing are identity-based: a Node only ever equals itself.
  """

  # Data type this node evaluates to; populated by subclasses.
  output_data_type: DataType

  def __eq__(self, other):
    # Identity semantics: non-Node operands defer to the other side.
    return self is other if isinstance(other, Node) else NotImplemented

  def __hash__(self):
    # Consistent with identity equality: hash on the object id.
    return hash(id(self))
class OpNode(Node):
  """Node that represents a ResolverOp invocation and its result.

  NOTE(review): fields are declared with attr.ib but no @attr.s decorator is
  visible in this excerpt -- presumably it exists in the original source;
  confirm before relying on attrs-generated __init__/__eq__ behavior.
  """
  # ResolverOp class that is used for the Node.
  op_type = attr.ib()
  # Output data type of ResolverOp.
  output_data_type = attr.ib(
      type=DataType,
      default=DataType.ARTIFACT_MULTIMAP)
  # Arguments of the ResolverOp.
  args = attr.ib(type=Sequence[Node], default=())
  # Property for the ResolverOp, given as keyword arguments.
  kwargs = attr.ib(type=Mapping[str, Any], factory=dict)
  def _validate_args(self, attribute, value):
    # attrs-style validator signature (self, attribute, value); presumably
    # wired to the `args` field as a validator in the original source -- verify.
    del attribute  # Unused.
    if not typing_utils.is_compatible(value, Sequence[Node]):
      raise TypeError(f'`args` should be a Sequence[Node] but got {value!r}.')
  def __repr__(self):
    # Renders as OpType(arg1, arg2, key=value, ...), mirroring the call site.
    all_args = [repr(arg) for arg in self.args]
    all_args.extend(f'{k}={repr(v)}' for k, v in self.kwargs.items())
    return f'{self.op_type.__qualname__}({", ".join(all_args)})'
class InputNode(Node):
  """Node that represents the input arguments of the resolver function."""

  output_data_type = DataType.ARTIFACT_LIST

  def __init__(self, wrapped: types.BaseChannel):
    # The channel this input node stands for.
    self.wrapped = wrapped

  def __repr__(self) -> str:
    return 'Input()'

  def __eq__(self, others):
    # Equality delegates to the wrapped channel's own equality.
    if isinstance(others, InputNode):
      return self.wrapped == others.wrapped
    return NotImplemented

  def __hash__(self):
    return hash(self.wrapped)
class DictNode(Node):
  """Node that represents a dict of Node values."""

  output_data_type = DataType.ARTIFACT_MULTIMAP

  def __init__(self, nodes: Mapping[str, Node]):
    # Accept only a str -> Node mapping where every value resolves to an
    # ARTIFACT_LIST; together they form a single ARTIFACT_MULTIMAP.
    valid = typing_utils.is_compatible(nodes, Mapping[str, Node]) and all(
        v.output_data_type == DataType.ARTIFACT_LIST for v in nodes.values())
    if not valid:
      raise ValueError(
          'Expected dict[str, Node] s.t. all node.output_data_type == '
          f'ARTIFACT_LIST, but got {nodes}.')
    self.nodes = nodes

  def __eq__(self, other):
    if isinstance(other, DictNode):
      return self.nodes == other.nodes
    return NotImplemented

  def __hash__(self):
    # Order-insensitive: hash the sorted (key, node) pairs.
    return hash(tuple(sorted(self.nodes.items())))

  def __repr__(self) -> str:
    rendered = ', '.join(f'{k}={v!r}' for k, v in self.nodes.items())
    return f'Dict({rendered})'
The provided code snippet includes necessary dependencies for implementing the `get_input_nodes` function. Write a Python function `def get_input_nodes(node: Node) -> Set[InputNode]` to solve the following problem:
Get `InputNode`s that are used to produce the given `output_node`.
Here is the function:
def get_input_nodes(node: Node) -> Set[InputNode]:
  """Get `InputNode`s that are used to produce the given `output_node`.

  Recursively walks the resolver graph rooted at `node`, collecting every
  `InputNode` reachable through `OpNode` args or `DictNode` values.

  NOTE(review): `OpNode.kwargs` values are not traversed; presumably kwargs
  never contain Nodes -- confirm.

  Args:
    node: Root of the (sub)graph to inspect.

  Returns:
    The set of all `InputNode`s feeding into `node` (the node itself, if it
    already is an `InputNode`).

  Raises:
    AssertionError: If `node` is not one of the known Node subclasses.
  """
  if isinstance(node, InputNode):
    # Base case: an input node depends only on itself.
    return {node}
  elif isinstance(node, OpNode):
    result = set()
    for arg_node in node.args:
      result.update(get_input_nodes(arg_node))
    return result
  elif isinstance(node, DictNode):
    result = set()
    for wrapped in node.nodes.values():
      result.update(get_input_nodes(wrapped))
    return result
  else:
    raise AssertionError(
        'Should not reach here; '
        f'type={type(node).__name__}, value={node}') | Get `InputNode`s that are used to produce the given `output_node`.
166,436 | from typing import Dict, List, Optional, Any
from tfx import types
from tfx.dsl.components.common import resolver
from tfx.utils import doc_controls
import ml_metadata as mlmd
def _get_span_custom_property(artifact: types.Artifact) -> int:
  """Returns the artifact's "span" custom property as an int."""
  # For backward compatibility, span may be stored as a string.
  str_span = artifact.get_string_custom_property(_SPAN)
  if str_span:
    return int(str_span)
  # Empty/missing string span falls through to the int-typed property.
  return artifact.get_int_custom_property(_SPAN) | null
166,437 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `latest_created` function. Write a Python function `def latest_created(artifacts, n: int = 1)` to solve the following problem:
Returns the n latest createst artifacts, ties broken by artifact id. Args: artifacts: The artifacts to filter. n: The number of latest artifacts to return, must be > 0. Returns: The n latest artifacts.
Here is the function:
def latest_created(artifacts, n: int = 1):
  """Returns the n latest created artifacts, ties broken by artifact id.

  Args:
    artifacts: The artifacts to filter.
    n: The number of latest artifacts to return, must be > 0.

  Returns:
    The n latest artifacts.
  """
  return ops.LatestCreateTime(artifacts, n=n) | Returns the n latest createst artifacts, ties broken by artifact id. Args: artifacts: The artifacts to filter. n: The number of latest artifacts to return, must be > 0. Returns: The n latest artifacts.
166,438 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `latest_version` function. Write a Python function `def latest_version(artifacts, n: int = 1)` to solve the following problem:
Returns the n latest version artifacts, ties broken by artifact id. Args: artifacts: The artifacts to filter. n: The number of latest artifacts to return, must be > 0. Returns: The n latest artifacts.
Here is the function:
def latest_version(artifacts, n: int = 1):
  """Returns the n latest version artifacts, ties broken by artifact id.

  Args:
    artifacts: The artifacts to filter.
    n: The number of latest artifacts to return, must be > 0.

  Returns:
    The n latest artifacts.
  """
  return ops.LatestVersion(artifacts, n=n) | Returns the n latest version artifacts, ties broken by artifact id. Args: artifacts: The artifacts to filter. n: The number of latest artifacts to return, must be > 0. Returns: The n latest artifacts.
166,439 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `static_range` function. Write a Python function `def static_range( artifacts, *, start_span_number: int = -1, end_span_number: int = -1, keep_all_versions: bool = False, exclude_span_numbers: Sequence[int] = (), min_spans: Optional[int] = None, )` to solve the following problem:
Returns artifacts with spans in [start_span, end_span] inclusive. This resolver function is based on the span-version semantics, which only considers the latest version of each span. If you want to keep all versions, then set keep_all_versions=True. Input artifacts must have both "span" int property and "version" int property. Please note that the spans in exclude_span_numbers are excluded AFTER getting the artifacts with spans in the range. If there are less than min_spans unique spans present in the resolved artifacts, then the component execution will be skipped. Corresponds to StaticRange in TFX. Example usage: Consider 8 artifacts with: spans = [0, 1, 2, 3, 3, 5, 7, 10] versions = [0, 0, 0, 0, 3, 0, 0, 0] static_range( end_span_number=5, keep_all_versions=False, exclude_span_numbers=[2]) Because start_span_number = -1, it is set to the smallest span, 0. Spans in the range [0, 5] will be considered. Because keep_all_versions=False, only the artifact with span=3 and version=3 will be considered, even though there are two artifacts with span=3. Because exclude_span_numbers=[2], the artifacts with span=2 will not be kept, even though it is in the range. min_spans is None but end_span_number < 0, so min_spans is not automatically set. The artifacts that will be returned are: spans = [0, 1, 3, 5] versions = [0, 0, 3, 0] Args: artifacts: The artifacts to filter. start_span_number: The smallest span number to keep, inclusive. If < 0, set to the smallest span in the artifacts. end_span_number: The largest span number to keep, inclusive. If < 0, set to the largest span in the artifacts. keep_all_versions: If true, all artifacts with spans in the range are kept. If false then if multiple artifacts have the same span, only the span with the latest version is kept. Defaults to False. exclude_span_numbers: The span numbers to exclude. min_spans: Minimum number of desired example spans in the range. 
If min_spans is None, and if both end_span_number and start_span_number are positive, it is set to end_span_number - start_span_number + 1. Else if min_spans is None, it is set to -1, meaning all unique spans will be considered. Returns: Artifacts with spans in [start_span, end_span] inclusive.
Here is the function:
def static_range(
    artifacts,
    *,
    start_span_number: int = -1,
    end_span_number: int = -1,
    keep_all_versions: bool = False,
    exclude_span_numbers: Sequence[int] = (),
    min_spans: Optional[int] = None,
):
  """Returns artifacts with spans in [start_span, end_span] inclusive.

  This resolver function is based on the span-version semantics, which only
  considers the latest version of each span. If you want to keep all versions,
  then set keep_all_versions=True. Input artifacts must have both "span" int
  property and "version" int property.

  Please note that the spans in exclude_span_numbers are excluded AFTER getting
  the artifacts with spans in the range.

  If there are less than min_spans unique spans present in the resolved
  artifacts, then the component execution will be skipped.

  Corresponds to StaticRange in TFX.

  Example usage:

    Consider 8 artifacts with:
      spans    = [0, 1, 2, 3, 3, 5, 7, 10]
      versions = [0, 0, 0, 0, 3, 0, 0, 0]

    static_range(
        end_span_number=5,
        keep_all_versions=False,
        exclude_span_numbers=[2])

    Because start_span_number = -1, it is set to the smallest span, 0.
    Spans in the range [0, 5] will be considered.

    Because keep_all_versions=False, only the artifact with span=3 and version=3
    will be considered, even though there are two artifacts with span=3.

    Because exclude_span_numbers=[2], the artifacts with span=2 will not be
    kept, even though it is in the range.

    min_spans is None but end_span_number < 0, so min_spans is not automatically
    set.

    The artifacts that will be returned are:
      spans    = [0, 1, 3, 5]
      versions = [0, 0, 3, 0]

  Args:
    artifacts: The artifacts to filter.
    start_span_number: The smallest span number to keep, inclusive. If < 0, set
      to the smallest span in the artifacts.
    end_span_number: The largest span number to keep, inclusive. If < 0, set to
      the largest span in the artifacts.
    keep_all_versions: If true, all artifacts with spans in the range are kept.
      If false then if multiple artifacts have the same span, only the span with
      the latest version is kept. Defaults to False.
    exclude_span_numbers: The span numbers to exclude.
    min_spans: Minimum number of desired example spans in the range. If
      min_spans is None, and if both end_span_number and start_span_number are
      positive, it is set to end_span_number - start_span_number + 1. Else if
      min_spans is None, it is set to -1, meaning all unique spans will be
      considered.

  Returns:
    Artifacts with spans in [start_span, end_span] inclusive.
  """
  resolved_artifacts = ops.StaticSpanRange(
      artifacts,
      start_span=start_span_number,
      end_span=end_span_number,
      keep_all_versions=keep_all_versions,
  )
  # Exclusion is applied AFTER range selection; see the docstring.
  if exclude_span_numbers:
    resolved_artifacts = ops.ExcludeSpans(
        resolved_artifacts, denylist=exclude_span_numbers
    )
  if min_spans is None:
    # We check that start_span_number and end_span_number are positive to ensure
    # min_spans is well defined. Else, it is set to -1, meaning all the unique
    # spans will be considered.
    if start_span_number >= 0 and end_span_number >= 0:
      min_spans = end_span_number - start_span_number + 1
      # Decrement min_spans by the number of spans in exclude_span_numbers that
      # are in the range [start_span_number, end_span_number].
      num_excluded_spans = 0
      for excluded_span in exclude_span_numbers:
        if (
            excluded_span >= start_span_number
            and excluded_span <= end_span_number
        ):
          num_excluded_spans += 1
      min_spans -= num_excluded_spans
      logging.warning(
          'min_spans for static_range(...) was not set and is being set to '
          'end_span_number - start_span_number + 1 - '
          '(number of excluded spans in the range [start_span, end_span]) = '
          '%s - %s + 1 - %s = %s.',
          end_span_number,
          start_span_number,
          num_excluded_spans,
          min_spans,
      )
    else:
      min_spans = -1
      logging.warning(
          'min_spans for static_range(...) was not set and is being set to -1, '
          'meaning static_range(...) will never throw a SkipSignal.'
      )
return ops.SkipIfLessThanNSpans(resolved_artifacts, n=min_spans) | Returns artifacts with spans in [start_span, end_span] inclusive. This resolver function is based on the span-version semantics, which only considers the latest version of each span. If you want to keep all versions, then set keep_all_versions=True. Input artifacts must have both "span" int property and "version" int property. Please note that the spans in exclude_span_numbers are excluded AFTER getting the artifacts with spans in the range. If there are less than min_spans unique spans present in the resolved artifacts, then the component execution will be skipped. Corresponds to StaticRange in TFX. Example usage: Consider 8 artifacts with: spans = [0, 1, 2, 3, 3, 5, 7, 10] versions = [0, 0, 0, 0, 3, 0, 0, 0] static_range( end_span_number=5, keep_all_versions=False, exclude_span_numbers=[2]) Because start_span_number = -1, it is set to the smallest span, 0. Spans in the range [0, 5] will be considered. Because keep_all_versions=False, only the artifact with span=3 and version=3 will be considered, even though there are two artifacts with span=3. Because exclude_span_numbers=[2], the artifacts with span=2 will not be kept, even though it is in the range. min_spans is None but end_span_number < 0, so min_spans is not automatically set. The artifacts that will be returned are: spans = [0, 1, 3, 5] versions = [0, 0, 3, 0] Args: artifacts: The artifacts to filter. start_span_number: The smallest span number to keep, inclusive. If < 0, set to the smallest span in the artifacts. end_span_number: The largest span number to keep, inclusive. If < 0, set to the largest span in the artifacts. keep_all_versions: If true, all artifacts with spans in the range are kept. If false then if multiple artifacts have the same span, only the span with the latest version is kept. Defaults to False. exclude_span_numbers: The span numbers to exclude. min_spans: Minimum number of desired example spans in the range. 
If min_spans is None, and if both end_span_number and start_span_number are positive, it is set to end_span_number - start_span_number + 1. Else if min_spans is None, it is set to -1, meaning all unique spans will be considered. Returns: Artifacts with spans in [start_span, end_span] inclusive. |
166,440 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `rolling_range` function. Write a Python function `def rolling_range( artifacts, *, start_span_number: int = 0, num_spans: int = 1, skip_num_recent_spans: int = 0, keep_all_versions: bool = False, exclude_span_numbers: Sequence[int] = (), min_spans: Optional[int] = None, version_sort_keys: Sequence[str] = (), )` to solve the following problem:
Returns artifacts with spans in a rolling range. A rolling range covers the latest (largest) spans. It's calculated in the following order: 1. Sort the artifacts by span in ascending order. 2. Remove the last skip_num_recent_spans number of spans (removing the largest spans). 3. Select the last num_spans number of spans (the remaining largest spans). 4. Exclude the spans of exclude_span_numbers. Note that this exclusion happens last for backward compatibility. This can result in having less than num_spans spans, meaning the consumer component would be skipped due to lack of inputs. To avoid this, you would have to decrease min_spans. Pythonically, this range is equivalent to: sorted_spans[:-skip_num_recent_spans][-num_spans:] This resolver function is based on the span-version semantics, which only considers the latest version of each span. The version semantics can be optionally changed by providing a list of artifact attributes that can be used to sort versions within a particular span. If you want to keep all versions, then set keep_all_versions=True. Input artifacts must have both "span" int property and "version" int property. Please note that the spans in exclude_span_numbers are excluded AFTER getting the latest spans. If there are less than min_spans unique spans present in the resolved artifacts, then the component execution will be skipped. Corresponds to RollingRange in TFX. Example usage: Consider 6 artifacts with: spans = [1, 2, 3, 3, 7, 8] versions = [0, 0, 1, 0, 1, 2] rolling_range( start_span_number=3, num_spans=5, skip_num_recent_spans=1, keep_all_versions=True, exclude_span_numbers=[7], min_spans=1) spans 1 and 2 are removed because they are < start_span_number=3. The sorted unique spans are [3, 7, 8]. span 8 is removed because skip_num_recent_spans=1, leaving spans [3, 7]. Although num_spans=5, only two unique span numbers are available, 3 and 7, so both spans [3, 7] are kept. Because keep_all_versions=True, both artifacts with span=3 are kept. 
Because exclude_span_numbers=[7], the artifact with span=7 will not be kept, even though it is in the range. The artifacts that will be returned are: spans = [3, 3] versions = [1, 0] Note min_spans=1, so a SkipSignal will not be present in the compiled IR. Args: artifacts: The artifacts to filter. start_span_number: The smallest span number to keep, inclusive. Defaults to 0. num_spans: The length of the range. If num_spans <= 0, then num_spans is set to the total number of unique spans. skip_num_recent_spans: Number of most recently available (largest) spans to skip. Defaults to 0. keep_all_versions: If true, all artifacts with spans in the range are kept. If false then if multiple artifacts have the same span, only the span with the latest version is kept. Defaults to False. exclude_span_numbers: The span numbers to exclude. min_spans: Minimum number of desired example spans in the range. If min_spans is None, it is set to num_spans. version_sort_keys: List of string artifact attributes to sort or filter the versions witin the spans, applied in order of specification. Nested keys can use '.' separator for e.g. 'mlmd_artifact.create_time_since_epoch'. It can be used to override the default behavior, which is sort by version number and break ties by create time and id. Returns: Artifacts with spans in the rolling range.
Here is the function:
def rolling_range(
    artifacts,
    *,
    start_span_number: int = 0,
    num_spans: int = 1,
    skip_num_recent_spans: int = 0,
    keep_all_versions: bool = False,
    exclude_span_numbers: Sequence[int] = (),
    min_spans: Optional[int] = None,
    version_sort_keys: Sequence[str] = (),
):
  """Returns artifacts with spans in a rolling range.

  A rolling range covers the latest (largest) spans. It's calculated in the
  following order:

  1. Sort the artifacts by span in ascending order.
  2. Remove the last skip_num_recent_spans number of spans (removing the
     largest spans).
  3. Select the last num_spans number of spans (the remaining largest spans).
  4. Exclude the spans of exclude_span_numbers. Note that this exclusion
     happens last for backward compatibility. This can result in having less
     than num_spans spans, meaning the consumer component would be skipped due
     to lack of inputs. To avoid this, you would have to decrease min_spans.

  Pythonically, this range is equivalent to:

    sorted_spans[:-skip_num_recent_spans][-num_spans:]

  This resolver function is based on the span-version semantics, which only
  considers the latest version of each span. The version semantics can be
  optionally changed by providing a list of artifact attributes that can be used
  to sort versions within a particular span. If you want to keep all versions,
  then set keep_all_versions=True. Input artifacts must have both "span" int
  property and "version" int property.

  Please note that the spans in exclude_span_numbers are excluded AFTER getting
  the latest spans.

  If there are less than min_spans unique spans present in the resolved
  artifacts, then the component execution will be skipped.

  Corresponds to RollingRange in TFX.

  Example usage:

    Consider 6 artifacts with:
      spans    = [1, 2, 3, 3, 7, 8]
      versions = [0, 0, 1, 0, 1, 2]

    rolling_range(
        start_span_number=3,
        num_spans=5,
        skip_num_recent_spans=1,
        keep_all_versions=True,
        exclude_span_numbers=[7],
        min_spans=1)

    spans 1 and 2 are removed because they are < start_span_number=3. The
    sorted unique spans are [3, 7, 8].

    span 8 is removed because skip_num_recent_spans=1, leaving spans [3, 7].

    Although num_spans=5, only two unique span numbers are available, 3 and 7,
    so both spans [3, 7] are kept.

    Because keep_all_versions=True, both artifacts with span=3 are kept.

    Because exclude_span_numbers=[7], the artifact with span=7 will not be
    kept, even though it is in the range.

    The artifacts that will be returned are:
      spans    = [3, 3]
      versions = [1, 0]

    Note min_spans=1, so a SkipSignal will not be present in the compiled IR.

  Args:
    artifacts: The artifacts to filter.
    start_span_number: The smallest span number to keep, inclusive. Defaults to
      0.
    num_spans: The length of the range. If num_spans <= 0, then num_spans is set
      to the total number of unique spans.
    skip_num_recent_spans: Number of most recently available (largest) spans to
      skip. Defaults to 0.
    keep_all_versions: If true, all artifacts with spans in the range are kept.
      If false then if multiple artifacts have the same span, only the span with
      the latest version is kept. Defaults to False.
    exclude_span_numbers: The span numbers to exclude.
    min_spans: Minimum number of desired example spans in the range. If
      min_spans is None, it is set to num_spans.
    version_sort_keys: List of string artifact attributes to sort or filter the
      versions within the spans, applied in order of specification. Nested keys
      can use '.' separator for e.g. 'mlmd_artifact.create_time_since_epoch'. It
      can be used to override the default behavior, which is sort by version
      number and break ties by create time and id.

  Returns:
    Artifacts with spans in the rolling range.
  """
  resolved_artifacts = ops.LatestSpan(
      artifacts,
      min_span=start_span_number,
      n=num_spans,
      skip_last_n=skip_num_recent_spans,
      keep_all_versions=keep_all_versions,
      version_sort_keys=version_sort_keys,
  )
  # Exclusion is applied AFTER selecting the latest spans; see the docstring.
  if exclude_span_numbers:
    resolved_artifacts = ops.ExcludeSpans(
        resolved_artifacts, denylist=exclude_span_numbers
    )
  if min_spans is None:
    logging.warning(
        'min_spans for rolling_range(...) was not set, so it is defaulting to '
        'num_spans = %s. If skip_num_recent_spans is set, this may delay '
        'the component triggering on the first run until sufficient Examples '
        'artifacts are available.',
        num_spans,
    )
    min_spans = num_spans
return ops.SkipIfLessThanNSpans(resolved_artifacts, n=min_spans) | Returns artifacts with spans in a rolling range. A rolling range covers the latest (largest) spans. It's calculated in the following order: 1. Sort the artifacts by span in ascending order. 2. Remove the last skip_num_recent_spans number of spans (removing the largest spans). 3. Select the last num_spans number of spans (the remaining largest spans). 4. Exclude the spans of exclude_span_numbers. Note that this exclusion happens last for backward compatibility. This can result in having less than num_spans spans, meaning the consumer component would be skipped due to lack of inputs. To avoid this, you would have to decrease min_spans. Pythonically, this range is equivalent to: sorted_spans[:-skip_num_recent_spans][-num_spans:] This resolver function is based on the span-version semantics, which only considers the latest version of each span. The version semantics can be optionally changed by providing a list of artifact attributes that can be used to sort versions within a particular span. If you want to keep all versions, then set keep_all_versions=True. Input artifacts must have both "span" int property and "version" int property. Please note that the spans in exclude_span_numbers are excluded AFTER getting the latest spans. If there are less than min_spans unique spans present in the resolved artifacts, then the component execution will be skipped. Corresponds to RollingRange in TFX. Example usage: Consider 6 artifacts with: spans = [1, 2, 3, 3, 7, 8] versions = [0, 0, 1, 0, 1, 2] rolling_range( start_span_number=3, num_spans=5, skip_num_recent_spans=1, keep_all_versions=True, exclude_span_numbers=[7], min_spans=1) spans 1 and 2 are removed because they are < start_span_number=3. The sorted unique spans are [3, 7, 8]. span 8 is removed because skip_num_recent_spans=1, leaving spans [3, 7]. Although num_spans=5, only two unique span numbers are available, 3 and 7, so both spans [3, 7] are kept. 
Because keep_all_versions=True, both artifacts with span=3 are kept. Because exclude_span_numbers=[7], the artifact with span=7 will not be kept, even though it is in the range. The artifacts that will be returned are: spans = [3, 3] versions = [1, 0] Note min_spans=1, so a SkipSignal will not be present in the compiled IR. Args: artifacts: The artifacts to filter. start_span_number: The smallest span number to keep, inclusive. Defaults to 0. num_spans: The length of the range. If num_spans <= 0, then num_spans is set to the total number of unique spans. skip_num_recent_spans: Number of most recently available (largest) spans to skip. Defaults to 0. keep_all_versions: If true, all artifacts with spans in the range are kept. If false then if multiple artifacts have the same span, only the span with the latest version is kept. Defaults to False. exclude_span_numbers: The span numbers to exclude. min_spans: Minimum number of desired example spans in the range. If min_spans is None, it is set to num_spans. version_sort_keys: List of string artifact attributes to sort or filter the versions witin the spans, applied in order of specification. Nested keys can use '.' separator for e.g. 'mlmd_artifact.create_time_since_epoch'. It can be used to override the default behavior, which is sort by version number and break ties by create time and id. Returns: Artifacts with spans in the rolling range. |
166,441 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `all_spans` function. Write a Python function `def all_spans(artifacts)` to solve the following problem:
Returns the sorted artifacts with unique spans. By default, all artifacts with unique spans (ties broken by latest version) are returned. Example usage: Consider 6 artifacts with: spans = [1, 3, 3, 2, 8, 7] versions = [0, 0, 1, 0, 1, 2] all_spans() will return artifacts: spans = [1, 2, 3, 7, 8] versions = [0, 0, 1, 2, 1] Note that there are 2 artifacts with span 3, but only the one with the latest version is returned. Spans are sorted in ascending order. Args: artifacts: The artifacts to filter. Returns: Sorted Artifacts with unique spans.
Here is the function:
def all_spans(artifacts):
  """Returns artifacts deduplicated by span, in ascending span order.

  Every unique span among the inputs contributes exactly one artifact to the
  result; when several artifacts share a span, the one with the latest
  version is chosen. The result is sorted by span, smallest first.

  For example, artifacts with (span, version) pairs
  (1, 0), (3, 0), (3, 1), (2, 0), (8, 1), (7, 2) resolve to
  (1, 0), (2, 0), (3, 1), (7, 2), (8, 1) — span 3 keeps only version 1.

  Args:
    artifacts: The artifacts to deduplicate and sort.

  Returns:
    Artifacts with unique spans, sorted by span in ascending order.
  """
  unique_latest_per_span = ops.AllSpans(artifacts)
  return unique_latest_per_span
166,442 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `shuffle` function. Write a Python function `def shuffle(artifacts)` to solve the following problem:
Shuffles the artifacts (in a uniform random way) by span. Example usage: Consider 4 artifacts with: spans = [1, 2, 3, 4] shuffle() will return artifacts randomly shuffled, e.g.: spans = [3, 4, 2, 1] Args: artifacts: The artifacts to filter. Returns: The randomly shuffled artifacts.
Here is the function:
def shuffle(artifacts):
  """Returns the artifacts in a uniformly random order (shuffled by span).

  For example, artifacts with spans [1, 2, 3, 4] may come back ordered as
  [3, 4, 2, 1].

  Args:
    artifacts: The artifacts to shuffle.

  Returns:
    The same artifacts, randomly reordered.
  """
  randomized = ops.Shuffle(artifacts)
  return randomized
166,443 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `latest_pipeline_run_outputs` function. Write a Python function `def latest_pipeline_run_outputs(pipeline, output_keys: Sequence[str] = ())` to solve the following problem:
Returns the artifacts in the latest COMPLETE pipeline run. Example usage: producer_pipeline = Pipeline(outputs={ 'examples': example_gen.outputs['examples'], 'schema': schema_gen.outputs['schema'] }) consumer_pipeline_inputs = PipelineInputs( latest_pipeline_run_outputs(producer_pipeline), output_keys=['examples', 'schema']) trainer = TFTrainer( examples=consumer_pipeline_inputs.inputs['examples'], schema=consumer_pipeline_inputs.inputs['schema']) consumer_pipeline = Pipeline( inputs=consumer_pipeline_inputs, components=[trainer], ) Args: pipeline: The pipeline producing the artifacts output_keys: (Optional) A list of output keys. If provided, only the artifacts of the key in this list will return by this function, otherwise, all available output keys of the producer pipeline will be used. Returns: The artifacts in the latest COMPLETE pipeline run.
Here is the function:
def latest_pipeline_run_outputs(pipeline, output_keys: Sequence[str] = ()):
  """Resolves the artifacts from the latest COMPLETE run of `pipeline`.

  Typical usage wires a consumer pipeline to a producer pipeline's outputs:

    producer_pipeline = Pipeline(outputs={
        'examples': example_gen.outputs['examples'],
        'schema': schema_gen.outputs['schema'],
    })
    consumer_pipeline_inputs = PipelineInputs(
        latest_pipeline_run_outputs(producer_pipeline),
        output_keys=['examples', 'schema'])
    trainer = TFTrainer(
        examples=consumer_pipeline_inputs.inputs['examples'],
        schema=consumer_pipeline_inputs.inputs['schema'])
    consumer_pipeline = Pipeline(
        inputs=consumer_pipeline_inputs,
        components=[trainer],
    )

  Args:
    pipeline: The producer pipeline whose run outputs are resolved.
    output_keys: Optional subset of the producer pipeline's output keys to
      resolve. When empty, all of the producer's output keys are used.

  Returns:
    The artifacts in the latest COMPLETE pipeline run.

  Raises:
    ValueError: If any requested key is not an output of `pipeline`.
  """
  unknown_keys = [k for k in output_keys if k not in pipeline.outputs]
  if unknown_keys:
    bad_key = unknown_keys[0]
    raise ValueError(
        f'Output key {bad_key} does not exist in pipeline {pipeline.id}. '
        f'Available: {list(pipeline.outputs)}'
    )
  return ops.LatestPipelineRunOutputs(
      pipeline_name=pipeline.pipeline_name, output_keys=output_keys
  )
166,444 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `_infer_latest_pipeline_run_type` function. Write a Python function `def _infer_latest_pipeline_run_type(pipeline, output_keys: Sequence[str] = ())` to solve the following problem:
Output type inferrer of resolver function latest_pipeline_run_outputs. Args: pipeline: The pipeline producing the artifacts. output_keys: (Optional) A list of output keys. If provided, only the artifacts of the key in this list will return by this function, otherwise, all available output keys of the producer pipeline will be used. Returns: A Dict: key is output key, value is output type.
Here is the function:
def _infer_latest_pipeline_run_type(pipeline, output_keys: Sequence[str] = ()):
"""Output type inferrer of resolver function latest_pipeline_run_outputs.
Args:
pipeline: The pipeline producing the artifacts.
output_keys: (Optional) A list of output keys. If provided, only the
artifacts of the key in this list will return by this function, otherwise,
all available output keys of the producer pipeline will be used.
Returns:
A Dict: key is output key, value is output type.
"""
if not output_keys:
output_keys = list(pipeline.outputs)
return {
output_key: channel.type
for output_key, channel in pipeline.outputs.items()
if output_key in output_keys
} | Output type inferrer of resolver function latest_pipeline_run_outputs. Args: pipeline: The pipeline producing the artifacts. output_keys: (Optional) A list of output keys. If provided, only the artifacts of the key in this list will return by this function, otherwise, all available output keys of the producer pipeline will be used. Returns: A Dict: key is output key, value is output type. |
166,445 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `sequential_rolling_range` function. Write a Python function `def sequential_rolling_range( artifacts, *, start_span_number: Optional[int] = None, num_spans: int = 1, skip_num_recent_spans: int = 0, keep_all_versions: bool = False, exclude_span_numbers: Sequence[int] = (), )` to solve the following problem:
Returns artifacts with spans in a sequential rolling range. Sequential rolling range is a sliding window on the oldest consecutive spans. The consecutive spans must be in the range: [start_span_number, max_span - skip_num_recent_spans], where max_span is the maximum span present in the artifacts. This range is modified to account for exclude_span_numbers, for details see the ConsecutiveSpans ResolverOp implementation. The window size is num_spans and has a stride of 1. If the spans are not consecutive, then the sequential rolling range waits for the missing span to arrive. This resolver function is based on the span-version semantics, which only considers the latest version of each span. If you want to keep all versions, then set keep_all_versions=True. Input artifacts must have both "span" int property and "version" int property. Corresponds to SequentialRollingRange in TFX. Example usage: Consider 5 artifacts [A, B, C, D, E] with spans = [1, 2, 3, 4, 7]. sequential_rolling_range( start_span_number=1, num_spans=3, skip_num_recent_spans=1, keep_all_versions=False, exclude_span_numbers=[]) The consecutive spans to consider are [1, 2, 3, 4] The artifacts will be returned with a sliding window of size num_spans=3 and stride 1 applied: [[A, B, C], [B, C, D]] However, if nums_spans=5, there are only 4 consecutive spans to consider, so [], no artifacts, will be returned. Since sequential_rolling_range returns multiple windows, it must be used together with ForEach. For example: with ForEach(sequential_rolling_range( all_examples, num_spans=10)) as examples_window: trainer = Trainer(examples=shuffle(examples_window)) Args: artifacts: The artifacts to filter. start_span_number: The smallest span number to keep, inclusive. Optional, if not set then defaults to the minimum span. 
If the start_span_number is configured wrong (so that it is smaller than the first span number), we will wait indefinitely until the missing spans between start_span_number and the first span number to be appeared. num_spans: The length of the range. If num_spans <= 0, then num_spans is set to the total number of artifacts with consecutive spans in the range. Note that this is also the size of the sliding window of the sequential rolling range. skip_num_recent_spans: Number of most recently available (largest) spans to skip. Defaults to 0. keep_all_versions: If true, all artifacts with spans in the range are kept. If false then if multiple artifacts have the same span, only the span with the latest version is kept. Defaults to False. exclude_span_numbers: The list of missing/bad span numbers to exclude. Returns: Artifacts with spans in the sequential rolling range.
Here is the function:
def sequential_rolling_range(
    artifacts,
    *,
    start_span_number: Optional[int] = None,
    num_spans: int = 1,
    skip_num_recent_spans: int = 0,
    keep_all_versions: bool = False,
    exclude_span_numbers: Sequence[int] = (),
):
  """Returns artifacts with spans in a sequential rolling range.

  A sequential rolling range is a sliding window (size `num_spans`,
  stride 1) over the oldest *consecutive* spans. The consecutive spans must
  lie in [start_span_number, max_span - skip_num_recent_spans], adjusted for
  `exclude_span_numbers` (see the ConsecutiveSpans ResolverOp for details).
  If spans are not consecutive, the range waits for the missing span.

  Only the latest version of each span is considered unless
  `keep_all_versions=True`. Input artifacts must carry both "span" and
  "version" int properties. Corresponds to SequentialRollingRange in TFX.

  Example: for artifacts [A, B, C, D, E] with spans [1, 2, 3, 4, 7] and
  start_span_number=1, num_spans=3, skip_num_recent_spans=1, the consecutive
  spans considered are [1, 2, 3, 4] and the windows produced are
  [[A, B, C], [B, C, D]]. With num_spans=5 only 4 consecutive spans exist,
  so no windows are produced.

  Because multiple windows are returned, this must be consumed via ForEach:

    with ForEach(sequential_rolling_range(
        all_examples, num_spans=10)) as examples_window:
      trainer = Trainer(examples=shuffle(examples_window))

  Args:
    artifacts: The artifacts to filter.
    start_span_number: Smallest span to keep, inclusive. Defaults to the
      minimum span present. NOTE(review): if set below the first available
      span, resolution waits indefinitely for the missing spans to appear.
    num_spans: Window size of the rolling range. If <= 0, it is set to the
      total number of artifacts with consecutive spans in the range.
    skip_num_recent_spans: Number of most recently available (largest) spans
      to skip. Defaults to 0.
    keep_all_versions: If True, keep every version of each span in range;
      otherwise only the latest version per span. Defaults to False.
    exclude_span_numbers: Missing/bad span numbers to exclude.

  Returns:
    Artifacts with spans in the sequential rolling range.
  """
  # -1 is the ConsecutiveSpans sentinel for "use the minimum span present".
  first_span = -1 if start_span_number is None else start_span_number
  consecutive = ops.ConsecutiveSpans(
      artifacts,
      first_span=first_span,
      skip_last_n=skip_num_recent_spans,
      keep_all_versions=keep_all_versions,
      denylist=exclude_span_numbers,
  )
  return ops.SlidingWindow(consecutive, window_size=num_spans)
If the start_span_number is configured wrong (so that it is smaller than the first span number), we will wait indefinitely until the missing spans between start_span_number and the first span number to be appeared. num_spans: The length of the range. If num_spans <= 0, then num_spans is set to the total number of artifacts with consecutive spans in the range. Note that this is also the size of the sliding window of the sequential rolling range. skip_num_recent_spans: Number of most recently available (largest) spans to skip. Defaults to 0. keep_all_versions: If true, all artifacts with spans in the range are kept. If false then if multiple artifacts have the same span, only the span with the latest version is kept. Defaults to False. exclude_span_numbers: The list of missing/bad span numbers to exclude. Returns: Artifacts with spans in the sequential rolling range. |
166,446 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
def _infer_seqential_rolling_range_type(channel, **kwargs): # pylint: disable=unused-argument
return {'window': channel.type} | null |
166,447 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `paired_spans` function. Write a Python function `def paired_spans( artifacts, *, match_version: bool = True, keep_all_versions: bool = False, )` to solve the following problem:
Pairs up Examples from different channels, matching by (span, version). This enables grouping together Artifacts from separate channels. Example usage: NOTE: Notation here is `{artifact_type}:{span}:{version}` >>> paired_spans({'x': channel([X:0:0, X:0:1, X:1:0, X:2:0]), 'y': channel([Y:0:0, Y:0:1, Y:1:0, Y:3:0])}) Loopable([ {'x': channel([X:0:1]), 'y': channel([Y:0:1])}, {'x': channel([X:1:0]), 'y': channel([Y:1:0])}, ]) Note that the span `0` has two versions, but only the latest version `1` is selected. This is the default semantics of the span & version where only the latest version is considered valid of each span. If you want to select all versions including the non-latest ones, you can set `keep_all_versions=True`. >>> paired_spans({'x': channel([X:0:0, X:0:1, X:1:0]), 'y': channel([Y:0:0, Y:0:1, Y:1:0]}, keep_all_versions=True) Loopable([ {'x': channel([X:0:0]), 'y': channel([Y:0:0])}, {'x': channel([X:0:1]), 'y': channel([Y:0:1])}, {'x': channel([X:1:0]), 'y': channel([Y:1:0])}, ]) By default, the version property is considered for pairing, meaning that the version should exact match, otherwise it is not considered the pair. >>> paired_spans({'x': channel([X:0:999, X:1:999]), 'y': channel([Y:0:0, Y:1:0])}) Loopable([]) If you do not care about version, and just want to pair artifacts that consider only the span property (and select latest version for each span), you can set `match_version=False`. >>> paired_spans({'x': channel([X:0:999, X:1:999]), 'y': channel([Y:0:0, Y:1:0, Y:1:1])}, match_version=False) Loopable([ {'x': channel([X:0:999]), 'y': channel([Y:0:0])}, {'x': channel([X:1:999]), 'y': channel([Y:1:1])}, ]) Since `match_version=False` only consideres the latest version of each span, this cannot be used together with `keep_all_versions=True`. As `paired_spans` returns a `Loopable`, it must be used together with `ForEach`. 
For example: ```python with ForEach(paired_spans({'a' : channel_a, 'b' : channel_b})) as pair: component = Component(a=pair['a'], b=pair['b']) ``` NOTE: `paired_spans` can pair Artifacts from N >= 2 channels. Args: artifacts: A dictionary of artifacts. match_version: Whether the version of each span should exactly match. keep_all_versions: Whether to pair up all versions of artifacts, or only the latest version. Defaults to False. Requires match_version = True. Returns: A list of artifact dicts where each dict has as its key the channel key, and as its value has a list with a single artifact having the same span and version across the dict.
Here is the function:
def paired_spans(
    artifacts,
    *,
    match_version: bool = True,
    keep_all_versions: bool = False,
):
  """Groups artifacts from multiple channels by matching (span, version).

  Given a dict of channels, produces one dict per matched group, where each
  value holds the single artifact with that (span, version) from the
  corresponding input channel. Notation below is {type}:{span}:{version}.

  >>> paired_spans({'x': channel([X:0:0, X:0:1, X:1:0, X:2:0]),
                    'y': channel([Y:0:0, Y:0:1, Y:1:0, Y:3:0])})
  Loopable([
      {'x': channel([X:0:1]), 'y': channel([Y:0:1])},
      {'x': channel([X:1:0]), 'y': channel([Y:1:0])},
  ])

  Span 0 has two versions but only the latest (version 1) is selected —
  the default span/version semantics. Set `keep_all_versions=True` to pair
  every version of each span (requires `match_version=True`).

  By default versions must match exactly across channels; pairs whose
  versions differ are dropped. Set `match_version=False` to pair by span
  only, taking the latest version in each channel; this cannot be combined
  with `keep_all_versions=True`.

  The result is a Loopable and must be consumed with ForEach:

    with ForEach(paired_spans({'a': channel_a, 'b': channel_b})) as pair:
      component = Component(a=pair['a'], b=pair['b'])

  NOTE: paired_spans can pair artifacts from N >= 2 channels.

  Args:
    artifacts: A dictionary of artifacts, keyed by channel.
    match_version: Whether the version of each span must match exactly.
    keep_all_versions: Whether to pair all versions of each span rather than
      only the latest. Defaults to False. Requires match_version=True.

  Returns:
    A list of artifact dicts; each dict maps every channel key to a list
    holding the single artifact with the matched span and version.

  Raises:
    ValueError: If keep_all_versions is True while match_version is False.
  """
  if keep_all_versions and not match_version:
    raise ValueError('keep_all_versions = True requires match_version = True.')
  # TODO: b/322812375 - Remove kwargs dict handling once orchestrator knows
  # match_version argument.
  extra_kwargs = {} if match_version else {'match_version': False}
  return ops.PairedSpans(
      artifacts,
      keep_all_versions=keep_all_versions,
      **extra_kwargs,
  )
For example: ```python with ForEach(paired_spans({'a' : channel_a, 'b' : channel_b})) as pair: component = Component(a=pair['a'], b=pair['b']) ``` NOTE: `paired_spans` can pair Artifacts from N >= 2 channels. Args: artifacts: A dictionary of artifacts. match_version: Whether the version of each span should exactly match. keep_all_versions: Whether to pair up all versions of artifacts, or only the latest version. Defaults to False. Requires match_version = True. Returns: A list of artifact dicts where each dict has as its key the channel key, and as its value has a list with a single artifact having the same span and version across the dict. |
166,448 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `filter_property_equal` function. Write a Python function `def filter_property_equal( artifacts, *, key: str, value: Union[int, float, str, bool, artifact.JsonValueType], )` to solve the following problem:
Returns artifacts with matching property values. Example usage: Consider artifacts [A, B, C] with bool property 'blessed' set to [True, True, False]. filter_property_equal( [A, B, C], property_key='blessed', property_value=False, ) will return [C]. Args: artifacts: The list of artifacts to filter. key: The property key to match by. value: The expected property value to match by. Returns: Artifact(s) with matching custom property (or property) values.
Here is the function:
def filter_property_equal(
    artifacts,
    *,
    key: str,
    value: Union[int, float, str, bool, artifact.JsonValueType],
):
  """Keeps only the artifacts whose (non-custom) property `key` equals `value`.

  For example, artifacts [A, B, C] with bool property 'blessed' equal to
  [True, True, False], filtered with key='blessed', value=False, yield [C].

  Args:
    artifacts: The artifacts to filter.
    key: Name of the property to compare.
    value: The property value an artifact must have to be kept.

  Returns:
    The artifacts whose property `key` equals `value`.
  """
  matching = ops.EqualPropertyValues(
      artifacts,
      property_key=key,
      property_value=value,
      is_custom_property=False,
  )
  return matching
166,449 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `filter_custom_property_equal` function. Write a Python function `def filter_custom_property_equal( artifacts, *, key: str, value: Union[int, float, str, bool, artifact.JsonValueType], )` to solve the following problem:
Returns artifacts with matching custom property values. Example usage: Consider artifact [A, B, C] with int custom property 'purity' set to [1, 1, 2]. filter_custom_property_equal( [A, B, C], property_key='purity', property_value=2, ) will return [C]. Args: artifacts: The list of artifacts to filter. key: The property key to match by. value: The expected property value to match by. Returns: Artifact(s) with matching custom property (or property) values.
Here is the function:
def filter_custom_property_equal(
    artifacts,
    *,
    key: str,
    value: Union[int, float, str, bool, artifact.JsonValueType],
):
  """Keeps only the artifacts whose custom property `key` equals `value`.

  For example, artifacts [A, B, C] with int custom property 'purity' equal
  to [1, 1, 2], filtered with key='purity', value=2, yield [C].

  Args:
    artifacts: The artifacts to filter.
    key: Name of the custom property to compare.
    value: The custom property value an artifact must have to be kept.

  Returns:
    The artifacts whose custom property `key` equals `value`.
  """
  matching = ops.EqualPropertyValues(
      artifacts,
      property_key=key,
      property_value=value,
      is_custom_property=True,
  )
  return matching
166,450 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
def _slice(artifacts, **kwargs):
  """Applies ops.Slice, forwarding only the kwargs that were actually set.

  None values are dropped because they cannot be serialized to the IR.
  """
  forwarded = {}
  for name, value in kwargs.items():
    if value is not None:
      forwarded[name] = value
  return ops.Slice(artifacts, **forwarded)
The provided code snippet includes necessary dependencies for implementing the `pick` function. Write a Python function `def pick(channel: channel_types.BaseChannel, i: int, /)` to solve the following problem:
Pick an i'th artifact from channel. Like in python, negative indexing is allowed. If the index is out of range, in synchronous pipeline it raises an error and the component would not get executed. In asynchronous pipeline, it will wait until the input length is sufficient to handle the index. Usage: ```python # In ASYNC pipeline: with ForEach(example_gen.outputs['examples']) as each_example: statistics_gen = StatisticsGen(examples=each_example) latest_statistics_pair = latest_created( statistics_gen.outputs['statistics'], n=2 ) validator = DistributionValidator( baseline_statistics=pick(latest_statistics_pair, 0), statistics=pick(latest_statistics_pair, 1), ... ) ``` Args: channel: A channel instance (e.g. `my_component.outputs['x']`). i: An index to pick. Can be negative. Returns: A channel that represents `inputs[i]`.
Here is the function:
def pick(channel: channel_types.BaseChannel, i: int, /):
  """Selects the artifact at index `i` from the channel.

  Negative indices are supported, as in regular Python indexing. When the
  index falls outside the available artifacts, a synchronous pipeline raises
  an error and the component does not run; an asynchronous pipeline instead
  waits until enough artifacts exist to satisfy the index.

  Usage:

  ```python
  # In ASYNC pipeline:
  with ForEach(example_gen.outputs['examples']) as each_example:
    statistics_gen = StatisticsGen(examples=each_example)

  latest_statistics_pair = latest_created(
      statistics_gen.outputs['statistics'], n=2
  )
  validator = DistributionValidator(
      baseline_statistics=pick(latest_statistics_pair, 0),
      statistics=pick(latest_statistics_pair, 1),
      ...
  )
  ```

  Args:
    channel: A channel instance (e.g. `my_component.outputs['x']`).
    i: The index of the artifact to select; may be negative.

  Returns:
    A channel that represents `inputs[i]`.
  """
  # i == -1 is the only index where `i + 1` wraps to 0 and would produce an
  # empty slice; map it to an open-ended stop instead.
  stop = None if i == -1 else i + 1
  return _slice(channel, start=i, stop=stop, min_count=1)
166,451 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
def _slice(artifacts, **kwargs):
  """Applies ops.Slice to `artifacts`, forwarding only non-None options."""
  # It's important to not pass the None value which cannot be serialized to IR.
  kwargs = {k: v for k, v in kwargs.items() if v is not None}
  return ops.Slice(artifacts, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `slice` function. Write a Python function `def slice( # pylint: disable=redefined-builtin channel: channel_types.BaseChannel, /, start: Optional[int] = None, stop: Optional[int] = None, min_count: Optional[int] = None, )` to solve the following problem:
Pick slice(start, stop) of the input artifacts. Like in python, negative indexing is allowed. If the range is larger than the number of artifacts in the channel, then like in python slice, the result would be truncated to the available values. You can use min_count to ensure the range has enough values. In synchronous pipeline, it is an error if the min_count is not met. In asynchronous pipeline, it will wait until min_count is met. None value in the start or stop index means beginning and the end of the range respectively. For example, `pick_range(x, start=-2, end=None)` means `x[-2:]`. Usage: ```python # In asynchronous pipeline last_week_before_yesterday = pick_range( example_gen.outputs['examples'], start=-7, stop=-1 ) with ForEach(last_week_before_yesterday) as each_example: evaluator = Evaluator(example=each_example, model=latest_model) ``` Args: channel: A channel instance (e.g. `my_component.outputs['x']`). start: A start index (inclusive) of the range. Can be negative. stop: A stop index (exclusive) of the range. Can be negative. min_count: A minimum number of values the range should contain. When specified, synchronous (DAG) pipeline will fail if the min_count is not met. Asynchronous (continuous) pipeine will wait until min_count is met. Returns: A channel that represents `inputs[start:stop]` slice range.
Here is the function:
def slice(  # pylint: disable=redefined-builtin
    channel: channel_types.BaseChannel,
    /,
    start: Optional[int] = None,
    stop: Optional[int] = None,
    min_count: Optional[int] = None,
):
  """Selects the `[start:stop]` sub-range of the input artifacts.

  Semantics follow Python slicing: negative indices are allowed, and a range
  extending past the available artifacts is silently truncated to what exists.
  Pass `min_count` to require a minimum number of results — a synchronous
  (DAG) pipeline fails when the requirement is not met, while an asynchronous
  (continuous) pipeline waits until it is.

  A `None` bound means "from the beginning" / "to the end" respectively; for
  example `pick_range(x, start=-2, end=None)` corresponds to `x[-2:]`.

  Usage:

  ```python
  # In asynchronous pipeline
  last_week_before_yesterday = pick_range(
      example_gen.outputs['examples'], start=-7, stop=-1
  )
  with ForEach(last_week_before_yesterday) as each_example:
    evaluator = Evaluator(example=each_example, model=latest_model)
  ```

  Args:
    channel: A channel instance (e.g. `my_component.outputs['x']`).
    start: Inclusive start index of the range; may be negative.
    stop: Exclusive stop index of the range; may be negative.
    min_count: Minimum number of artifacts the range must contain.

  Returns:
    A channel that represents the `inputs[start:stop]` slice range.
  """
  if start is not None or stop is not None:
    return _slice(channel, start=start, stop=stop, min_count=min_count)
  # slice(None, None) is x[:], i.e. the identity — skip emitting a no-op op.
  return channel
166,452 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
The provided code snippet includes necessary dependencies for implementing the `sliding_window` function. Write a Python function `def sliding_window(channel: channel_types.BaseChannel, window_size: int)` to solve the following problem:
Returns artifacts with a sliding window applied. For example, for a channel with artifacts [A, B, C, D] and window_size = 2, [[A, B], [B, C], [C, D]] will be returned. For Examples artifacts, sequential_rolling_range() should be used instead. Because sliding_window() returns multiple windows, it must be used together with ForEach. Usage: ```python # In ASYNC pipeline: with ForEach( sliding_window(statistics_gen.outputs['statistics'], window_size=2) ) as statistics_pair: distribution_validator = DistributionValidator( baseline_statistics=pick(statistics_pair, 0), statistics=pick(statistics_pair, 1), ... ) ``` Args: channel: A channel instance (e.g. `my_component.outputs['x']`). window_size: The length of the sliding window, must be > 0. Returns: Artifacts with a sliding window applied.
Here is the function:
def sliding_window(channel: channel_types.BaseChannel, window_size: int):
  """Groups the input artifacts into every contiguous window of `window_size`.

  Given artifacts [A, B, C, D] and window_size = 2, the produced windows are
  [[A, B], [B, C], [C, D]]. For Examples artifacts, prefer
  sequential_rolling_range() instead. Because multiple windows are produced,
  the result must be consumed with ForEach.

  Usage:

  ```python
  # In ASYNC pipeline:
  with ForEach(
      sliding_window(statistics_gen.outputs['statistics'], window_size=2)
  ) as statistics_pair:
    distribution_validator = DistributionValidator(
        baseline_statistics=pick(statistics_pair, 0),
        statistics=pick(statistics_pair, 1),
        ...
    )
  ```

  Args:
    channel: A channel instance (e.g. `my_component.outputs['x']`).
    window_size: Length of each window; must be > 0.

  Returns:
    The input artifacts grouped into overlapping windows.
  """
  windowed = ops.SlidingWindow(channel, window_size=window_size)
  return windowed
166,453 | from typing import Optional, Sequence, Union
from absl import logging
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import artifact
from tfx.types import channel as channel_types
def _infer_sliding_window_type(channel: channel_types.BaseChannel, **kwargs):
  """Output-type inferrer for sliding_window(): each window mirrors the input type."""
  del kwargs  # Accepted for signature compatibility; irrelevant to the type.
  return {'window': channel.type}
166,454 | import contextlib
import inspect
from typing import Callable, Type, Union, Mapping, Any, Optional, Sequence, cast, overload
from tfx.dsl.control_flow import for_each_internal
from tfx.dsl.input_resolution import resolver_op
from tfx.types import artifact
from tfx.types import channel
from tfx.types import resolved_channel
from tfx.utils import doc_controls
from tfx.utils import typing_utils
_TypeHint = Union[_ArtifactType, _ArtifactTypeMap]
The provided code snippet includes necessary dependencies for implementing the `_default_type_inferrer` function. Write a Python function `def _default_type_inferrer(*args: Any, **kwargs: Any) -> Optional[_TypeHint]` to solve the following problem:
Default _TypeInferrer that mirrors args[0] type.
Here is the function:
def _default_type_inferrer(*args: Any, **kwargs: Any) -> Optional[_TypeHint]:
  """Fallback output-type inferrer that mirrors the type of the sole argument."""
  del kwargs  # Keyword arguments never influence the inferred type.
  if len(args) != 1:
    # With zero or several positional arguments there is no single type to
    # mirror; the caller must supply an explicit output_type.
    return None
  arg = args[0]
  if typing_utils.is_compatible(arg, Mapping[str, channel.BaseChannel]):
    # A dict of channels maps to a dict of the corresponding artifact types.
    return {key: chan.type for key, chan in arg.items()}
  if isinstance(arg, channel.BaseChannel):
    return arg.type
  return None
166,455 | import contextlib
import inspect
from typing import Callable, Type, Union, Mapping, Any, Optional, Sequence, cast, overload
from tfx.dsl.control_flow import for_each_internal
from tfx.dsl.input_resolution import resolver_op
from tfx.types import artifact
from tfx.types import channel
from tfx.types import resolved_channel
from tfx.utils import doc_controls
from tfx.utils import typing_utils
class ResolverFunction:
def __init__(
self,
f: Callable[..., resolver_op.Node],
*,
output_type: Optional[_TypeHint] = None,
output_type_inferrer: _TypeInferFn = _default_type_inferrer,
loopable_transform: Optional[_LoopableTransformFn] = None,
):
def given_output_type(self, output_type: _TypeHint) -> 'ResolverFunction':
def given_invocation(
self,
f: Callable[..., Any],
*,
args: Sequence[Any],
kwargs: Mapping[str, Any],
) -> 'ResolverFunction':
def _try_convert_to_node(value: Any) -> Any:
def output_type_inferrer(self, f: _TypeInferFn) -> _TypeInferFn:
def __call__(self, *args, **kwargs):
def loop_var_factory(for_each_context: for_each_internal.ForEachContext):
def trace(self, *args: resolver_op.Node, **kwargs: Any) -> resolver_op.Node:
def resolver_function(
f: Callable[..., resolver_op.OpNode],
) -> ResolverFunction:
... | null |
166,456 | import contextlib
import inspect
from typing import Callable, Type, Union, Mapping, Any, Optional, Sequence, cast, overload
from tfx.dsl.control_flow import for_each_internal
from tfx.dsl.input_resolution import resolver_op
from tfx.types import artifact
from tfx.types import channel
from tfx.types import resolved_channel
from tfx.utils import doc_controls
from tfx.utils import typing_utils
_TypeHint = Union[_ArtifactType, _ArtifactTypeMap]
class ResolverFunction:
"""ResolverFunction represents a traceable function of resolver operators.
Resolver function returns some form of channel depending on the function
definition.
It can return a single channel:
trainer = Trainer(
examples=latest_created(example_gen.outputs['examples']))
or a dictionary of channels:
k_fold_inputs = k_fold(example_gen.outputs['examples'], splits=5)
trainer = Trainer(
examples=k_fold_inputs['train'],
)
evaluator = Evaluator(
examples=k_fold_inputs['eval'],
model=trainer.outputs['model'],
)
or a ForEach-loopable channels:
with ForEach(
tfx.dsl.inputs.sequential_rolling_range(
example_gens.outputs['examples'], n=3)) as train_window:
trainer = Trainer(
examples=train_window['examples'])
"""
def __init__(
self,
f: Callable[..., resolver_op.Node],
*,
output_type: Optional[_TypeHint] = None,
output_type_inferrer: _TypeInferFn = _default_type_inferrer,
loopable_transform: Optional[_LoopableTransformFn] = None,
):
"""Constructor.
Args:
f: A python function consists of ResolverOp invocations.
output_type: Static output type, either a single ArtifactType or a
dict[str, ArtifactType]. If output_type is not given,
output_type_inferrer will be used to infer the output type.
output_type_inferrer: An output type inferrer function, which takes the
same arguments as the resolver function and returns the output_type. If
not given, default inferrer (which mirrors the args[0] type) would be
used.
loopable_transform: If the resolver function returns
ARTIFACT_MULTIMAP_LIST, resolver function invocation returns a loopable
value (the type that can be used with ForEach). This transform function
is applied to the `ForEach` with clause target value (often called loop
variable) so that the `as` variables are more easy-to-use. For example,
one can automatically unwrap the dict key if the dict key is
internal-only value and would not be exposed to the user side, using
`loopable_transform = lambda d: d[key]`.
"""
# This instance is a decorated callable so it should reuse the decorated
# function's system attributes.
if hasattr(f, '__name__'):
self.__name__ = f.__name__
if hasattr(f, '__qualname__'):
self.__qualname__ = f.__qualname__
if hasattr(f, '__module__'):
self.__module__ = f.__module__
if hasattr(f, '__doc__'):
self.__doc__ = f.__doc__
self.__signature__ = inspect.signature(f)
self.__wrapped__ = f
self._output_type = output_type
self._output_type_inferrer = output_type_inferrer
self._loopable_transform = loopable_transform
self._invocation = None
def given_output_type(self, output_type: _TypeHint) -> 'ResolverFunction':
"""Temporarily patches output_type."""
if not typing_utils.is_compatible(output_type, _TypeHint):
raise ValueError(
f'Invalid output_type: {output_type}, should be {_TypeHint}.'
)
original = self._output_type
try:
self._output_type = output_type
yield self
finally:
self._output_type = original
def given_invocation(
self,
f: Callable[..., Any],
*,
args: Sequence[Any],
kwargs: Mapping[str, Any],
) -> 'ResolverFunction':
"""Temporarily patches Invocation."""
invocation = resolved_channel.Invocation(
function=f, args=args, kwargs=kwargs
)
if self._invocation is not None:
raise RuntimeError(f'{self.__name__} has already given an invocation.')
self._invocation = invocation
try:
yield self
finally:
self._invocation = None
def _try_convert_to_node(value: Any) -> Any:
"""Try converting python value to resolver_op.Node."""
if isinstance(value, channel.BaseChannel):
return resolver_op.InputNode(value)
if typing_utils.is_compatible(value, Mapping[str, channel.BaseChannel]):
return resolver_op.DictNode(
{
input_key: resolver_op.InputNode(input_channel)
for input_key, input_channel in value.items()
}
)
return value
def output_type_inferrer(self, f: _TypeInferFn) -> _TypeInferFn:
"""Decorator to register resolver function type inferrer.
Usage:
def latest(channel):
...
def latest_type(channel):
return channel.type
Args:
f: A type inference function to decorate.
Returns:
The given function.
"""
self._output_type_inferrer = f
return f
def __call__(self, *args, **kwargs):
"""Invoke a resolver function.
This would trace the @resolver_function with given arguments. BaseChannel
argument is converted to InputNode for tracing. Return value depends on the
actual return value of the @resolver_function.
* If the function returns ARTIFACT_LIST type, __call__ returns a BaseChannel
instance that can be used as a component inputs.
* If the function returns ARTIFACT_MULTIMAP type, __call__ returns a
Mapping[str, BaseChannel].
* If the function returns ARTIFACT_MULTIMAP_LIST, then __call__ returns a
intermediate object that can be unwrapped to Mapping[str, BaseChannel] with
ForEach context manager.
Args:
*args: Arguments to the wrapped function.
**kwargs: Keyword arguments to the wrapped function.
Raises:
RuntimeError: if output_type is invalid or unset.
Returns:
Resolver function result as a BaseChannels.
"""
output_type = self._output_type or (
self._output_type_inferrer(*args, **kwargs)
)
if output_type is None:
raise RuntimeError(
'Unable to infer output type. Please use '
'resolver_function.with_output_type()'
)
args = [self._try_convert_to_node(v) for v in args]
kwargs = {k: self._try_convert_to_node(v) for k, v in kwargs.items()}
out = self.trace(*args, **kwargs)
invocation = self._invocation or resolved_channel.Invocation(
function=self, args=args, kwargs=kwargs
)
if out.output_data_type == resolver_op.DataType.ARTIFACT_LIST:
if self._loopable_transform is not None:
raise TypeError(
'loopable_transform is not applicable for ARTIFACT_LIST output'
)
if not typing_utils.is_compatible(output_type, _ArtifactType):
raise RuntimeError(
f'Invalid output_type {output_type}. Expected {_ArtifactType}'
)
output_type = cast(_ArtifactType, output_type)
return resolved_channel.ResolvedChannel(
artifact_type=output_type, output_node=out, invocation=invocation
)
if out.output_data_type == resolver_op.DataType.ARTIFACT_MULTIMAP:
if self._loopable_transform is not None:
raise TypeError(
'loopable_transform is not applicable for ARTIFACT_MULTIMAP output'
)
if not typing_utils.is_compatible(output_type, _ArtifactTypeMap):
raise RuntimeError(
f'Invalid output_type {output_type}. Expected {_ArtifactTypeMap}'
)
output_type = cast(_ArtifactTypeMap, output_type)
result = {}
for key, artifact_type in output_type.items():
result[key] = resolved_channel.ResolvedChannel(
artifact_type=artifact_type,
output_node=out,
output_key=key,
invocation=invocation,
)
return result
if out.output_data_type == resolver_op.DataType.ARTIFACT_MULTIMAP_LIST:
if not typing_utils.is_compatible(output_type, _ArtifactTypeMap):
raise RuntimeError(
f'Invalid output_type {output_type}. Expected {_ArtifactTypeMap}'
)
def loop_var_factory(for_each_context: for_each_internal.ForEachContext):
result = {}
for key, artifact_type in output_type.items():
result[key] = resolved_channel.ResolvedChannel(
artifact_type=artifact_type,
output_node=out,
output_key=key,
invocation=invocation,
for_each_context=for_each_context,
)
if self._loopable_transform:
result = self._loopable_transform(result)
return result
return for_each_internal.Loopable(loop_var_factory)
# TODO(b/236140660): Make trace() private and only use __call__.
def trace(self, *args: resolver_op.Node, **kwargs: Any) -> resolver_op.Node:
"""Trace resolver function with given node arguments.
Do not call this function directly; Use __call__ only.
Tracing happens by substituting the input arguments (from BaseChannel to
InputNode) and calling the inner python function. Traced result is the
return value of the inner python function. Since ResolverOp invocation
stores all the input arguments (which originated from InputNode), we can
analyze the full ResolverOp invocation graph from the return value.
Trace happens only once during the resolver function invocation. Traced
resolver function (which is a resolver_op.Node) is serialized to the
pipeline IR during compilation (i.e. inner python function is serialized),
and the inner python function is not invoked again on IR interpretation.
Args:
*args: Substituted arguments to the resolver function.
**kwargs: Substituted keyword arguments to the resolver function.
Raises:
RuntimeError: if the tracing fails.
Returns:
A traced result, which is a resolver_op.Node.
"""
# TODO(b/188023509): Better debug support & error message.
result = self.__wrapped__(*args, **kwargs)
if typing_utils.is_compatible(result, Mapping[str, resolver_op.Node]):
result = resolver_op.DictNode(
cast(Mapping[str, resolver_op.Node], result)
)
if not isinstance(result, resolver_op.Node):
raise RuntimeError(
f'Invalid resolver function trace result {result}. Expected to '
'return an output of ResolverOp or a dict of outputs.'
)
return result
def resolver_function(
*,
output_type: Optional[_TypeHint] = None,
unwrap_dict_key: Optional[Union[str, Sequence[str]]] = None,
) -> Callable[..., ResolverFunction]:
... | null |
166,457 | import contextlib
import inspect
from typing import Callable, Type, Union, Mapping, Any, Optional, Sequence, cast, overload
from tfx.dsl.control_flow import for_each_internal
from tfx.dsl.input_resolution import resolver_op
from tfx.types import artifact
from tfx.types import channel
from tfx.types import resolved_channel
from tfx.utils import doc_controls
from tfx.utils import typing_utils
_TypeHint = Union[_ArtifactType, _ArtifactTypeMap]
class ResolverFunction:
"""ResolverFunction represents a traceable function of resolver operators.
Resolver function returns some form of channel depending on the function
definition.
It can return a single channel:
trainer = Trainer(
examples=latest_created(example_gen.outputs['examples']))
or a dictionary of channels:
k_fold_inputs = k_fold(example_gen.outputs['examples'], splits=5)
trainer = Trainer(
examples=k_fold_inputs['train'],
)
evaluator = Evaluator(
examples=k_fold_inputs['eval'],
model=trainer.outputs['model'],
)
or a ForEach-loopable channels:
with ForEach(
tfx.dsl.inputs.sequential_rolling_range(
example_gens.outputs['examples'], n=3)) as train_window:
trainer = Trainer(
examples=train_window['examples'])
"""
def __init__(
self,
f: Callable[..., resolver_op.Node],
*,
output_type: Optional[_TypeHint] = None,
output_type_inferrer: _TypeInferFn = _default_type_inferrer,
loopable_transform: Optional[_LoopableTransformFn] = None,
):
"""Constructor.
Args:
f: A python function consists of ResolverOp invocations.
output_type: Static output type, either a single ArtifactType or a
dict[str, ArtifactType]. If output_type is not given,
output_type_inferrer will be used to infer the output type.
output_type_inferrer: An output type inferrer function, which takes the
same arguments as the resolver function and returns the output_type. If
not given, default inferrer (which mirrors the args[0] type) would be
used.
loopable_transform: If the resolver function returns
ARTIFACT_MULTIMAP_LIST, resolver function invocation returns a loopable
value (the type that can be used with ForEach). This transform function
is applied to the `ForEach` with clause target value (often called loop
variable) so that the `as` variables are more easy-to-use. For example,
one can automatically unwrap the dict key if the dict key is
internal-only value and would not be exposed to the user side, using
`loopable_transform = lambda d: d[key]`.
"""
# This instance is a decorated callable so it should reuse the decorated
# function's system attributes.
if hasattr(f, '__name__'):
self.__name__ = f.__name__
if hasattr(f, '__qualname__'):
self.__qualname__ = f.__qualname__
if hasattr(f, '__module__'):
self.__module__ = f.__module__
if hasattr(f, '__doc__'):
self.__doc__ = f.__doc__
self.__signature__ = inspect.signature(f)
self.__wrapped__ = f
self._output_type = output_type
self._output_type_inferrer = output_type_inferrer
self._loopable_transform = loopable_transform
self._invocation = None
def given_output_type(self, output_type: _TypeHint) -> 'ResolverFunction':
"""Temporarily patches output_type."""
if not typing_utils.is_compatible(output_type, _TypeHint):
raise ValueError(
f'Invalid output_type: {output_type}, should be {_TypeHint}.'
)
original = self._output_type
try:
self._output_type = output_type
yield self
finally:
self._output_type = original
def given_invocation(
self,
f: Callable[..., Any],
*,
args: Sequence[Any],
kwargs: Mapping[str, Any],
) -> 'ResolverFunction':
"""Temporarily patches Invocation."""
invocation = resolved_channel.Invocation(
function=f, args=args, kwargs=kwargs
)
if self._invocation is not None:
raise RuntimeError(f'{self.__name__} has already given an invocation.')
self._invocation = invocation
try:
yield self
finally:
self._invocation = None
def _try_convert_to_node(value: Any) -> Any:
"""Try converting python value to resolver_op.Node."""
if isinstance(value, channel.BaseChannel):
return resolver_op.InputNode(value)
if typing_utils.is_compatible(value, Mapping[str, channel.BaseChannel]):
return resolver_op.DictNode(
{
input_key: resolver_op.InputNode(input_channel)
for input_key, input_channel in value.items()
}
)
return value
def output_type_inferrer(self, f: _TypeInferFn) -> _TypeInferFn:
"""Decorator to register resolver function type inferrer.
Usage:
def latest(channel):
...
def latest_type(channel):
return channel.type
Args:
f: A type inference function to decorate.
Returns:
The given function.
"""
self._output_type_inferrer = f
return f
def __call__(self, *args, **kwargs):
"""Invoke a resolver function.
This would trace the @resolver_function with given arguments. BaseChannel
argument is converted to InputNode for tracing. Return value depends on the
actual return value of the @resolver_function.
* If the function returns ARTIFACT_LIST type, __call__ returns a BaseChannel
instance that can be used as a component inputs.
* If the function returns ARTIFACT_MULTIMAP type, __call__ returns a
Mapping[str, BaseChannel].
* If the function returns ARTIFACT_MULTIMAP_LIST, then __call__ returns a
intermediate object that can be unwrapped to Mapping[str, BaseChannel] with
ForEach context manager.
Args:
*args: Arguments to the wrapped function.
**kwargs: Keyword arguments to the wrapped function.
Raises:
RuntimeError: if output_type is invalid or unset.
Returns:
Resolver function result as a BaseChannels.
"""
output_type = self._output_type or (
self._output_type_inferrer(*args, **kwargs)
)
if output_type is None:
raise RuntimeError(
'Unable to infer output type. Please use '
'resolver_function.with_output_type()'
)
args = [self._try_convert_to_node(v) for v in args]
kwargs = {k: self._try_convert_to_node(v) for k, v in kwargs.items()}
out = self.trace(*args, **kwargs)
invocation = self._invocation or resolved_channel.Invocation(
function=self, args=args, kwargs=kwargs
)
if out.output_data_type == resolver_op.DataType.ARTIFACT_LIST:
if self._loopable_transform is not None:
raise TypeError(
'loopable_transform is not applicable for ARTIFACT_LIST output'
)
if not typing_utils.is_compatible(output_type, _ArtifactType):
raise RuntimeError(
f'Invalid output_type {output_type}. Expected {_ArtifactType}'
)
output_type = cast(_ArtifactType, output_type)
return resolved_channel.ResolvedChannel(
artifact_type=output_type, output_node=out, invocation=invocation
)
if out.output_data_type == resolver_op.DataType.ARTIFACT_MULTIMAP:
if self._loopable_transform is not None:
raise TypeError(
'loopable_transform is not applicable for ARTIFACT_MULTIMAP output'
)
if not typing_utils.is_compatible(output_type, _ArtifactTypeMap):
raise RuntimeError(
f'Invalid output_type {output_type}. Expected {_ArtifactTypeMap}'
)
output_type = cast(_ArtifactTypeMap, output_type)
result = {}
for key, artifact_type in output_type.items():
result[key] = resolved_channel.ResolvedChannel(
artifact_type=artifact_type,
output_node=out,
output_key=key,
invocation=invocation,
)
return result
if out.output_data_type == resolver_op.DataType.ARTIFACT_MULTIMAP_LIST:
if not typing_utils.is_compatible(output_type, _ArtifactTypeMap):
raise RuntimeError(
f'Invalid output_type {output_type}. Expected {_ArtifactTypeMap}'
)
def loop_var_factory(for_each_context: for_each_internal.ForEachContext):
result = {}
for key, artifact_type in output_type.items():
result[key] = resolved_channel.ResolvedChannel(
artifact_type=artifact_type,
output_node=out,
output_key=key,
invocation=invocation,
for_each_context=for_each_context,
)
if self._loopable_transform:
result = self._loopable_transform(result)
return result
return for_each_internal.Loopable(loop_var_factory)
# TODO(b/236140660): Make trace() private and only use __call__.
def trace(self, *args: resolver_op.Node, **kwargs: Any) -> resolver_op.Node:
"""Trace resolver function with given node arguments.
Do not call this function directly; Use __call__ only.
Tracing happens by substituting the input arguments (from BaseChannel to
InputNode) and calling the inner python function. Traced result is the
return value of the inner python function. Since ResolverOp invocation
stores all the input arguments (which originated from InputNode), we can
analyze the full ResolverOp invocation graph from the return value.
Trace happens only once during the resolver function invocation. Traced
resolver function (which is a resolver_op.Node) is serialized to the
pipeline IR during compilation (i.e. inner python function is serialized),
and the inner python function is not invoked again on IR interpretation.
Args:
*args: Substituted arguments to the resolver function.
**kwargs: Substituted keyword arguments to the resolver function.
Raises:
RuntimeError: if the tracing fails.
Returns:
A traced result, which is a resolver_op.Node.
"""
# TODO(b/188023509): Better debug support & error message.
result = self.__wrapped__(*args, **kwargs)
if typing_utils.is_compatible(result, Mapping[str, resolver_op.Node]):
result = resolver_op.DictNode(
cast(Mapping[str, resolver_op.Node], result)
)
if not isinstance(result, resolver_op.Node):
raise RuntimeError(
f'Invalid resolver function trace result {result}. Expected to '
'return an output of ResolverOp or a dict of outputs.'
)
return result
The provided code snippet includes necessary dependencies for implementing the `resolver_function` function. Write a Python function `def resolver_function( f: Optional[Callable[..., resolver_op.OpNode]] = None, *, output_type: Optional[_TypeHint] = None, unwrap_dict_key: Optional[Union[str, Sequence[str]]] = None, )` to solve the following problem:
Decorator for the resolver function. Args: f: Python function to decorate. See the usage at canned_resolver_function.py output_type: Optional static output type hint. unwrap_dict_key: If present, it will add loopable transform that unwraps dictionary key(s) so that `ForEach` captured value is a single channel, or a tuple of channels. This is only valid if the resolver function return value is ARTIFACT_MULTIMAP_LIST type. Returns: A ResolverFunction, or a decorator to create ResolverFunction.
Here is the function:
def resolver_function(
    f: Optional[Callable[..., resolver_op.OpNode]] = None,
    *,
    output_type: Optional[_TypeHint] = None,
    unwrap_dict_key: Optional[Union[str, Sequence[str]]] = None,
):
  """Decorator for the resolver function.

  Args:
    f: Python function to decorate. See the usage at canned_resolver_function.py
    output_type: Optional static output type hint.
    unwrap_dict_key: If present, it will add loopable transform that unwraps
      dictionary key(s) so that `ForEach` captured value is a single channel, or
      a tuple of channels. This is only valid if the resolver function return
      value is ARTIFACT_MULTIMAP_LIST type.

  Returns:
    A ResolverFunction, or a decorator to create ResolverFunction.
  """
  # Bare usage: @resolver_function without parentheses / keyword options.
  if f is not None:
    return ResolverFunction(f)

  # Build the optional loopable transform from unwrap_dict_key.
  transform = None
  if unwrap_dict_key:
    if isinstance(unwrap_dict_key, str):
      single_key = cast(str, unwrap_dict_key)
      transform = lambda d: d[single_key]
    elif typing_utils.is_compatible(unwrap_dict_key, Sequence[str]):
      multi_keys = cast(Sequence[str], unwrap_dict_key)
      transform = lambda d: tuple(d[k] for k in multi_keys)
    else:
      raise ValueError(
          'Invalid unwrap_dict_key: Expected str or Sequence[str] but got '
          f'{unwrap_dict_key}'
      )

  def decorator(f):
    return ResolverFunction(
        f,
        output_type=output_type,
        loopable_transform=transform,
    )

  return decorator
166,458 | import inspect
from typing import Any, Dict, Generic, List, Type, TypeVar, Union, get_args, get_origin
from tfx.dsl.component.experimental import json_compat
from tfx.types import artifact
from tfx.utils import deprecation_utils
The provided code snippet includes necessary dependencies for implementing the `_check_valid_input_artifact_params` function. Write a Python function `def _check_valid_input_artifact_params(params)` to solve the following problem:
Check if the annotation params is an Artifact or a List[Artifact].
Here is the function:
def _check_valid_input_artifact_params(params):
"""Check if the annotation params is an Artifact or a List[Artifact]."""
# If the typehint is List[Artifact], unwrap it.
if params is list or get_origin(params) is list:
generic_arg = get_args(params)
if generic_arg is not None and len(generic_arg) == 1:
params = generic_arg[0]
else:
return False
if (
inspect.isclass(params)
and issubclass(params, artifact.Artifact)
and hasattr(params, 'TYPE_NAME')
):
return True
else:
return False | Check if the annotation params is an Artifact or a List[Artifact]. |
166,459 | import copy
import functools
import types
import typing
from typing import Any, Callable, ClassVar, Dict, List, Optional, Protocol, Type, Union
from tfx import types as tfx_types
from tfx.dsl.component.experimental import function_parser
from tfx.dsl.component.experimental import json_compat
from tfx.dsl.component.experimental import utils
from tfx.dsl.components.base import base_beam_component
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.types import channel
from tfx.types import system_executions
The provided code snippet includes necessary dependencies for implementing the `_extract_func_args` function. Write a Python function `def _extract_func_args( obj: str, arg_formats: Dict[str, int], arg_defaults: Dict[str, Any], input_dict: Dict[str, List[tfx_types.Artifact]], output_dict: Dict[str, List[tfx_types.Artifact]], exec_properties: Dict[str, Any], beam_pipeline: Optional[_BeamPipeline] = None, ) -> Dict[str, Any]` to solve the following problem:
Extracts function arguments for the decorated function.
Here is the function:
def _extract_func_args(
obj: str,
arg_formats: Dict[str, int],
arg_defaults: Dict[str, Any],
input_dict: Dict[str, List[tfx_types.Artifact]],
output_dict: Dict[str, List[tfx_types.Artifact]],
exec_properties: Dict[str, Any],
beam_pipeline: Optional[_BeamPipeline] = None,
) -> Dict[str, Any]:
"""Extracts function arguments for the decorated function."""
result = {}
for name, arg_format in arg_formats.items():
if arg_format == utils.ArgFormats.INPUT_ARTIFACT:
input_list = input_dict.get(name, [])
if len(input_list) == 1:
result[name] = input_list[0]
elif not input_list and name in arg_defaults:
# Do not pass the missing optional input.
pass
else:
raise ValueError(
('Expected input %r to %s to be a singleton ValueArtifact channel '
'(got %s instead).') % (name, obj, input_list))
elif arg_format == utils.ArgFormats.LIST_INPUT_ARTIFACTS:
result[name] = input_dict.get(name, [])
elif arg_format == utils.ArgFormats.OUTPUT_ARTIFACT:
output_list = output_dict.get(name, [])
if len(output_list) == 1:
result[name] = output_list[0]
else:
raise ValueError(
('Expected output %r to %s to be a singleton ValueArtifact channel '
'(got %s instead).') % (name, obj, output_list))
elif arg_format == utils.ArgFormats.ARTIFACT_VALUE:
input_list = input_dict.get(name, [])
if len(input_list) == 1:
result[name] = input_list[0].value
elif not input_list and name in arg_defaults:
# Do not pass the missing optional input.
pass
else:
raise ValueError(
('Expected input %r to %s to be a singleton ValueArtifact channel '
'(got %s instead).') % (name, obj, input_list))
elif arg_format == utils.ArgFormats.PARAMETER:
if name in exec_properties:
result[name] = exec_properties[name]
elif name in arg_defaults:
# Do not pass the missing optional input.
pass
else:
raise ValueError(
('Expected non-optional parameter %r of %s to be provided, but no '
'value was passed.') % (name, obj))
elif arg_format == utils.ArgFormats.BEAM_PARAMETER:
result[name] = beam_pipeline
if name in arg_defaults and arg_defaults[name] is not None:
raise ValueError('beam Pipeline parameter does not allow default ',
'value other than None.')
else:
raise ValueError('Unknown argument format: %r' % (arg_format,))
return result | Extracts function arguments for the decorated function. |
166,460 | import copy
import functools
import types
import typing
from typing import Any, Callable, ClassVar, Dict, List, Optional, Protocol, Type, Union
from tfx import types as tfx_types
from tfx.dsl.component.experimental import function_parser
from tfx.dsl.component.experimental import json_compat
from tfx.dsl.component.experimental import utils
from tfx.dsl.components.base import base_beam_component
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.types import channel
from tfx.types import system_executions
The provided code snippet includes necessary dependencies for implementing the `_assign_returned_values` function. Write a Python function `def _assign_returned_values( function, outputs: Dict[str, Any], returned_values: Dict[str, Any], output_dict: Dict[str, List[tfx_types.Artifact]], json_typehints: Dict[str, Type], # pylint: disable=g-bare-generic ) -> Dict[str, List[tfx_types.Artifact]]` to solve the following problem:
Validates and assigns the outputs to the output_dict.
Here is the function:
def _assign_returned_values(
function,
outputs: Dict[str, Any],
returned_values: Dict[str, Any],
output_dict: Dict[str, List[tfx_types.Artifact]],
json_typehints: Dict[str, Type], # pylint: disable=g-bare-generic
) -> Dict[str, List[tfx_types.Artifact]]:
"""Validates and assigns the outputs to the output_dict."""
result = copy.deepcopy(output_dict)
if not isinstance(outputs, dict):
raise ValueError(
('Expected component executor function %s to return a dict of '
'outputs (got %r instead).') % (function, outputs))
# Assign returned ValueArtifact values.
for name, is_optional in returned_values.items():
if name not in outputs:
raise ValueError(
'Did not receive expected output %r as return value from '
'component executor function %s.' % (name, function))
if not is_optional and outputs[name] is None:
raise ValueError('Non-nullable output %r received None return value from '
'component executor function %s.' % (name, function))
try:
result[name][0].value = outputs[name]
except TypeError as e:
raise TypeError(
('Return value %r for output %r is incompatible with output type '
'%r.') %
(outputs[name], name, result[name][0].__class__)) from e
# Handle JsonValue runtime type check.
if name in json_typehints:
ret = json_compat.check_strict_json_compat(outputs[name],
json_typehints[name])
if not ret:
raise TypeError(
('Return value %r for output %r is incompatible with output type '
'%r.') % (outputs[name], name, json_typehints[name]))
return result | Validates and assigns the outputs to the output_dict. |
166,461 | from typing import Union, cast, Any
from tfx.dsl.context_managers import dsl_context_manager
from tfx.dsl.control_flow import for_each_internal
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import channel as channel_types
def _for_each_impl(channel: channel_types.BaseChannel):
  """Wraps `channel` under key 'out' and applies the `Unnest` op on it."""
  wrapped = {'out': channel}
  return ops.Unnest(wrapped, key='out')
166,462 | from typing import Union, cast, Any
from tfx.dsl.context_managers import dsl_context_manager
from tfx.dsl.control_flow import for_each_internal
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution.ops import ops
from tfx.types import channel as channel_types
def _for_each_output_type(channel: channel_types.BaseChannel):
return {'out': channel.type} | null |
166,463 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
def _fully_qualified_name(cls: Type[Any]):
  """Returns the full name of `cls`, resolving deprecation aliases first."""
  return name_utils.get_full_name(
      deprecation_utils.get_first_nondeprecated_class(cls))
166,464 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_validate_pipeline` function. Write a Python function `def _validate_pipeline(tfx_pipeline: pipeline.Pipeline, parent_pipelines: List[pipeline.Pipeline])` to solve the following problem:
Performs pre-compile validations.
Here is the function:
def _validate_pipeline(tfx_pipeline: pipeline.Pipeline,
                       parent_pipelines: List[pipeline.Pipeline]):
  """Performs pre-compile validations."""
  is_async = tfx_pipeline.execution_mode == pipeline.ExecutionMode.ASYNC
  # ASYNC pipelines reject task dependencies and resolver nodes.
  if is_async and compiler_utils.has_task_dependency(tfx_pipeline):
    raise ValueError("Task dependency is not supported in ASYNC mode.")
  if is_async and compiler_utils.has_resolver_node(tfx_pipeline):
    raise ValueError(
        "Resolver nodes can not be used in ASYNC mode. Use resolver "
        "functions instead."
    )
  # Components must already be in topological order before compilation.
  if not compiler_utils.ensure_topological_order(tfx_pipeline.components):
    raise ValueError("Pipeline components are not topologically sorted.")
  # Only SYNC pipelines may be nested inside a parent pipeline.
  if (parent_pipelines
      and tfx_pipeline.execution_mode != pipeline.ExecutionMode.SYNC):
    raise ValueError("Subpipeline has to be Sync execution mode.")
166,465 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_set_node_context` function. Write a Python function `def _set_node_context(node: pipeline_pb2.PipelineNode, pipeline_ctx: compiler_context.PipelineContext)` to solve the following problem:
Compiles the node contexts of a pipeline node.
Here is the function:
def _set_node_context(node: pipeline_pb2.PipelineNode,
                      pipeline_ctx: compiler_context.PipelineContext):
  """Compiles the node contexts of a pipeline node.

  Mutates `node.contexts` in place, appending in order: the pipeline context,
  the pipeline-run context (sync mode only), contexts inherited from parent
  pipelines (iterated in reverse of `pipeline_ctx.parent_pipelines`), and the
  per-node context.

  Args:
    node: The pipeline node proto to populate.
    pipeline_ctx: Compilation context carrying pipeline info and parents.
  """
  # Context for the pipeline, across pipeline runs.
  pipeline_context_pb = node.contexts.contexts.add()
  pipeline_context_pb.type.name = constants.PIPELINE_CONTEXT_TYPE_NAME
  pipeline_context_pb.name.field_value.string_value = (
      pipeline_ctx.pipeline_info.pipeline_context_name)
  # Context for the current pipeline run.
  if pipeline_ctx.is_sync_mode:
    pipeline_run_context_pb = node.contexts.contexts.add()
    pipeline_run_context_pb.type.name = constants.PIPELINE_RUN_CONTEXT_TYPE_NAME
    # TODO(kennethyang): Migrate pipeline run id to structural_runtime_parameter
    # To keep existing IR textprotos used in tests unchanged, we only use
    # structural_runtime_parameter for subpipelines. After the subpipeline being
    # implemented, we will need to migrate normal pipelines to
    # structural_runtime_parameter as well for consistency. Similar for below.
    if pipeline_ctx.is_subpipeline:
      # Subpipeline run context name is "<pipeline_context_name>_<run_id>".
      compiler_utils.set_structural_runtime_parameter_pb(
          pipeline_run_context_pb.name.structural_runtime_parameter, [
              f"{pipeline_ctx.pipeline_info.pipeline_context_name}_",
              (constants.PIPELINE_RUN_ID_PARAMETER_NAME, str)
          ])
    else:
      compiler_utils.set_runtime_parameter_pb(
          pipeline_run_context_pb.name.runtime_parameter,
          constants.PIPELINE_RUN_ID_PARAMETER_NAME, str)
  # Contexts inherited from the parent pipelines.
  for i, parent_pipeline in enumerate(pipeline_ctx.parent_pipelines[::-1]):
    parent_pipeline_context_pb = node.contexts.contexts.add()
    parent_pipeline_context_pb.type.name = constants.PIPELINE_CONTEXT_TYPE_NAME
    parent_pipeline_context_pb.name.field_value.string_value = (
        parent_pipeline.pipeline_info.pipeline_context_name)
    if parent_pipeline.execution_mode == pipeline.ExecutionMode.SYNC:
      pipeline_run_context_pb = node.contexts.contexts.add()
      pipeline_run_context_pb.type.name = (
          constants.PIPELINE_RUN_CONTEXT_TYPE_NAME)
      # TODO(kennethyang): Migrate pipeline run id to structural runtime
      # parameter for the similar reason mentioned above.
      # Use structural runtime parameter to represent pipeline_run_id except
      # for the root level pipeline, for backward compatibility.
      if i == len(pipeline_ctx.parent_pipelines) - 1:
        compiler_utils.set_runtime_parameter_pb(
            pipeline_run_context_pb.name.runtime_parameter,
            constants.PIPELINE_RUN_ID_PARAMETER_NAME, str)
      else:
        compiler_utils.set_structural_runtime_parameter_pb(
            pipeline_run_context_pb.name.structural_runtime_parameter, [
                f"{parent_pipeline.pipeline_info.pipeline_context_name}_",
                (constants.PIPELINE_RUN_ID_PARAMETER_NAME, str)
            ])
  # Context for the node, across pipeline runs.
  node_context_pb = node.contexts.contexts.add()
  node_context_pb.type.name = constants.NODE_CONTEXT_TYPE_NAME
  node_context_pb.name.field_value.string_value = (
      compiler_utils.node_context_name(
          pipeline_ctx.pipeline_info.pipeline_context_name,
          node.node_info.id))
166,466 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_set_node_outputs` function. Write a Python function `def _set_node_outputs(node: pipeline_pb2.PipelineNode, tfx_node_outputs: Dict[str, types.Channel])` to solve the following problem:
Compiles the outputs of a pipeline node.
Here is the function:
def _set_node_outputs(node: pipeline_pb2.PipelineNode,
                      tfx_node_outputs: Dict[str, types.Channel]):
  """Compiles the outputs of a pipeline node."""
  node_id = node.node_info.id
  for output_key, output_channel in tfx_node_outputs.items():
    spec = compiler_utils.output_spec_from_channel(
        channel=output_channel, node_id=node_id)
    node.outputs.outputs[output_key].CopyFrom(spec)
166,467 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
def _generate_input_spec_for_outputs(
    node: pipeline_pb2.PipelineNode,
    tfx_node_outputs: Dict[str, types.Channel],
    negative_context_filter: Callable[[pipeline_pb2.ContextSpec],
                                      bool] = lambda _: False
) -> Iterator[Tuple[types.Channel, pipeline_pb2.InputSpec.Channel]]:
  """Generates InputSpec in producer node, to be used by consumer node later.

  Args:
    node: The producer pipeline node whose outputs are exposed.
    tfx_node_outputs: The producer node's output channels, keyed by output key.
    negative_context_filter: Predicate over a ContextSpec; contexts for which
      it returns True are EXCLUDED from the generated context queries.

  Yields:
    (channel, InputSpec.Channel) pairs, one per entry in `tfx_node_outputs`.
  """
  for key, value in tfx_node_outputs.items():
    channel_pb = pipeline_pb2.InputSpec.Channel()
    channel_pb.producer_node_query.id = node.node_info.id
    # Copy over the producer node's contexts, minus the filtered-out ones.
    for context in node.contexts.contexts:
      if negative_context_filter(context):
        continue
      context_query = channel_pb.context_queries.add()
      context_query.type.CopyFrom(context.type)
      context_query.name.CopyFrom(context.name)
    artifact_type = value.type._get_artifact_type()  # pylint: disable=protected-access
    channel_pb.artifact_query.type.CopyFrom(artifact_type)
    # Properties are cleared so the query matches on type name alone.
    channel_pb.artifact_query.type.ClearField("properties")
    channel_pb.output_key = key
    yield value, channel_pb
The provided code snippet includes necessary dependencies for implementing the `_generate_input_spec_for_pipeline_outputs` function. Write a Python function `def _generate_input_spec_for_pipeline_outputs( end_node: pipeline_pb2.PipelineNode, p: pipeline.Pipeline ) -> Iterator[Tuple[types.Channel, pipeline_pb2.InputSpec.Channel]]` to solve the following problem:
Generates InputSpec for pipeline outputs to be consumed later.
Here is the function:
def _generate_input_spec_for_pipeline_outputs(
    end_node: pipeline_pb2.PipelineNode, p: pipeline.Pipeline
) -> Iterator[Tuple[types.Channel, pipeline_pb2.InputSpec.Channel]]:
  """Generates InputSpec for pipeline outputs to be consumed later."""

  def _is_inner_pipeline_run_context(spec: pipeline_pb2.ContextSpec) -> bool:
    # Pipeline-run contexts named via a structural runtime parameter belong
    # to the inner pipeline's runs and must not leak into the consumer query.
    if spec.type.name != constants.PIPELINE_RUN_CONTEXT_TYPE_NAME:
      return False
    return spec.name.HasField("structural_runtime_parameter")

  yield from _generate_input_spec_for_outputs(
      end_node, p.outputs, _is_inner_pipeline_run_context)
166,468 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_set_node_parameters` function. Write a Python function `def _set_node_parameters(node: pipeline_pb2.PipelineNode, tfx_node: base_node.BaseNode)` to solve the following problem:
Compiles exec properties of a pipeline node.
Here is the function:
def _set_node_parameters(node: pipeline_pb2.PipelineNode,
                         tfx_node: base_node.BaseNode):
  """Compiles exec properties of a pipeline node.

  Writes each non-None exec property of `tfx_node` into
  `node.parameters.parameters`, dispatching on the property's type:
  RuntimeParameter, Placeholder, or a plain value.

  Args:
    node: The pipeline node proto to populate.
    tfx_node: The DSL node whose exec_properties are compiled.

  Raises:
    ValueError: If a plain value cannot be encoded as a parameter value.
  """
  for key, value in tfx_node.exec_properties.items():
    # None means "unset"; no parameter entry is written.
    if value is None:
      continue
    parameter_value = node.parameters.parameters[key]
    # Order matters, because runtime parameter can be in serialized string.
    if isinstance(value, data_types.RuntimeParameter):
      compiler_utils.set_runtime_parameter_pb(parameter_value.runtime_parameter,
                                              value.name, value.ptype,
                                              value.default)
    elif isinstance(value, placeholder.Placeholder):
      compiler_utils.validate_exec_property_placeholder(key, value)
      parameter_value.placeholder.CopyFrom(
          channel_utils.encode_placeholder_with_channels(
              value, compiler_utils.implicit_channel_key
          )
      )
    else:
      # Plain Python value: encode directly, surfacing a clearer error on
      # unsupported types.
      try:
        data_types_utils.set_parameter_value(parameter_value, value)
      except ValueError as e:
        raise ValueError(
            "Component {} got unsupported parameter {} with type {}.".format(
                tfx_node.id, key, type(value))) from e
166,469 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_set_node_execution_options` function. Write a Python function `def _set_node_execution_options( node: pipeline_pb2.PipelineNode, tfx_node: base_node.BaseNode, pipeline_ctx: compiler_context.PipelineContext, enable_cache: bool, )` to solve the following problem:
Compiles and sets NodeExecutionOptions of a pipeline node.
Here is the function:
def _set_node_execution_options(
    node: pipeline_pb2.PipelineNode,
    tfx_node: base_node.BaseNode,
    pipeline_ctx: compiler_context.PipelineContext,
    enable_cache: bool,
):
  """Compiles and sets NodeExecutionOptions of a pipeline node.

  Validates the DSL-level NodeExecutionOptions (rejecting sync-only features
  in ASYNC pipelines and inconsistent lifetime settings), compiles them into
  the proto, applies the caching flag, and, for ASYNC pipelines, compiles
  per-input trigger settings.

  Args:
    node: The pipeline node proto to populate.
    tfx_node: The DSL node whose execution options are compiled.
    pipeline_ctx: Compilation context (used to check ASYNC mode).
    enable_cache: Whether execution caching is enabled for this node.

  Raises:
    ValueError: If sync-only options are used in an ASYNC pipeline, or if
      LIFETIME_END_WHEN_SUBGRAPH_CANNOT_PROGRESS is set without
      lifetime_start.
  """
  options_py = tfx_node.node_execution_options
  if options_py:
    assert isinstance(options_py, execution_options_utils.NodeExecutionOptions)
    # Triggering strategies, success optionality and lifetimes are sync-only.
    if (
        options_py.trigger_strategy
        not in (
            pipeline_pb2.NodeExecutionOptions.TriggerStrategy.TRIGGER_STRATEGY_UNSPECIFIED,
            pipeline_pb2.NodeExecutionOptions.TriggerStrategy.ALL_UPSTREAM_NODES_SUCCEEDED,
        )
        or options_py.success_optional
        or options_py.lifetime_start
    ) and pipeline_ctx.is_async_mode:
      raise ValueError(
          "Node level triggering strategies, success optionality, and resource"
          " lifetimes are not allowed in ASYNC pipelines."
      )
    # The subgraph-lifetime trigger strategy is meaningless without a
    # lifetime_start node.
    if (
        options_py.trigger_strategy
        == pipeline_pb2.NodeExecutionOptions.LIFETIME_END_WHEN_SUBGRAPH_CANNOT_PROGRESS
        and not options_py.lifetime_start
    ):
      raise ValueError(
          f"Node {node.node_info.id} has the trigger strategy"
          " LIFETIME_END_WHEN_SUBGRAPH_CANNOT_PROGRESS set but no"
          " lifetime_start. In order to use the trigger strategy the node"
          " must have a lifetime_start."
      )
    options_proto = node_execution_options_utils.compile_node_execution_options(
        options_py
    )
  else:
    options_proto = pipeline_pb2.NodeExecutionOptions()
  options_proto.caching_options.enable_cache = enable_cache
  node.execution_options.CopyFrom(options_proto)
  # TODO: b/310726801 - We should throw an error if this is an invalid
  # configuration.
  if pipeline_ctx.is_async_mode:
    # Compile per-input trigger settings for ASYNC pipelines.
    input_triggers = node.execution_options.async_trigger.input_triggers
    for input_key, input_channel in tfx_node.inputs.items():
      if isinstance(input_channel.input_trigger, channel_types.NoTrigger):
        input_triggers[input_key].no_trigger = True
      if isinstance(
          input_channel.input_trigger, channel_types.TriggerByProperty
      ):
        input_triggers[input_key].trigger_by_property.property_keys.extend(
            input_channel.input_trigger.property_keys
        )
166,470 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_find_runtime_upstream_node_ids` function. Write a Python function `def _find_runtime_upstream_node_ids( pipeline_ctx: compiler_context.PipelineContext, here: base_node.BaseNode) -> List[str]` to solve the following problem:
Finds all upstream nodes that the current node depends on.
Here is the function:
def _find_runtime_upstream_node_ids(
pipeline_ctx: compiler_context.PipelineContext,
here: base_node.BaseNode) -> List[str]:
"""Finds all upstream nodes that the current node depends on."""
result = set()
for up in itertools.chain(here.upstream_nodes,
pipeline_ctx.implicit_upstream_nodes(here)):
if pipeline_ctx.is_async_mode and compiler_utils.is_resolver(up):
result.update(_find_runtime_upstream_node_ids(pipeline_ctx, up))
else:
result.add(up.id)
# Validate that upstream nodes are present in the pipeline.
for up_id in result:
if up_id not in pipeline_ctx.pipeline_node_ids:
raise ValueError(f"Node {here.id} references upstream node {up_id} "
"which is not present in the pipeline.")
# Sort result so that compiler generates consistent results.
return sorted(result) | Finds all upstream nodes that the current node depends on. |
166,471 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_find_runtime_downstream_node_ids` function. Write a Python function `def _find_runtime_downstream_node_ids(context: compiler_context.PipelineContext, here: base_node.BaseNode) -> List[str]` to solve the following problem:
Finds all downstream nodes that depend on the current node.
Here is the function:
def _find_runtime_downstream_node_ids(context: compiler_context.PipelineContext,
here: base_node.BaseNode) -> List[str]:
"""Finds all downstream nodes that depend on the current node."""
result = set()
if not context:
return result
for down in itertools.chain(here.downstream_nodes,
context.implicit_downstream_nodes(here)):
if context.is_async_mode and compiler_utils.is_resolver(down):
result.update(_find_runtime_downstream_node_ids(context, down))
else:
result.add(down.id)
# Validate that downstream nodes are present in the pipeline.
for down_id in result:
if down_id not in context.pipeline_node_ids:
raise ValueError(f"Node {here.id} references downstream node {down_id} "
"which is not present in the pipeline.")
# Sort result so that compiler generates consistent results.
return sorted(result) | Finds all downstream nodes that depend on the current node. |
166,472 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_begin_node_is_upstream` function. Write a Python function `def _begin_node_is_upstream(node: base_node.BaseNode, tfx_pipeline: pipeline.Pipeline) -> bool` to solve the following problem:
Checks if a node needs to declare the begin node as its upstream node.
Here is the function:
def _begin_node_is_upstream(node: base_node.BaseNode,
                            tfx_pipeline: pipeline.Pipeline) -> bool:
  """Checks if a node needs to declare the begin node as its upstream node.

  Args:
    node: The node whose data dependencies are examined.
    tfx_pipeline: The pipeline that owns the (virtual) begin node.

  Returns:
    True if the node directly or indirectly consumes a channel whose
    dependent node ID is the pipeline ID (i.e. a pipeline input).
  """
  # Check if the PipelineInputChannel (whose dependent node ID is the pipeline
  # ID) is either directly or indirectly used for the node inputs.
return tfx_pipeline.id in compiler_utils.get_data_dependent_node_ids(node) | Checks if a node needs to declare the begin node as its upstream node. |
166,473 | import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.compiler import node_execution_options_utils
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.node_execution_options import utils as execution_options_utils
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_end_node_is_downstream` function. Write a Python function `def _end_node_is_downstream(node: base_node.BaseNode, tfx_pipeline: pipeline.Pipeline) -> bool` to solve the following problem:
Checks if a node needs to declare the end node as its downstream node.
Here is the function:
def _end_node_is_downstream(node: base_node.BaseNode,
                            tfx_pipeline: pipeline.Pipeline) -> bool:
  """Checks if a node needs to declare the end node as its downstream node."""
  # Given a node N inside a pipeline P, N needs to declare P_end as its
  # downstream node iff N produces at least one output that is also an
  # output of P.
  pipeline_outputs_set = {c.wrapped for c in tfx_pipeline.outputs.values()}
  for node_output in node.outputs.values():
    if node_output in pipeline_outputs_set:
      return True
return False | Checks if a node needs to declare the eng node as its downstream node. |
166,474 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `resolve_execution_mode` function. Write a Python function `def resolve_execution_mode(tfx_pipeline: pipeline.Pipeline)` to solve the following problem:
Resolves execution mode for a tfx pipeline. Args: tfx_pipeline: a TFX pipeline python object assembled by SDK. Returns: a proto enum reflecting the execution mode of the pipeline. Raises: RuntimeError: when execution mode is ASYNC while `enable_cache` is true. ValueError: when seeing unrecognized execution mode.
Here is the function:
def resolve_execution_mode(tfx_pipeline: pipeline.Pipeline):
  """Resolves execution mode for a tfx pipeline.

  Args:
    tfx_pipeline: a TFX pipeline python object assembled by SDK.

  Returns:
    a proto enum reflecting the execution mode of the pipeline.

  Raises:
    RuntimeError: when execution mode is ASYNC while `enable_cache` is true.
    ValueError: when seeing unrecognized execution mode.
  """
  if tfx_pipeline.execution_mode == pipeline.ExecutionMode.SYNC:
    return pipeline_pb2.Pipeline.ExecutionMode.SYNC
  elif tfx_pipeline.execution_mode == pipeline.ExecutionMode.ASYNC:
    # Caching is only supported for SYNC pipelines; reject the combination
    # up front at compile time.
    if tfx_pipeline.enable_cache:
      raise RuntimeError(
          "Caching is a feature only available to synchronous execution pipelines."
      )
    return pipeline_pb2.Pipeline.ExecutionMode.ASYNC
  else:
    raise ValueError(
f"Got unsupported execution mode: {tfx_pipeline.execution_mode}") | Resolves execution mode for a tfx pipeline. Args: tfx_pipeline: a TFX pipeline python object assembled by SDK. Returns: a proto enum reflecting the execution mode of the pipeline. Raises: RuntimeError: when execution mode is ASYNC while `enable_cache` is true. ValueError: when seeing unrecognized execution mode. |
166,475 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `is_importer` function. Write a Python function `def is_importer(node: base_node.BaseNode) -> bool` to solve the following problem:
Helper function to check if a TFX node is an Importer.
Here is the function:
def is_importer(node: base_node.BaseNode) -> bool:
  """Helper function to check if a TFX node is an Importer.

  Returns:
    True iff `node` is an `importer.Importer` instance.
  """
return isinstance(node, importer.Importer) | Helper function to check if a TFX node is an Importer. |
166,476 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `pipeline_begin_node_type_name` function. Write a Python function `def pipeline_begin_node_type_name(p: pipeline.Pipeline) -> str` to solve the following problem:
Builds the type name of a Pipeline Begin node.
Here is the function:
def pipeline_begin_node_type_name(p: pipeline.Pipeline) -> str:
  """Builds the type name of a Pipeline Begin node.

  The type name is the pipeline's type name followed by
  `constants.PIPELINE_BEGIN_NODE_SUFFIX`.
  """
return f"{p.type}{constants.PIPELINE_BEGIN_NODE_SUFFIX}" | Builds the type name of a Pipeline Begin node. |
166,477 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `pipeline_end_node_type_name` function. Write a Python function `def pipeline_end_node_type_name(p: pipeline.Pipeline) -> str` to solve the following problem:
Builds the type name of a Pipeline End node.
Here is the function:
def pipeline_end_node_type_name(p: pipeline.Pipeline) -> str:
  """Builds the type name of a Pipeline End node.

  The type name is the pipeline's type name followed by
  `constants.PIPELINE_END_NODE_SUFFIX`.
  """
return f"{p.type}{constants.PIPELINE_END_NODE_SUFFIX}" | Builds the type name of a Pipeline End node. |
166,478 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `pipeline_begin_node_id` function. Write a Python function `def pipeline_begin_node_id(p: pipeline.Pipeline) -> str` to solve the following problem:
Builds the node id of a Pipeline Begin node.
Here is the function:
def pipeline_begin_node_id(p: pipeline.Pipeline) -> str:
  """Builds the node id of a Pipeline Begin node.

  The id is the pipeline's id followed by
  `constants.PIPELINE_BEGIN_NODE_SUFFIX`.
  """
return f"{p.id}{constants.PIPELINE_BEGIN_NODE_SUFFIX}" | Builds the node id of a Pipeline Begin node. |
166,479 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
def pipeline_end_node_id_from_pipeline_id(pipeline_id: str) -> str:
  """Derives the Pipeline End node id for the given pipeline id.

  The id is `pipeline_id` with `constants.PIPELINE_END_NODE_SUFFIX` appended.
  """
  return pipeline_id + constants.PIPELINE_END_NODE_SUFFIX
The provided code snippet includes necessary dependencies for implementing the `pipeline_end_node_id` function. Write a Python function `def pipeline_end_node_id(p: pipeline.Pipeline) -> str` to solve the following problem:
Builds the node id of a Pipeline End node.
Here is the function:
def pipeline_end_node_id(p: pipeline.Pipeline) -> str:
  """Builds the node id of a Pipeline End node.

  Delegates to `pipeline_end_node_id_from_pipeline_id` using `p.id`.
  """
return pipeline_end_node_id_from_pipeline_id(p.id) | Builds the node id of a Pipeline End node. |
166,480 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
def pipeline_end_node_id_from_pipeline_id(pipeline_id: str) -> str:
  """Builds the node id of a Pipeline End node.

  The id is `pipeline_id` followed by `constants.PIPELINE_END_NODE_SUFFIX`.
  """
  return f"{pipeline_id}{constants.PIPELINE_END_NODE_SUFFIX}"
def node_context_name(pipeline_context_name: str, node_id: str) -> str:
  """Defines the name used to reference a node context in MLMD.

  The name has the form `"<pipeline context name>.<node id>"`.
  """
  return f"{pipeline_context_name}.{node_id}"
The provided code snippet includes necessary dependencies for implementing the `end_node_context_name_from_subpipeline_id` function. Write a Python function `def end_node_context_name_from_subpipeline_id(subpipeline_id: str) -> str` to solve the following problem:
Builds the end_node context name of a composable pipeline.
Here is the function:
def end_node_context_name_from_subpipeline_id(subpipeline_id: str) -> str:
  """Builds the end_node context name of a composable pipeline.

  Combines the subpipeline id with its derived end-node id via
  `node_context_name`.
  """
  # The end node's id is derived from the subpipeline id itself.
  end_node_id = pipeline_end_node_id_from_pipeline_id(subpipeline_id)
return node_context_name(subpipeline_id, end_node_id) | Builds the end_node context name of a composable pipeline. |
166,481 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
def implicit_channel_key(channel: types.BaseChannel) -> str:
  """Key of a channel to the node that consumes the channel as input.

  Args:
    channel: The channel consumed as an (implicit) input.

  Returns:
    A key uniquely identifying the channel for the consuming node. Keys
    always start with an underscore, which keeps them distinct from regular
    user-declared input keys.

  Raises:
    ValueError: If the channel type is unsupported, or the channel lacks the
      fields required to build the key.
  """
  if isinstance(channel, channel_types.PipelineInputChannel):
    channel = cast(channel_types.PipelineInputChannel, channel)
    return f"_{channel.pipeline.id}.{channel.output_key}"
  elif isinstance(channel, types.Channel):
    if channel.producer_component_id and channel.output_key:
      return f"_{channel.producer_component_id}.{channel.output_key}"
    raise ValueError(
        "Cannot create implicit input key for Channel that has no "
        "producer_component_id and output_key."
    )
  elif isinstance(channel, channel_types.ExternalPipelineChannel):
    if (
        channel.owner
        and channel.pipeline_name
        and channel.producer_component_id
        and channel.output_key
    ):
      implicit_key = f"_{channel.producer_component_id}.{channel.output_key}.{channel.owner}.{channel.pipeline_name}"
      # Pin the key to a specific pipeline run only when one was requested.
      if channel.pipeline_run_id:
        return implicit_key + "." + channel.pipeline_run_id
      else:
        return implicit_key
    # Message fixed: previously the implicit string concatenation produced
    # "...has noower, ..." (missing space and "owner" misspelled).
    raise ValueError(
        "Cannot create implicit input key for Channel that has no "
        "owner, pipeline_name, producer_component_id and output_key."
    )
  else:
    raise ValueError("Unsupported channel type for implicit channel key.")
The provided code snippet includes necessary dependencies for implementing the `build_channel_to_key_fn` function. Write a Python function `def build_channel_to_key_fn(implicit_keys_map)` to solve the following problem:
Builds a function that returns the key of a channel for consumer node.
Here is the function:
def build_channel_to_key_fn(implicit_keys_map: Dict[str, str]):
  """Builds a function that returns the key of a channel for consumer node.

  Args:
    implicit_keys_map: Maps an implicit channel key (see
      `implicit_channel_key`) to the input key the consumer node should use.

  Returns:
    A callable mapping a `BaseChannel` to its input key: the mapped key when
    the channel's implicit key is present in `implicit_keys_map`, otherwise
    the implicit key itself.
  """
  def channel_to_key_fn(channel: types.BaseChannel) -> str:
    implicit_key = implicit_channel_key(channel)
    # Fall back to the implicit key when no explicit mapping exists.
    if implicit_key in implicit_keys_map:
      return implicit_keys_map[implicit_key]
    return implicit_key
return channel_to_key_fn | Builds a function that returns the key of a channel for consumer node. |
166,482 | from typing import cast, List, Optional, Sequence, Tuple, Type, Union, Dict, Any, Set
from tfx import types
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from ml_metadata.proto import metadata_store_pb2
class _PipelineEnd(base_node.BaseNode):
  """Virtual pipeline end node.

  While the pipeline end node does not exist nor accessible in DSL, having a
  PipelineEnd class helps generalizing the compilation.

  Supported features:
  - Node ID (which is "{pipeline_id}_end")
  - Node type name
  - Node inputs, which comes from the inner pipeline (i.e. the raw value
    used from Pipeline(outputs=raw_outputs))
  - Node outputs, which is the wrapped OutputChannel of the pipeline node
    that is visible from the outer pipeline

  Not yet supported:
  - upstream/downstream nodes relationship
  """

  # NOTE(review): in upstream TFX these accessors are typically @property
  # decorated; confirm the decorators were not dropped when this snippet was
  # extracted.

  def __init__(self, p: pipeline.Pipeline):
    super().__init__()
    # Keep a handle to the wrapped pipeline; the accessors below delegate
    # to it.
    self._pipeline = p
    self.with_id(pipeline_end_node_id(p))

  def type(self) -> str:
    return pipeline_end_node_type_name(self._pipeline)

  def inputs(self) -> Dict[str, channel_types.BaseChannel]:
    # The end node consumes the raw channels the pipeline declared as its
    # outputs.
    return {
        key: pipeline_output_channel.wrapped
        for key, pipeline_output_channel in self._pipeline.outputs.items()
    }

  def outputs(self) -> Dict[str, channel_types.BaseChannel]:
    return self._pipeline.outputs

  def exec_properties(self) -> Dict[str, Any]:
    # The end node is virtual and carries no execution properties.
    return {}
The provided code snippet includes necessary dependencies for implementing the `create_pipeline_end_node` function. Write a Python function `def create_pipeline_end_node(p: pipeline.Pipeline) -> _PipelineEnd` to solve the following problem:
Create a dummy pipeline end node. The pipeline end node does not appear in pipeline DSL but only in the pipeline IR. To generalize the compilation process for the pipeline end node, create a dummy BaseNode whose inputs are set as pipeline.outputs. Args: p: A Pipeline instance whose pipeline end node will be created. Returns: a pipeline end node.
Here is the function:
def create_pipeline_end_node(p: pipeline.Pipeline) -> _PipelineEnd:
  """Create a dummy pipeline end node.

  pipeline end node does not appear in pipeline DSL but only in the pipeline
  IR. To generalize the compilation process for the pipeline end node, create
  a dummy BaseNode whose inputs are set as pipeline.outputs.

  Args:
    p: A Pipeline instance whose pipeline end node will be created.

  Returns:
    a pipeline end node.
  """
  # Create the node while the pipeline's own DSL context registry is active,
  # so the end node is recorded in that pipeline's registry rather than the
  # outer one.
  with dsl_context_registry.use_registry(p.dsl_context_registry):
return _PipelineEnd(p) | Create a dummy pipeline end node. pipeline end node does not appear in pipeline DSL but only in the pipeline IR. To generalizes compilation process for the pipeline end node, create a dummy BaseNode whose inputs are set as pipeline.outputs. Args: p: A Pipeline instance whose pipeline end node will be created. Returns: a pipeline end node. |
166,483 | from typing import Type, cast
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_node
from tfx.dsl.experimental.conditionals import conditional
from tfx.dsl.input_resolution import resolver_op
from tfx.dsl.placeholder import artifact_placeholder
from tfx.dsl.placeholder import placeholder
from tfx.orchestration import data_types_utils
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import channel_utils
from tfx.types import resolved_channel
from tfx.types import value_artifact
from tfx.utils import deprecation_utils
from tfx.utils import name_utils
from tfx.utils import typing_utils
def _compile_input_spec(
    *,
    pipeline_ctx: compiler_context.PipelineContext,
    tfx_node: base_node.BaseNode,
    input_key: str,
    channel: channel_types.BaseChannel,
    hidden: bool,
    min_count: int,
    result: pipeline_pb2.NodeInputs,
) -> None:
  """Compiles `BaseChannel` into `InputSpec` at `result.inputs[input_key]`.

  Args:
    pipeline_ctx: A `PipelineContext`.
    tfx_node: A `BaseNode` instance from pipeline DSL.
    input_key: An input key that the compiled `InputSpec` would be stored with.
    channel: A `BaseChannel` instance to compile.
    hidden: If true, this sets `InputSpec.hidden = True`. If the same channel
      instances have been called multiple times with different `hidden` value,
      then `hidden` will be `False`. In other words, if the channel is ever
      compiled with `hidden=False`, it will ignore other `hidden=True`.
    min_count: Minimum number of artifacts that should be resolved for this
      input key. If min_count is not met during the input resolution, it is
      considered as an error.
    result: A `NodeInputs` proto to which the compiled result would be written.
  """
  if input_key in result.inputs:
    # Already compiled. This can happen during compiling another input channel
    # from the same resolver function output.
    if not hidden:
      # Overwrite hidden = False even for already compiled channel, this is
      # because we don't know the input should truely be hidden until the
      # channel turns out not to be.
      result.inputs[input_key].hidden = False
    return
  if channel in pipeline_ctx.channels:
    # OutputChannel or PipelineInputChannel from the same pipeline has already
    # compiled IR in context.channels
    result.inputs[input_key].channels.append(pipeline_ctx.channels[channel])
  elif isinstance(channel, channel_types.PipelineOutputChannel):
    # This is the case when PipelineInputs uses pipeline.outputs where the
    # pipeline is external (i.e. not a parent or sibling pipeline) thus
    # pipeline run cannot be synced.
    channel = cast(channel_types.PipelineOutputChannel, channel)
    _compile_channel_pb(
        artifact_type=channel.type,
        pipeline_name=channel.pipeline.id,
        node_id=channel.wrapped.producer_component_id,
        output_key=channel.output_key,
        result=result.inputs[input_key].channels.add())
  elif isinstance(channel, channel_types.ExternalPipelineChannel):
    # Channel produced by a node of an external pipeline; may carry a
    # pipeline-run pin and its own MLMD connection config (packed below).
    channel = cast(channel_types.ExternalPipelineChannel, channel)
    result_input_channel = result.inputs[input_key].channels.add()
    _compile_channel_pb(
        artifact_type=channel.type,
        pipeline_name=channel.pipeline_name,
        node_id=channel.producer_component_id,
        output_key=channel.output_key,
        result=result_input_channel)
    if channel.pipeline_run_id:
      # Restrict the query to the requested pipeline run.
      ctx = result_input_channel.context_queries.add()
      ctx.type.name = constants.PIPELINE_RUN_CONTEXT_TYPE_NAME
      ctx.name.field_value.string_value = channel.pipeline_run_id
    if pipeline_ctx.pipeline.platform_config:
      project_config = (
          pipeline_ctx.pipeline.platform_config.project_platform_config
      )
      # Only attach a connection config when the external channel belongs to
      # a different owner/project than the current pipeline.
      if (
          channel.owner != project_config.owner
          or channel.pipeline_name != project_config.project_name
      ):
        config = metadata_pb2.MLMDServiceConfig(
            owner=channel.owner,
            name=channel.pipeline_name,
        )
        result_input_channel.metadata_connection_config.Pack(config)
    else:
      config = metadata_pb2.MLMDServiceConfig(
          owner=channel.owner,
          name=channel.pipeline_name,
      )
      result_input_channel.metadata_connection_config.Pack(config)
  elif isinstance(channel, channel_types.Channel):
    # Plain Channel: referenced by producer node id and output key within the
    # current pipeline.
    channel = cast(channel_types.Channel, channel)
    _compile_channel_pb(
        artifact_type=channel.type,
        pipeline_name=pipeline_ctx.pipeline_info.pipeline_name,
        node_id=channel.producer_component_id,
        output_key=channel.output_key,
        result=result.inputs[input_key].channels.add())
  elif isinstance(channel, channel_types.UnionChannel):
    # Union of several channels: each sub-channel is compiled as a hidden
    # input and referenced here by its key.
    channel = cast(channel_types.UnionChannel, channel)
    mixed_inputs = result.inputs[input_key].mixed_inputs
    mixed_inputs.method = pipeline_pb2.InputSpec.Mixed.Method.UNION
    for sub_channel in channel.channels:
      sub_key = (
          pipeline_ctx.get_node_context(tfx_node).get_input_key(sub_channel))
      mixed_inputs.input_keys.append(sub_key)
      _compile_input_spec(
          pipeline_ctx=pipeline_ctx,
          tfx_node=tfx_node,
          input_key=sub_key,
          channel=sub_channel,
          hidden=True,
          min_count=0,
          result=result)
  elif isinstance(channel, resolved_channel.ResolvedChannel):
    # Output of a resolver function: compile the input graph and reference it.
    channel = cast(resolved_channel.ResolvedChannel, channel)
    input_graph_ref = result.inputs[input_key].input_graph_ref
    input_graph_ref.graph_id = _compile_input_graph(
        pipeline_ctx, tfx_node, channel, result)
    if channel.output_key:
      input_graph_ref.key = channel.output_key
  elif isinstance(channel, channel_utils.ChannelForTesting):
    channel = cast(channel_utils.ChannelForTesting, channel)
    # Access result.inputs[input_key] to create an empty `InputSpec`. If the
    # testing channel does not point to static artifact IDs, empty `InputSpec`
    # is enough for testing.
    input_spec = result.inputs[input_key]
    if channel.artifact_ids:
      input_spec.static_inputs.artifact_ids.extend(channel.artifact_ids)
  else:
    raise NotImplementedError(
        f'Node {tfx_node.id} got unsupported channel type {channel!r} for '
        f'inputs[{input_key!r}].')
  # Finally apply hidden/min_count flags to the freshly-created InputSpec.
  if hidden:
    result.inputs[input_key].hidden = True
  if min_count:
    result.inputs[input_key].min_count = min_count
def _compile_conditionals(
    context: compiler_context.PipelineContext,
    tfx_node: base_node.BaseNode,
    result: pipeline_pb2.NodeInputs,
) -> None:
  """Compiles conditionals attached to the BaseNode.

  Every channel a conditional predicate depends on is also compiled: if the
  channel already appears in the node inputs it is reused, otherwise it is
  added as a regular (non-hidden) input.

  Args:
    context: A `PipelineContext`.
    tfx_node: A `BaseNode` instance from pipeline DSL.
    result: A `NodeInputs` proto to which the compiled result would be written.
  """
  try:
    enclosing_contexts = context.dsl_context_registry.get_contexts(tfx_node)
  except ValueError:
    # Node is not registered, hence not under any conditional.
    return
  cond_contexts = (
      c for c in enclosing_contexts if isinstance(c, conditional.CondContext)
  )
  for cond_context in cond_contexts:
    predicate = cond_context.predicate
    for dep_channel in channel_utils.get_dependent_channels(predicate):
      dep_key = (
          context.get_node_context(tfx_node).get_input_key(dep_channel))
      _compile_input_spec(
          pipeline_ctx=context,
          tfx_node=tfx_node,
          input_key=dep_key,
          channel=dep_channel,
          hidden=False,
          min_count=1,
          result=result)
    cond_id = context.get_conditional_id(cond_context)
    encoded_predicate = channel_utils.encode_placeholder_with_channels(
        predicate, context.get_node_context(tfx_node).get_input_key)
    result.conditionals[cond_id].placeholder_expression.CopyFrom(
        encoded_predicate)
def _compile_inputs_for_dynamic_properties(
    context: compiler_context.PipelineContext,
    tfx_node: base_node.BaseNode,
    result: pipeline_pb2.NodeInputs,
) -> None:
  """Compiles additional InputSpecs used in dynamic properties.

  A dynamic property is an execution property whose value comes from an
  artifact's value. Since dynamic properties are resolved after input
  resolution at the orchestrator, the channels they depend on must be part of
  input resolution as well, so they are compiled as regular (non-hidden)
  inputs here.

  Args:
    context: A `PipelineContext`.
    tfx_node: A `BaseNode` instance from pipeline DSL.
    result: A `NodeInputs` proto to which the compiled result would be written.
  """
  for prop_key, prop_value in tfx_node.exec_properties.items():
    if not isinstance(prop_value, placeholder.Placeholder):
      continue
    # Validate every .future().value placeholder; .future().uri is also
    # allowed and needs no extra validation.
    for sub_placeholder in prop_value.traverse():
      if not isinstance(
          sub_placeholder, artifact_placeholder._ArtifactValueOperator):  # pylint: disable=protected-access
        continue
      for dep_channel in channel_utils.get_dependent_channels(sub_placeholder):
        channel_type = dep_channel.type  # is_compatible() needs this variable.
        if not typing_utils.is_compatible(
            channel_type, Type[value_artifact.ValueArtifact]
        ):
          raise ValueError(
              'When you pass <channel>.future().value to an execution '
              'property, the channel must be of a value artifact type '
              f'(String, Float, ...). Got {channel_type.TYPE_NAME} in exec '
              f'property {prop_key!r} of node {tfx_node.id!r}.'
          )
    # Compile each channel the dynamic property depends on as a visible input.
    for dep_channel in channel_utils.get_dependent_channels(prop_value):
      _compile_input_spec(
          pipeline_ctx=context,
          tfx_node=tfx_node,
          input_key=context.get_node_context(tfx_node).get_input_key(
              dep_channel),
          channel=dep_channel,
          hidden=False,
          min_count=1,
          result=result,
      )
def _validate_min_count(
    input_key: str,
    min_count: int,
    channel: channel_types.OutputChannel,
    consumer_node: base_node.BaseNode,
) -> None:
  """Validates artifact min count against node execution options.

  This check is best-effort: it only covers producer/consumer pairs within
  the same pipeline. Other min_count violations surface as node failures at
  run time.

  Args:
    input_key: Artifact input key to be displayed in error messages.
    min_count: Minimum artifact count to be set in InputSpec.
    channel: OutputChannel used as an input to be compiled.
    consumer_node: Node using the artifact as an input.

  Raises:
    ValueError: if min_count is invalid.

  Returns:
    None if the validation passes.
  """
  # A producer that may legally fail cannot guarantee any artifacts, so the
  # consumer must not require at least one.
  producer_options = channel.producer_component.node_execution_options
  if min_count > 0 and producer_options and producer_options.success_optional:
    raise ValueError(
        f"Node({channel.producer_component_id}) is set to success_optional "
        f"= True but its consumer Node({consumer_node.id}).inputs[{input_key}] "
        "has min_count > 0. The consumer's input may need to be optional"
    )
  # Likewise, a consumer triggered on upstream *completion* (rather than
  # success) may run with no artifacts available.
  consumer_options = consumer_node.node_execution_options
  completion_strategies = (
      pipeline_pb2.NodeExecutionOptions.ALL_UPSTREAM_NODES_COMPLETED,
      pipeline_pb2.NodeExecutionOptions.LAZILY_ALL_UPSTREAM_NODES_COMPLETED,
  )
  if (min_count > 0 and consumer_options
      and consumer_options.trigger_strategy in completion_strategies):
    strategy_name = pipeline_pb2.NodeExecutionOptions.TriggerStrategy.Name(
        consumer_options.trigger_strategy)
    raise ValueError(
        f"Node({consumer_node.id}) has trigger_strategy = {strategy_name} but"
        f" its inputs[{input_key}] has min_count > 0. The consumer's input"
        " may need to be optional"
    )
The provided code snippet includes necessary dependencies for implementing the `compile_node_inputs` function. Write a Python function `def compile_node_inputs( context: compiler_context.PipelineContext, tfx_node: base_node.BaseNode, result: pipeline_pb2.NodeInputs, ) -> None` to solve the following problem:
Compile NodeInputs from BaseNode input channels.
Here is the function:
def compile_node_inputs(
    context: compiler_context.PipelineContext,
    tfx_node: base_node.BaseNode,
    result: pipeline_pb2.NodeInputs,
) -> None:
  """Compile NodeInputs from BaseNode input channels."""

  def _required_min_count(key: str, input_channel) -> int:
    # Resolver nodes never require materialized input artifacts.
    if compiler_utils.is_resolver(tfx_node):
      return 0
    if not isinstance(tfx_node, base_component.BaseComponent):
      return 1
    spec_param = tfx_node.spec.INPUTS[key]
    # An explicitly set allow_empty must agree with the channel's optionality.
    if (
        spec_param.allow_empty_explicitly_set
        and input_channel.is_optional is not None
        and (spec_param.allow_empty != input_channel.is_optional)
    ):
      raise ValueError(
          f'Node {tfx_node.id} input channel {key} allow_empty is set'
          f' to {spec_param.allow_empty} but the provided channel is'
          f' {input_channel.is_optional}. If the component spec explicitly sets'
          ' allow_empty, then the channel must match.'
      )
    if spec_param.allow_empty or input_channel.is_optional:
      return 0
    return 1

  # Compile DSL node inputs.
  for input_key, input_channel in tfx_node.inputs.items():
    min_count = _required_min_count(input_key, input_channel)
    if isinstance(input_channel, channel_types.OutputChannel):
      _validate_min_count(
          input_key=input_key,
          min_count=min_count,
          channel=input_channel,
          consumer_node=tfx_node,
      )
    _compile_input_spec(
        pipeline_ctx=context,
        tfx_node=tfx_node,
        input_key=input_key,
        channel=input_channel,
        hidden=False,
        min_count=min_count,
        result=result)
  # Add implicit input channels that are used in conditionals.
  _compile_conditionals(context, tfx_node, result)
  # Add implicit input channels that are used in dynamic properties.
  _compile_inputs_for_dynamic_properties(context, tfx_node, result)
166,484 | import base64
import enum
import functools
import os
import re
from typing import Any, Callable, Union, cast
from absl import logging
import attr
from tfx.dsl.io import fileio
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import value_artifact
from tfx.utils import json_utils
from tfx.utils import proto_utils
from google.protobuf import any_pb2
from google.protobuf import descriptor as descriptor_lib
from google.protobuf import json_format
from google.protobuf import message
from google.protobuf import text_format
The provided code snippet includes necessary dependencies for implementing the `_resolve_and_ensure_boolean` function. Write a Python function `def _resolve_and_ensure_boolean( resolve_fn: Callable[[placeholder_pb2.PlaceholderExpression], Any], expression: placeholder_pb2.PlaceholderExpression, error_message: str, ) -> bool` to solve the following problem:
Ensures that expression resolves to boolean. NOTE: This is currently used to ensure that the inputs to logical operators are boolean. Since the DSL for creating Predicate expressions do not currently perform implicit boolean conversions, the evaluator should not support it either. If we support implicit boolean conversions in the DSL in the future, this check can be removed. Args: resolve_fn: The function for resolving placeholder expressions. expression: The placeholder expression to resolve. error_message: The error message to display if the expression does not resolve to a boolean type. Returns: The resolved boolean value. Raises: ValueError if expression does not resolve to boolean type.
Here is the function:
def _resolve_and_ensure_boolean(
    resolve_fn: Callable[[placeholder_pb2.PlaceholderExpression], Any],
    expression: placeholder_pb2.PlaceholderExpression,
    error_message: str,
) -> bool:
  # TODO(b/173529355): Block invalid placeholders during compilation time
  """Resolves an expression and verifies the result is a Python bool.

  NOTE: This is currently used to ensure that the inputs to logical operators
  are boolean. Since the DSL for creating Predicate expressions does not
  currently perform implicit boolean conversions, the evaluator should not
  support them either. If implicit boolean conversions are added to the DSL in
  the future, this check can be removed.

  Args:
    resolve_fn: The function for resolving placeholder expressions.
    expression: The placeholder expression to resolve.
    error_message: The error message to display if the expression does not
      resolve to a boolean type.

  Returns:
    The resolved boolean value.

  Raises:
    ValueError if expression does not resolve to boolean type.
  """
  resolved = resolve_fn(expression)
  if not isinstance(resolved, bool):
    raise ValueError(f"{error_message}\n"
                     f"expression: {expression}\n"
                     f"resolved value type: {type(resolved)}\n"
                     f"resolved value: {resolved}")
  return resolved
166,485 | import base64
import enum
import functools
import os
import re
from typing import Any, Callable, Union, cast
from absl import logging
import attr
from tfx.dsl.io import fileio
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import value_artifact
from tfx.utils import json_utils
from tfx.utils import proto_utils
from google.protobuf import any_pb2
from google.protobuf import descriptor as descriptor_lib
from google.protobuf import json_format
from google.protobuf import message
from google.protobuf import text_format
# Registry mapping an operator proto's message name (op_proto.DESCRIPTOR.name)
# to the Python callable that evaluates it. Populated via the @_register
# decorator.
_PLACEHOLDER_OPERATORS: dict[str, Callable[..., Any]] = {}
The provided code snippet includes necessary dependencies for implementing the `_register` function. Write a Python function `def _register(op_proto)` to solve the following problem:
Decorator function for registering operators. Internal in this module.
Here is the function:
def _register(op_proto):
  """Decorator factory registering an operator impl under its proto name.

  Internal in this module: keys `_PLACEHOLDER_OPERATORS` by the operator
  proto's message name.
  """
  registry_key = op_proto.DESCRIPTOR.name

  def decorator(op: Callable[..., Any]):
    _PLACEHOLDER_OPERATORS[registry_key] = op
    return op

  return decorator
166,486 | import base64
import enum
import functools
import os
import re
from typing import Any, Callable, Union, cast
from absl import logging
import attr
from tfx.dsl.io import fileio
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import value_artifact
from tfx.utils import json_utils
from tfx.utils import proto_utils
from google.protobuf import any_pb2
from google.protobuf import descriptor as descriptor_lib
from google.protobuf import json_format
from google.protobuf import message
from google.protobuf import text_format
def get_unary_operator_names() -> set[str]:
  """Returns all unary placeholder operators."""
  # Unary operators are identified by having a single "expression" field.
  names = set()
  for op_name, fields in _get_all_operators().items():
    if "expression" in fields:
      names.add(op_name)
  return names
def get_binary_operator_names() -> set[str]:
  """Returns all binary placeholder operators."""
  # Binary operators are identified by their "lhs" (and "rhs") fields.
  return set(
      op_name
      for op_name, fields in _get_all_operators().items()
      if "lhs" in fields
  )
def get_nary_operator_names() -> set[str]:
  """Returns all n-ary placeholder operators."""
  # N-ary operators are identified by a repeated "expressions" field.
  names = set()
  for op_name, fields in _get_all_operators().items():
    if "expressions" in fields:
      names.add(op_name)
  return names
The provided code snippet includes necessary dependencies for implementing the `get_all_types_in_placeholder_expression` function. Write a Python function `def get_all_types_in_placeholder_expression( placeholder: placeholder_pb2.PlaceholderExpression, ) -> set["placeholder_pb2.Placeholder.Type"]` to solve the following problem:
Returns all Placeholder.Type contained in a PlaceholderExpression.
Here is the function:
def get_all_types_in_placeholder_expression(
    placeholder: placeholder_pb2.PlaceholderExpression,
) -> set["placeholder_pb2.Placeholder.Type"]:
  """Returns all Placeholder.Type contained in a PlaceholderExpression.

  Recursively walks the expression tree, collecting the type of every leaf
  placeholder. Literal values contribute nothing.

  Raises:
    ValueError: on an unrecognized expression or operator.
  """
  if placeholder.HasField("placeholder"):
    return {placeholder.placeholder.type}
  if placeholder.HasField("value"):
    # Plain values contain no placeholders.
    return set()
  if not placeholder.HasField("operator"):
    raise ValueError(f"Unrecognized placeholder expression: {placeholder}")
  operator_name = placeholder.operator.WhichOneof("operator_type")
  operator_pb = getattr(placeholder.operator, operator_name)
  # Gather the operator's child expressions based on its arity.
  if operator_name in get_unary_operator_names():
    children = [operator_pb.expression]
  elif operator_name in get_binary_operator_names():
    children = [operator_pb.lhs, operator_pb.rhs]
  elif operator_name in get_nary_operator_names():
    children = operator_pb.expressions
  elif operator_name == "make_proto_op":
    children = operator_pb.fields.values()
  else:
    raise ValueError(
        f"Unrecognized placeholder operator {operator_name} in expression: "
        f"{placeholder}"
    )
  collected = set()
  for child in children:
    collected.update(get_all_types_in_placeholder_expression(child))
  return collected
166,487 | import abc
from collections.abc import Mapping, Sequence
from typing import Callable, Optional, Union
import attr
from tfx.orchestration import data_types_utils
from tfx.proto.orchestration import execution_hook_pb2
from ml_metadata.proto import metadata_store_pb2
# Primitive Python types accepted as flag values.
_PrimitiveFlagValueType = Union[int, float, str, bool]
def _to_value(
    value: _PrimitiveFlagValueType,
) -> metadata_store_pb2.Value:
  """Wraps a primitive Python value into an MLMD `Value` proto."""
  wrapped = metadata_store_pb2.Value()
  data_types_utils.set_metadata_value(wrapped, value)
  return wrapped
166,488 | import abc
from collections.abc import Mapping, Sequence
from typing import Callable, Optional, Union
import attr
from tfx.orchestration import data_types_utils
from tfx.proto.orchestration import execution_hook_pb2
from ml_metadata.proto import metadata_store_pb2
_PrimitiveFlagValueType = Union[int, float, str, bool]
_FlagMap = Union[
Sequence[tuple[str, _PrimitiveFlagValueType]],
Mapping[str, _PrimitiveFlagValueType],
]
def _iterate_flags(
flags: _FlagMap,
) -> Sequence[tuple[str, _PrimitiveFlagValueType]]:
return list(flags.items()) if isinstance(flags, Mapping) else flags | null |
166,489 | from __future__ import annotations
from typing import Dict, Generic, Iterator, Mapping, Optional, TypeVar, Union
from tfx.dsl.placeholder import placeholder_base
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import any_pb2
from google.protobuf import descriptor as descriptor_lib
from google.protobuf import message
from google.protobuf import message_factory
# Type variable for the concrete proto message type a MakeProtoPlaceholder
# resolves to.
_T = TypeVar('_T', bound=message.Message)
# Values users may pass for one proto field when building a proto placeholder.
# NOTE(review): `_InputValues` (the per-element value type) is declared
# elsewhere in this module, outside the visible chunk.
_InputFieldValues = Union[  # The values users may pass.
    _InputValues,  # singular (optional or required) field
    list[_InputValues],  # repeated field
    dict[str, _InputValues],  # map field with plain keys
    list[tuple[Union[str, placeholder_base.Placeholder], _InputValues]],  # map
    None,  # Users may pass None to optional fields.
]
class MakeProtoPlaceholder(Generic[_T], placeholder_base.Placeholder):
  """A placeholder that evaluates to a proto message.

  Constructed by `make_proto()`. Holds a base proto message plus per-field
  values (which may themselves be placeholders) and encodes them into a
  `MakeProtoOperator` placeholder expression.
  """

  def __init__(
      self,
      base_message: _T,
      **kwargs: _InputFieldValues,
  ):
    """Initializes the class. Consider this private."""
    super().__init__(expected_type=type(base_message))
    self._base_message = base_message
    self._fields: dict[str, placeholder_base.ValueLikeType] = {}
    for key, value in kwargs.items():
      value = self._validate_and_transform_field(key, value)
      # None means "field not set"; only keep fields with actual values.
      if value is not None:
        self._fields[key] = value

  def _validate_and_transform_field(
      self, field: str, value: _InputFieldValues
  ) -> Optional[placeholder_base.ValueLikeType]:
    """Validates the given value and transforms it to what encode() needs.

    Handles the three field shapes separately: map<> fields become a
    DictPlaceholder, repeated fields become a ListPlaceholder, and singular
    fields are validated element-wise.
    """
    message_name = self._base_message.DESCRIPTOR.full_name
    if field not in self._base_message.DESCRIPTOR.fields_by_name:
      raise ValueError(f'Unknown field {field} for proto {message_name}.')
    descriptor: descriptor_lib.FieldDescriptor = (
        self._base_message.DESCRIPTOR.fields_by_name[field]
    )
    field_name = f'{message_name}.{descriptor.name}'
    if (  # If it's a map<> field (synthesized entry message with map_entry).
        descriptor.message_type
        and descriptor.message_type.has_options
        and descriptor.message_type.GetOptions().map_entry
    ):
      if value is None:
        return None
      if isinstance(value, dict):
        # Normalize a plain dict into (key, value) pairs.
        value = value.items()
      elif not isinstance(value, list):
        raise ValueError(
            'Expected dict[k,v] or list[tuple[k, v]] input for map field '
            f'{field_name}, got {value!r}.'
        )
      entries: list[
          tuple[
              Union[str, placeholder_base.Placeholder],
              placeholder_base.ValueLikeType,
          ]
      ] = []
      for entry_key, entry_value in value:
        # Keys must be strings or string-valued placeholders; proto/list/dict
        # placeholders cannot serve as map keys.
        if not isinstance(
            entry_key, (str, placeholder_base.Placeholder)
        ) or isinstance(
            entry_key,
            (
                MakeProtoPlaceholder,
                placeholder_base.ListPlaceholder,
                placeholder_base.DictPlaceholder,
            ),
        ):
          raise ValueError(
              'Expected string (placeholder) for dict key of map field '
              f'{field_name}, got {entry_key!r}.'
          )
        # Map values are validated against the entry message's "value" field.
        value_descriptor = descriptor.message_type.fields_by_name['value']
        entry_value = self._validate_and_transform_value(
            f'{field_name}.value', value_descriptor, entry_value
        )
        if entry_value is not None:
          entries.append((entry_key, entry_value))
      return placeholder_base.make_dict(entries)
    elif descriptor.label == descriptor_lib.FieldDescriptor.LABEL_REPEATED:
      # A placeholder that resolves to a whole list is passed through as-is.
      if value is None or isinstance(value, placeholder_base.Placeholder):
        return value  # pytype: disable=bad-return-type
      if not isinstance(value, list):
        raise ValueError(
            f'Expected list input for repeated field {field_name}, got '
            f'{value!r}.'
        )
      items: list[placeholder_base.ValueLikeType] = []
      for item in value:
        item = self._validate_and_transform_value(field_name, descriptor, item)
        if item is not None:
          items.append(item)
      return placeholder_base.make_list(items)
    else:
      return self._validate_and_transform_value(field_name, descriptor, value)

  def _validate_and_transform_value(
      self,
      field_name: str,
      descriptor: descriptor_lib.FieldDescriptor,
      value: _InputValues,
  ) -> Optional[placeholder_base.ValueLikeType]:
    """Validates one element value against its proto field descriptor.

    Sub-message values (protos, mappings, proto-valued placeholders) are
    wrapped/checked; scalar values are checked against the Python type that
    corresponds to the proto field type.

    Raises:
      ValueError: if the value does not fit the field.
    """
    if value is None:
      if descriptor.label == descriptor_lib.FieldDescriptor.LABEL_OPTIONAL:
        return None
      raise ValueError(
          f'Expected value for non-optional field {field_name}, got None.'
      )
    # Deal with sub-message fields first.
    if descriptor.type == descriptor_lib.FieldDescriptor.TYPE_MESSAGE:
      if isinstance(value, message.Message):
        # Wrap a concrete proto into a nested proto placeholder.
        value = MakeProtoPlaceholder(value)
      elif isinstance(value, Mapping):
        # A mapping is treated as kwargs for the sub-message's constructor.
        value = MakeProtoPlaceholder(
            # TODO(b/323991103):
            # Switch to using the message_factory.GetMessageClass() function.
            # See http://yaqs/3936732114019418112 for more context.
            message_factory.MessageFactory().GetPrototype(
                descriptor.message_type
            )(**value)
        )
      elif (
          not isinstance(value, placeholder_base.Placeholder)
          or not value._is_maybe_proto_valued()  # pylint: disable=protected-access
      ):
        raise ValueError(
            f'Expected submessage proto or placeholder for field {field_name}, '
            f'got {value!r}.'
        )

      # Some best-effort validation for the proto type.
      submsg_type = value.expected_type
      if isinstance(submsg_type, type) and issubclass(
          submsg_type, message.Message
      ):
        # The proto placeholder knows exactly which proto type it will resolve
        # to. So we can verify that it's the right one.
        if descriptor.message_type.full_name not in (
            submsg_type.DESCRIPTOR.full_name,
            any_pb2.Any.DESCRIPTOR.full_name,
        ):
          raise ValueError(
              f'Expected message of type {descriptor.message_type.full_name} '
              f'for field {field_name}, got {submsg_type.DESCRIPTOR.full_name}.'
          )
      return value

    # Now we know it's a scalar field.
    if isinstance(value, (message.Message, MakeProtoPlaceholder)):
      raise ValueError(
          f'Expected scalar value for field {field_name}, got {value!r}.'
      )
    if descriptor.type not in _PROTO_TO_PY_TYPE:
      raise ValueError(
          f'Unsupported proto field type {descriptor.type} on {field_name}.'
      )
    expected_type = _PROTO_TO_PY_TYPE[descriptor.type]
    if not isinstance(value, (expected_type, placeholder_base.Placeholder)):
      raise ValueError(
          f'Expected {expected_type} for {field_name}, got {value!r}.'
      )
    return value  # pytype: disable=bad-return-type

  def traverse(self) -> Iterator[placeholder_base.Placeholder]:
    """Yields all placeholders under and including this one."""
    yield from super().traverse()
    for value in self._fields.values():
      if isinstance(value, placeholder_base.Placeholder):
        yield from value.traverse()

  def _lift_up_descriptors(
      self, op: placeholder_pb2.MakeProtoOperator
  ) -> None:
    """Moves+deduplicates descriptors from sub-messages to the given `op`."""
    # Descriptors already present on `op` (tracked by file name).
    known_descriptors = {fd.name for fd in op.file_descriptors.file}
    for field_value in op.fields.values():
      operator_type = field_value.operator.WhichOneof('operator_type')
      # Descend one level into list/dict wrapper operators to find nested
      # make_proto sub-expressions.
      if operator_type == 'list_concat_op':
        sub_expressions = field_value.operator.list_concat_op.expressions
      elif operator_type == 'make_dict_op':
        entries = field_value.operator.make_dict_op.entries
        sub_expressions = [entry.key for entry in entries] + [
            entry.value for entry in entries
        ]
      else:
        sub_expressions = [field_value]
      for sub_expression in sub_expressions:
        if (
            sub_expression.operator.WhichOneof('operator_type')
            == 'make_proto_op'
        ):
          sub_op = sub_expression.operator.make_proto_op
          # Move each not-yet-seen file descriptor up to the parent op, then
          # clear them from the child to avoid duplication.
          for fd in sub_op.file_descriptors.file:
            if fd.name not in known_descriptors:
              known_descriptors.add(fd.name)
              op.file_descriptors.file.append(fd)
          sub_op.ClearField('file_descriptors')

  def encode(
      self, component_spec: Optional[type['_types.ComponentSpec']] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this placeholder into a MakeProtoOperator expression.

    Packs the base message (as Any) with its file descriptor set, encodes all
    field values, and lifts nested descriptors up for deduplication.
    """
    result = placeholder_pb2.PlaceholderExpression()
    op = result.operator.make_proto_op
    op.base.Pack(self._base_message)
    proto_utils.build_file_descriptor_set(
        self._base_message, op.file_descriptors
    )

    for key, value in self._fields.items():
      op.fields[key].MergeFrom(
          placeholder_base.encode_value_like(value, component_spec)
      )

    self._lift_up_descriptors(op)

    return result
The provided code snippet includes necessary dependencies for implementing the `make_proto` function. Write a Python function `def make_proto( base_message: _T, **kwargs: _InputFieldValues, ) -> MakeProtoPlaceholder[_T]` to solve the following problem:
Returns a placeholder that resolves to a proto with the given fields. Basic usage: ```python flags=[ ( 'my_proto_flag', ph.make_proto( MyProtoType(), field1='a plain value', field2=ph.execution_invocation().pipeline_run_id, field3=ph.make_proto( MySubProtoType(inner_field1='fixed value'), inner_field2=ph.input('foo').uri + '/somefile.txt', ... ), ... ).serialize(ph.ProtoSerializationFormat.TEXT_FORMAT) ), ] ``` It's important to note that proto creation placeholders are only allowed as sub-expressions. That is, you cannot pass their output directly to a flag. Instead, you always need to serialize the proto in some way, so that it becomes a string. Limitations: * Map fields only support string/placeholder keys, not integral types. * MessageSet is not yet supported. * Proto extension fields are not supported. * `bytes` fields can only populated through Python `str` values, so their contents must be valid strings and can't contain arbitrary bytes. * All constructed protos are final. You can use them as a submessage inside another ph.make_proto() expression, but you cannot update field values after receiving the instance from the factory. (And you shouldn't need to.) Args: base_message: An instance of the proto type that the constructed placeholder resolves to. This can already have some fields populated, which will be passed through to the output, though of course those can't contain any placeholders. **kwargs: Additional fields to populate in the output proto message, whereby the values may contain placeholders. These fields are merged on top of the fields present already in the `base_message`. Just like when constructing regular protos, repeated fields must be passed as lists and map fields must be passed either as dicts (if all keys are plain strings) or as lists of (key,value) tuples if some of the keys are placeholders. In all cases, the values can be placeholders or plain values (strings, protos) matching the respective field type. 
In particular, other MakeProtoPlaceholder instances can be passed to populate sub-message fields. Returns: A placeholder that, at runtime, will evaluate to a proto message of the same type as the `base_message`. It will have the `base_message`'s fields populated, but with the `kwargs` fields merged on top.
Here is the function:
def make_proto(
    base_message: _T,
    **kwargs: _InputFieldValues,
) -> MakeProtoPlaceholder[_T]:
  """Returns a placeholder that resolves to a proto with the given fields.

  Basic usage:
  ```python
  flags=[
      (
          'my_proto_flag',
          ph.make_proto(
              MyProtoType(),
              field1='a plain value',
              field2=ph.execution_invocation().pipeline_run_id,
              field3=ph.make_proto(
                  MySubProtoType(inner_field1='fixed value'),
                  inner_field2=ph.input('foo').uri + '/somefile.txt',
                  ...
              ),
              ...
          ).serialize(ph.ProtoSerializationFormat.TEXT_FORMAT)
      ),
  ]
  ```

  Proto creation placeholders are only allowed as sub-expressions: their
  output cannot be passed to a flag directly and must always be serialized to
  a string in some way first.

  Limitations:
  * Map fields only support string/placeholder keys, not integral types.
  * MessageSet is not yet supported.
  * Proto extension fields are not supported.
  * `bytes` fields can only be populated through Python `str` values, so their
    contents must be valid strings and can't contain arbitrary bytes.
  * All constructed protos are final. They can be used as a submessage inside
    another ph.make_proto() expression, but field values cannot be updated
    after receiving the instance from the factory. (And you shouldn't need
    to.)

  Args:
    base_message: An instance of the proto type that the constructed
      placeholder resolves to. This can already have some fields populated,
      which will be passed through to the output, though of course those can't
      contain any placeholders.
    **kwargs: Additional fields to populate in the output proto message,
      merged on top of the fields already present in `base_message`. Just like
      when constructing regular protos, repeated fields must be passed as
      lists and map fields must be passed either as dicts (if all keys are
      plain strings) or as lists of (key, value) tuples if some of the keys
      are placeholders. In all cases, the values can be placeholders or plain
      values (strings, protos) matching the respective field type. In
      particular, other MakeProtoPlaceholder instances can be passed to
      populate sub-message fields.

  Returns:
    A placeholder that, at runtime, will evaluate to a proto message of the
    same type as the `base_message`. It will have the `base_message`'s fields
    populated, but with the `kwargs` fields merged on top.
  """
  return MakeProtoPlaceholder(base_message, **kwargs)
In all cases, the values can be placeholders or plain values (strings, protos) matching the respective field type. In particular, other MakeProtoPlaceholder instances can be passed to populate sub-message fields. Returns: A placeholder that, at runtime, will evaluate to a proto message of the same type as the `base_message`. It will have the `base_message`'s fields populated, but with the `kwargs` fields merged on top.
166,490 | from __future__ import annotations
import typing
from typing import Any, Literal, Union
from tfx.dsl.placeholder import placeholder_base
from tfx.proto.orchestration import placeholder_pb2
from google.protobuf import message
class ExecPropertyPlaceholder(placeholder_base.Placeholder):
  """ExecProperty Placeholder represents an execution property.

  Prefer to use exec_property(...) to create exec property placeholders.
  """

  def __init__(self, key: str):
    """Initializes the class. Consider this private."""
    # Execution property values may be primitives or proto messages.
    super().__init__(
        expected_type=Union[message.Message, placeholder_base.ValueType]
    )
    self._key = key

  def key(self) -> str:
    """Returns the execution property key this placeholder refers to."""
    return self._key

  def encode(
      self, component_spec: Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this placeholder into its proto representation."""
    expression = placeholder_pb2.PlaceholderExpression()
    expression.placeholder.type = (
        placeholder_pb2.Placeholder.Type.EXEC_PROPERTY
    )
    expression.placeholder.key = self._key
    return expression
The provided code snippet includes necessary dependencies for implementing the `exec_property` function. Write a Python function `def exec_property(key: str) -> ExecPropertyPlaceholder` to solve the following problem:
Returns a Placeholder that represents an execution property. Args: key: The key of the output artifact. Returns: A Placeholder that supports 1. Rendering the value of an execution property at a given key. Example: exec_property('version') 2. Rendering the whole proto or a proto field of an execution property, if the value is a proto type. The (possibly nested) proto field in a placeholder can be accessed as if accessing a proto field in Python. Example: exec_property('model_config').num_layers 3. Concatenating with other placeholders or strings. Example: output('model').uri + '/model/' + exec_property('version')
Here is the function:
def exec_property(key: str) -> ExecPropertyPlaceholder:
  """Returns a Placeholder that represents an execution property.

  Args:
    key: The key of the execution property.

  Returns:
    A Placeholder that supports
      1. Rendering the value of an execution property at a given key.
         Example: exec_property('version')
      2. Rendering the whole proto or a proto field of an execution property,
         if the value is a proto type.
         The (possibly nested) proto field in a placeholder can be accessed as
         if accessing a proto field in Python.
         Example: exec_property('model_config').num_layers
      3. Concatenating with other placeholders or strings.
         Example: output('model').uri + '/model/' + exec_property('version')
  """
  # Defect fixed: the docstring previously described `key` as "the key of the
  # output artifact", copied from the output-artifact factory; the key here
  # addresses an execution property.
  return ExecPropertyPlaceholder(key)
166,491 | from __future__ import annotations
import typing
from typing import Any, Literal, Union
from tfx.dsl.placeholder import placeholder_base
from tfx.proto.orchestration import placeholder_pb2
from google.protobuf import message
# The set of runtime-info keys a RuntimeInfoPlaceholder may reference.
RuntimeInfoKeys = Literal[
    'executor_spec',
    'platform_config',
    'pipeline_platform_config',
]


class RuntimeInfoPlaceholder(placeholder_base.Placeholder):
  """RuntimeInfo Placeholder represents runtime information for a component.

  Prefer to use runtime_info(...) to create RuntimeInfo placeholders.
  """

  def __init__(self, key: RuntimeInfoKeys):
    """Initializes the class. Consider this private."""
    super().__init__(expected_type=message.Message)
    # Reject keys outside the RuntimeInfoKeys literal set.
    if key not in typing.get_args(RuntimeInfoKeys):
      raise ValueError(f'Got unsupported runtime info key: {key}.')
    self._key = key

  def encode(
      self, component_spec: Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this placeholder into its proto representation."""
    expression = placeholder_pb2.PlaceholderExpression()
    expression.placeholder.type = (
        placeholder_pb2.Placeholder.Type.RUNTIME_INFO
    )
    expression.placeholder.key = self._key
    return expression
The provided code snippet includes necessary dependencies for implementing the `runtime_info` function. Write a Python function `def runtime_info(key: RuntimeInfoKeys) -> RuntimeInfoPlaceholder` to solve the following problem:
Returns a Placeholder that contains runtime information for component. Currently the runtime info includes following keys: 1. executor_spec: The executor spec proto. 2. platform_config: A proto that contains platform-specific information for the current pipeline node. 3. pipeline_platform_config: A proto that contains platform-specific information for the pipeline as a whole. Args: key: The key of the runtime information. Returns: A Placeholder that will render to the information associated with the key. If the placeholder is proto-valued. Accessing a proto field can be represented as if accessing a proto field in Python. Raises: ValueError: If received unsupported key.
Here is the function:
def runtime_info(key: RuntimeInfoKeys) -> RuntimeInfoPlaceholder:
  """Returns a Placeholder that contains runtime information for component.

  Currently the runtime info includes following keys:
    1. executor_spec: The executor spec proto.
    2. platform_config: A proto that contains platform-specific information
       for the current pipeline node.
    3. pipeline_platform_config: A proto that contains platform-specific
       information for the pipeline as a whole.

  Args:
    key: The key of the runtime information.

  Returns:
    A Placeholder that will render to the information associated with the
    key. If the placeholder is proto-valued, accessing a proto field can be
    represented as if accessing a proto field in Python.

  Raises:
    ValueError: If received unsupported key.
  """
  return RuntimeInfoPlaceholder(key)
166,492 | from __future__ import annotations
import typing
from typing import Any, Literal, Union
from tfx.dsl.placeholder import placeholder_base
from tfx.proto.orchestration import placeholder_pb2
from google.protobuf import message
class ExecInvocationPlaceholder(placeholder_base.Placeholder):
  """Placeholder that resolves to the ExecutionInvocation proto.

  Prefer to use execution_invocation() to create this placeholder.
  """

  def __init__(self):
    """Initializes the class. Consider this private."""
    # At runtime this placeholder evaluates to a proto message.
    super().__init__(expected_type=message.Message)

  def encode(
      self, component_spec: None | Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this placeholder as an EXEC_INVOCATION expression proto."""
    expression = placeholder_pb2.PlaceholderExpression()
    expression.placeholder.type = (
        placeholder_pb2.Placeholder.Type.EXEC_INVOCATION
    )
    return expression
The provided code snippet includes necessary dependencies for implementing the `execution_invocation` function. Write a Python function `def execution_invocation() -> ExecInvocationPlaceholder` to solve the following problem:
Returns a Placeholder representing ExecutionInvocation proto. Returns: A Placeholder that will render to the ExecutionInvocation proto. Accessing a proto field is the same as if accessing a proto field in Python. Prefer to use input(key)/output(key)/exec_property(key) functions instead of input_dict/output_dict/execution_properties field from ExecutionInvocation proto.
Here is the function:
def execution_invocation() -> ExecInvocationPlaceholder:
  """Returns a Placeholder that resolves to the ExecutionInvocation proto.

  Returns:
    A Placeholder that will render to the ExecutionInvocation proto.
    Accessing a proto field works the same as accessing that field in Python.
    Prefer the input(key)/output(key)/exec_property(key) functions over
    reading the input_dict/output_dict/execution_properties fields directly
    from the ExecutionInvocation proto.
  """
  return ExecInvocationPlaceholder()
166,493 | from __future__ import annotations
import typing
from typing import Any, Literal, Union
from tfx.dsl.placeholder import placeholder_base
from tfx.proto.orchestration import placeholder_pb2
from google.protobuf import message
class EnvironmentVariablePlaceholder(placeholder_base.Placeholder):
  """Placeholder that resolves to the value of an environment variable.

  Prefer to use environment_variable(...) to create this placeholder.
  """

  def __init__(self, key: str):
    """Initializes the class. Consider this private."""
    super().__init__(expected_type=placeholder_base.ValueType)
    self._key = key

  def encode(
      self, component_spec: Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this placeholder as an ENVIRONMENT_VARIABLE expression."""
    expression = placeholder_pb2.PlaceholderExpression()
    expression.placeholder.type = (
        placeholder_pb2.Placeholder.Type.ENVIRONMENT_VARIABLE
    )
    expression.placeholder.key = self._key
    return expression
The provided code snippet includes necessary dependencies for implementing the `environment_variable` function. Write a Python function `def environment_variable(key: str) -> EnvironmentVariablePlaceholder` to solve the following problem:
Returns a Placeholder representing EnvironmentVariable proto. Args: key: The key of the environment variable. Returns: A Placeholder that supports 1. Rendering the value of an environment variable for a given key. Example: environment_variable('FOO') 2. Concatenating with other placeholders or strings. Example: 'foo=' + environment_variable('FOO')
Here is the function:
def environment_variable(key: str) -> EnvironmentVariablePlaceholder:
  """Creates a Placeholder that renders an environment variable's value.

  Args:
    key: The key of the environment variable.

  Returns:
    A Placeholder that supports
    1. Rendering the value of an environment variable for a given key.
       Example: environment_variable('FOO')
    2. Concatenating with other placeholders or strings.
       Example: 'foo=' + environment_variable('FOO')
  """
  return EnvironmentVariablePlaceholder(key)
166,494 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class Predicate(Placeholder):
  """A boolean-valued Placeholder.

  Pipeline authors obtain an instance of Predicate by comparing a
  Placeholder with a primitive (int, float, or str), or by
  comparing two Placeholders with each other.

  The Predicate can then be used to define conditional statements using the
  pipeline-authoring DSL.

  Predicates should be instantiated with syntax like `<channel>.future() > 5`.
  """

  def __init__(self):
    """Initializes the class. Consider this private."""
    # A Predicate always evaluates to a boolean at pipeline runtime.
    super().__init__(expected_type=bool)

  def __add__(self, right):
    # Unlike Placeholders, Predicates cannot be added (concatenation makes no
    # sense for a boolean expression).
    raise NotImplementedError

  def __radd__(self, left):
    # Unlike Placeholders, Predicates cannot be added.
    raise NotImplementedError

  def b64encode(self, url_safe: bool = True):
    # Unlike Placeholders, Predicates cannot be b64encoded.
    raise NotImplementedError
class _NotPredicate(Predicate):
  """Represents a logical negation of another Predicate."""

  # NOTE(review): the bare annotation suggests attrs-style construction with a
  # positional argument; the decorator is not visible in this chunk — confirm.
  value: Predicate

  def encode(
      self, component_spec: Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this negation into a PlaceholderExpression proto."""
    if isinstance(self.value, _NotPredicate):  # not(not(foo)) becomes foo
      return self.value.value.encode(component_spec)
    result = placeholder_pb2.PlaceholderExpression()
    result.operator.unary_logical_op.op = (
        placeholder_pb2.UnaryLogicalOperator.Operation.NOT
    )
    result.operator.unary_logical_op.expression.CopyFrom(
        self.value.encode(component_spec)
    )
    return result

  def traverse(self) -> Iterator[Placeholder]:
    """Yields this placeholder and every placeholder under it."""
    yield self
    yield from self.value.traverse()
The provided code snippet includes necessary dependencies for implementing the `logical_not` function. Write a Python function `def logical_not(pred: Predicate) -> Predicate` to solve the following problem:
Applies a NOT boolean operation on a Predicate. Args: pred: The Predicate to apply the NOT operation to. Returns: The negated Predicate.
Here is the function:
def logical_not(pred: Predicate) -> Predicate:
  """Negates a Predicate.

  Args:
    pred: The Predicate to negate.

  Returns:
    A Predicate that evaluates to the boolean inverse of `pred`.
  """
  # pylint: disable=too-many-function-args
  # (workaround for https://github.com/PyCQA/pylint/issues/5857)
  return _NotPredicate(pred)
166,495 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class Predicate(Placeholder):
  """A boolean-valued Placeholder.

  Pipeline authors obtain an instance of Predicate by comparing a
  Placeholder with a primitive (int, float, or str), or by
  comparing two Placeholders with each other.

  The Predicate can then be used to define conditional statements using the
  pipeline-authoring DSL.

  Predicates should be instantiated with syntax like `<channel>.future() > 5`.
  """

  def __init__(self):
    """Initializes the class. Consider this private."""
    # A Predicate always evaluates to a boolean at pipeline runtime.
    super().__init__(expected_type=bool)

  def __add__(self, right):
    # Unlike Placeholders, Predicates cannot be added (concatenation makes no
    # sense for a boolean expression).
    raise NotImplementedError

  def __radd__(self, left):
    # Unlike Placeholders, Predicates cannot be added.
    raise NotImplementedError

  def b64encode(self, url_safe: bool = True):
    # Unlike Placeholders, Predicates cannot be b64encoded.
    raise NotImplementedError
class _BinaryLogicalPredicate(Predicate):
  """Represents a boolean logical expression with exactly two arguments."""

  # NOTE(review): bare annotations suggest attrs-style construction with
  # positional arguments (op, left, right); decorator not visible here.
  logical_op: placeholder_pb2.BinaryLogicalOperator.Operation
  left: Predicate
  right: Predicate

  def encode(
      self, component_spec: Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this binary logical expression into a proto."""
    result = placeholder_pb2.PlaceholderExpression()
    result.operator.binary_logical_op.op = self.logical_op
    result.operator.binary_logical_op.lhs.CopyFrom(
        self.left.encode(component_spec)
    )
    result.operator.binary_logical_op.rhs.CopyFrom(
        self.right.encode(component_spec)
    )
    return result

  def traverse(self) -> Iterator[Placeholder]:
    """Yields this placeholder and every placeholder under it."""
    yield self
    yield from self.left.traverse()
    yield from self.right.traverse()
The provided code snippet includes necessary dependencies for implementing the `logical_and` function. Write a Python function `def logical_and(left: Predicate, right: Predicate) -> Predicate` to solve the following problem:
Applies the AND boolean operation on two Predicates. Args: left: The first argument of the AND operation. right: The second argument of the AND operation. Returns: The Predicate resulting from the AND operation.
Here is the function:
def logical_and(left: Predicate, right: Predicate) -> Predicate:
  """Combines two Predicates with a boolean AND.

  Args:
    left: The first argument of the AND operation.
    right: The second argument of the AND operation.

  Returns:
    A Predicate that is true iff both inputs are true.
  """
  and_op = placeholder_pb2.BinaryLogicalOperator.Operation.AND
  # pylint: disable=too-many-function-args
  # (workaround for https://github.com/PyCQA/pylint/issues/5857)
  return _BinaryLogicalPredicate(and_op, left, right)
166,496 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class Predicate(Placeholder):
  """A boolean-valued Placeholder.

  Pipeline authors obtain an instance of Predicate by comparing a
  Placeholder with a primitive (int, float, or str), or by
  comparing two Placeholders with each other.

  The Predicate can then be used to define conditional statements using the
  pipeline-authoring DSL.

  Predicates should be instantiated with syntax like `<channel>.future() > 5`.
  """

  def __init__(self):
    """Initializes the class. Consider this private."""
    # A Predicate always evaluates to a boolean at pipeline runtime.
    super().__init__(expected_type=bool)

  def __add__(self, right):
    # Unlike Placeholders, Predicates cannot be added (concatenation makes no
    # sense for a boolean expression).
    raise NotImplementedError

  def __radd__(self, left):
    # Unlike Placeholders, Predicates cannot be added.
    raise NotImplementedError

  def b64encode(self, url_safe: bool = True):
    # Unlike Placeholders, Predicates cannot be b64encoded.
    raise NotImplementedError
class _BinaryLogicalPredicate(Predicate):
  """Represents a boolean logical expression with exactly two arguments."""

  # NOTE(review): bare annotations suggest attrs-style construction with
  # positional arguments (op, left, right); decorator not visible here.
  logical_op: placeholder_pb2.BinaryLogicalOperator.Operation
  left: Predicate
  right: Predicate

  def encode(
      self, component_spec: Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this binary logical expression into a proto."""
    result = placeholder_pb2.PlaceholderExpression()
    result.operator.binary_logical_op.op = self.logical_op
    result.operator.binary_logical_op.lhs.CopyFrom(
        self.left.encode(component_spec)
    )
    result.operator.binary_logical_op.rhs.CopyFrom(
        self.right.encode(component_spec)
    )
    return result

  def traverse(self) -> Iterator[Placeholder]:
    """Yields this placeholder and every placeholder under it."""
    yield self
    yield from self.left.traverse()
    yield from self.right.traverse()
The provided code snippet includes necessary dependencies for implementing the `logical_or` function. Write a Python function `def logical_or(left: Predicate, right: Predicate) -> Predicate` to solve the following problem:
Applies the OR boolean operation on two Predicates. Args: left: The first argument of the OR operation. right: The second argument of the OR operation. Returns: The Predicate resulting from the OR operation.
Here is the function:
def logical_or(left: Predicate, right: Predicate) -> Predicate:
  """Combines two Predicates with a boolean OR.

  Args:
    left: The first argument of the OR operation.
    right: The second argument of the OR operation.

  Returns:
    A Predicate that is true iff at least one input is true.
  """
  or_op = placeholder_pb2.BinaryLogicalOperator.Operation.OR
  # pylint: disable=too-many-function-args
  # (workaround for https://github.com/PyCQA/pylint/issues/5857)
  return _BinaryLogicalPredicate(or_op, left, right)
166,497 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
ValueLikeType = Union[ValueType, 'Placeholder']
class ListPlaceholder(Placeholder):
  """List of multiple Placeholders.

  Prefer to use ph.make_list() to create ListPlaceholder.
  """

  def __init__(self, input_placeholders: list[ValueLikeType]):
    """Initializes the class. Consider this private."""
    super().__init__(expected_type=list)
    self._input_placeholders = input_placeholders

  def __add__(self, right: ListPlaceholder) -> ListPlaceholder:
    # Concatenating two list placeholders yields a new, longer list.
    return ListPlaceholder(self._input_placeholders + right._input_placeholders)

  def __radd__(self, left: ListPlaceholder) -> ListPlaceholder:
    return ListPlaceholder(left._input_placeholders + self._input_placeholders)

  def serialize_list(
      self, serialization_format: ListSerializationFormat
  ) -> _ListSerializationOperator:
    """Serializes a list-valued placeholder to JSON or comma-separated string.

    Only supports primitive-typed list elements (bool, int, float or str) at
    the moment; throws a runtime error otherwise.

    Args:
      serialization_format: The format to serialize the list with.

    Returns:
      A placeholder representing the serialized list.
    """
    return _ListSerializationOperator(self, serialization_format)

  def traverse(self) -> Iterator[Placeholder]:
    """Yields all placeholders under and including this one."""
    yield from super().traverse()
    for p in self._input_placeholders:
      if isinstance(p, Placeholder):
        yield from p.traverse()

  def encode(
      self, component_spec: Optional[type['types.ComponentSpec']] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this list placeholder as a list_concat_op expression."""
    result = placeholder_pb2.PlaceholderExpression()
    result.operator.list_concat_op.SetInParent()
    expressions = result.operator.list_concat_op.expressions
    for input_placeholder in self._input_placeholders:
      expressions.append(encode_value_like(input_placeholder, component_spec))
    return result
The provided code snippet includes necessary dependencies for implementing the `make_list` function. Write a Python function `def make_list( input_placeholders: list[ValueLikeType], ) -> ListPlaceholder` to solve the following problem:
Returns a ListPlaceholder representing a list of input placeholders.
Here is the function:
def make_list(
    input_placeholders: list[ValueLikeType],
) -> ListPlaceholder:
  """Creates a ListPlaceholder from the given values and placeholders.

  Args:
    input_placeholders: Values or placeholders to collect into a list.

  Returns:
    A ListPlaceholder wrapping the given items.
  """
  return ListPlaceholder(input_placeholders)
166,498 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class Placeholder(abc.ABC):
  """A placeholder value computed based on a tree of Placeholders and operators.

  This is the base class of the Python placeholder API. It allows users of the
  Tflex DSL to construct somewhat complex expressions with a convenient Python
  API (e.g. using the + operator for string concatenation). Every Placeholder
  instance represents an expression with certain (future) inputs that will yield
  a value of the `expected_type` during pipeline execution.

  Placeholder instances are immutable. (There is one, very well controlled
  exception in ChannelWrappedPlaceholder.set_key(), but the way it is called
  still allows users to treat placeholders as immutable.) Each Placeholder
  instance represents a tree of sub-expressions, which are also immutable, so
  the entire tree is immutable. Thus, newly created Placeholder instances
  can safely reference any pre-existing Placeholder instances (including their
  entire sub-trees) without having to worry about them being mutated. Any given
  Placeholder could be referenced by multiple parents.

  The ultimate purpose of a Placeholder expression tree is to be encoded into a
  PlaceholderExpression proto, which then becomes part of the intermediate
  representation (IR) shipped to the orchestrator for pipeline execution. So
  this Python API only allows building placeholder expressions and the runtime
  only knows how to evaluate encoded PlaceholderExpressions.
  """

  def __init__(self, expected_type: Optional[type[Any]]):
    """Creates a new Placeholder. Consider this private.

    Args:
      expected_type: The Python type (Union types are allowed) that this
        Placeholder will evaluate to. None means that we don't know the type.
    """
    self.expected_type = expected_type

  def __deepcopy__(self, memo):
    # Placeholders are immutable. While nobody should (want to) invoke deepcopy
    # on a placeholder itself, when they're being cloned as part of a larger
    # deepcopy operation, it is safe to just return the same instance.
    return self

  def _is_maybe_proto_valued(self) -> bool:
    """True if the Placeholder might evaluate to a proto."""
    return _is_maybe_subclass(self.expected_type, message.Message)

  # Functions that allow the Tflex DSL user to apply standard Python operators
  # on a Placeholder, obtaining new Placeholders for that expression.
  def __getitem__(self, key: Union[int, str]) -> Placeholder:
    # A string key on a (maybe) proto-valued placeholder becomes a proto map
    # access; all other cases are plain index operations.
    if isinstance(key, str) and self._is_maybe_proto_valued():
      return _ProtoOperator(self, proto_field_path=[f'[{key!r}]'])
    return _IndexOperator(self, key, is_proto=self._is_maybe_proto_valued())

  def __getattr__(self, field_name: str) -> Placeholder:
    # Dunder names are excluded so Python-internal attribute probes don't
    # accidentally produce proto field accesses.
    if not field_name.startswith('__') and self._is_maybe_proto_valued():
      return _ProtoOperator(self, proto_field_path=[f'.{field_name}'])
    return super().__getattribute__(field_name)

  def __add__(self, right: Union[str, Placeholder]) -> _ConcatOperator:
    return _ConcatOperator([self, right])

  def __radd__(self, left: str) -> _ConcatOperator:
    return _ConcatOperator([left, self])

  def __eq__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.EQUAL, self, other)

  def __ne__(self, other: ValueLikeType) -> 'Predicate':
    return logical_not(self == other)

  def __lt__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.LESS_THAN, self, other)

  def __le__(self, other: ValueLikeType) -> 'Predicate':
    # a <= b is expressed as not(a > b).
    return logical_not(self > other)

  def __gt__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.GREATER_THAN, self, other)

  def __ge__(self, other: ValueLikeType) -> 'Predicate':
    # a >= b is expressed as not(a < b).
    return logical_not(self < other)

  # Additional functions that Tflex DSL users can apply to their Placeholders,
  # obtaining new Placeholders that represent these transformations.
  def __iter__(self) -> Iterator[Any]:
    raise RuntimeError(
        'Iterating over a placeholder is not supported. '
        'Did you miss the ending `,` in your tuple?'
    )

  def b64encode(self, url_safe: bool = True) -> _Base64EncodeOperator:
    """Encodes the value with URL-safe Base64 encoding."""
    return _Base64EncodeOperator(self, url_safe)

  def serialize(
      self,
      serialization_format: ProtoSerializationFormat,
  ) -> _ProtoOperator:
    """Serializes the proto-valued placeholder using the provided format.

    Args:
      serialization_format: The format of how the proto is serialized.

    Returns:
      A placeholder representing the serialized proto value.
    """
    assert self._is_maybe_proto_valued()
    return _ProtoOperator(self, [], serialization_format)

  # TODO(pke) Move this down to only the sub-classes that really support it, if
  # pytype allows.
  def serialize_list(
      self,
      serialization_format: ListSerializationFormat,
  ) -> Placeholder:
    """Serializes a list-valued placeholder to JSON or comma-separated string.

    Here list value includes repeated proto field. This function only
    supports primitive type list elements (bool, int, float or str) at the
    moment; throws runtime error otherwise.

    Args:
      serialization_format: The format of how the list is serialized.

    Returns:
      A placeholder representing the serialized list.
    """
    return _ListSerializationOperator(self, serialization_format)

  def encode(
      self, component_spec: Optional[type['types.ComponentSpec']] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Do not call this as a Tflex user.

    Encodes the Placeholder for later eval.

    Args:
      component_spec: A Tflex component spec whose PARAMETERS field will be used
        to determine the proto types of its inputs/outputs/parameters. This
        allows the encoded placeholder to include the proto descriptors.

    Returns:
      An encoded PlaceholderExpression, which when evaluated later at pipeline
      runtime will result in the value represented by this Placeholder.
    """
    raise NotImplementedError()

  def traverse(self) -> Iterator[Placeholder]:
    """Yields all placeholders under and including this one."""
    yield self
The provided code snippet includes necessary dependencies for implementing the `join` function. Write a Python function `def join( placeholders: Sequence[Union[str, Placeholder]], separator: str = '', ) -> Union[str, Placeholder]` to solve the following problem:
Joins a list consisting of placeholders and strings using separator. Returns an empty string if placeholders is empty. Args: placeholders: List of placeholders and/or strings. separator: The separator to use when joining the passed in values. Returns: A Placeholder representing the concatenation of all elements passed in, or a string in the case that no element was a Placeholder instance.
Here is the function:
def join(
    placeholders: Sequence[Union[str, Placeholder]],
    separator: str = '',
) -> Union[str, Placeholder]:
  """Joins a list consisting of placeholders and strings using separator.

  Returns an empty string if placeholders is empty.

  Args:
    placeholders: List of placeholders and/or strings.
    separator: The separator to use when joining the passed in values.

  Returns:
    A Placeholder representing the concatenation of all elements passed in, or
    a string in the case that no element was a Placeholder instance.
  """
  if not placeholders:
    return ''
  # Left-fold with `+`, interleaving the separator. Works for plain strings
  # and for placeholders, whose `+` builds a concatenation placeholder.
  joined = placeholders[0]
  for item in placeholders[1:]:
    if separator:
      joined = joined + separator + item
    else:
      joined = joined + item
  return joined
166,499 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class Placeholder(abc.ABC):
  """A placeholder value computed based on a tree of Placeholders and operators.

  This is the base class of the Python placeholder API. It allows users of the
  Tflex DSL to construct somewhat complex expressions with a convenient Python
  API (e.g. using the + operator for string concatenation). Every Placeholder
  instance represents an expression with certain (future) inputs that will yield
  a value of the `expected_type` during pipeline execution.

  Placeholder instances are immutable. (There is one, very well controlled
  exception in ChannelWrappedPlaceholder.set_key(), but the way it is called
  still allows users to treat placeholders as immutable.) Each Placeholder
  instance represents a tree of sub-expressions, which are also immutable, so
  the entire tree is immutable. Thus, newly created Placeholder instances
  can safely reference any pre-existing Placeholder instances (including their
  entire sub-trees) without having to worry about them being mutated. Any given
  Placeholder could be referenced by multiple parents.

  The ultimate purpose of a Placeholder expression tree is to be encoded into a
  PlaceholderExpression proto, which then becomes part of the intermediate
  representation (IR) shipped to the orchestrator for pipeline execution. So
  this Python API only allows building placeholder expressions and the runtime
  only knows how to evaluate encoded PlaceholderExpressions.
  """

  def __init__(self, expected_type: Optional[type[Any]]):
    """Creates a new Placeholder. Consider this private.

    Args:
      expected_type: The Python type (Union types are allowed) that this
        Placeholder will evaluate to. None means that we don't know the type.
    """
    self.expected_type = expected_type

  def __deepcopy__(self, memo):
    # Placeholders are immutable. While nobody should (want to) invoke deepcopy
    # on a placeholder itself, when they're being cloned as part of a larger
    # deepcopy operation, it is safe to just return the same instance.
    return self

  def _is_maybe_proto_valued(self) -> bool:
    """True if the Placeholder might evaluate to a proto."""
    return _is_maybe_subclass(self.expected_type, message.Message)

  # Functions that allow the Tflex DSL user to apply standard Python operators
  # on a Placeholder, obtaining new Placeholders for that expression.
  def __getitem__(self, key: Union[int, str]) -> Placeholder:
    # A string key on a (maybe) proto-valued placeholder becomes a proto map
    # access; all other cases are plain index operations.
    if isinstance(key, str) and self._is_maybe_proto_valued():
      return _ProtoOperator(self, proto_field_path=[f'[{key!r}]'])
    return _IndexOperator(self, key, is_proto=self._is_maybe_proto_valued())

  def __getattr__(self, field_name: str) -> Placeholder:
    # Dunder names are excluded so Python-internal attribute probes don't
    # accidentally produce proto field accesses.
    if not field_name.startswith('__') and self._is_maybe_proto_valued():
      return _ProtoOperator(self, proto_field_path=[f'.{field_name}'])
    return super().__getattribute__(field_name)

  def __add__(self, right: Union[str, Placeholder]) -> _ConcatOperator:
    return _ConcatOperator([self, right])

  def __radd__(self, left: str) -> _ConcatOperator:
    return _ConcatOperator([left, self])

  def __eq__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.EQUAL, self, other)

  def __ne__(self, other: ValueLikeType) -> 'Predicate':
    return logical_not(self == other)

  def __lt__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.LESS_THAN, self, other)

  def __le__(self, other: ValueLikeType) -> 'Predicate':
    # a <= b is expressed as not(a > b).
    return logical_not(self > other)

  def __gt__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.GREATER_THAN, self, other)

  def __ge__(self, other: ValueLikeType) -> 'Predicate':
    # a >= b is expressed as not(a < b).
    return logical_not(self < other)

  # Additional functions that Tflex DSL users can apply to their Placeholders,
  # obtaining new Placeholders that represent these transformations.
  def __iter__(self) -> Iterator[Any]:
    raise RuntimeError(
        'Iterating over a placeholder is not supported. '
        'Did you miss the ending `,` in your tuple?'
    )

  def b64encode(self, url_safe: bool = True) -> _Base64EncodeOperator:
    """Encodes the value with URL-safe Base64 encoding."""
    return _Base64EncodeOperator(self, url_safe)

  def serialize(
      self,
      serialization_format: ProtoSerializationFormat,
  ) -> _ProtoOperator:
    """Serializes the proto-valued placeholder using the provided format.

    Args:
      serialization_format: The format of how the proto is serialized.

    Returns:
      A placeholder representing the serialized proto value.
    """
    assert self._is_maybe_proto_valued()
    return _ProtoOperator(self, [], serialization_format)

  # TODO(pke) Move this down to only the sub-classes that really support it, if
  # pytype allows.
  def serialize_list(
      self,
      serialization_format: ListSerializationFormat,
  ) -> Placeholder:
    """Serializes a list-valued placeholder to JSON or comma-separated string.

    Here list value includes repeated proto field. This function only
    supports primitive type list elements (bool, int, float or str) at the
    moment; throws runtime error otherwise.

    Args:
      serialization_format: The format of how the list is serialized.

    Returns:
      A placeholder representing the serialized list.
    """
    return _ListSerializationOperator(self, serialization_format)

  def encode(
      self, component_spec: Optional[type['types.ComponentSpec']] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Do not call this as a Tflex user.

    Encodes the Placeholder for later eval.

    Args:
      component_spec: A Tflex component spec whose PARAMETERS field will be used
        to determine the proto types of its inputs/outputs/parameters. This
        allows the encoded placeholder to include the proto descriptors.

    Returns:
      An encoded PlaceholderExpression, which when evaluated later at pipeline
      runtime will result in the value represented by this Placeholder.
    """
    raise NotImplementedError()

  def traverse(self) -> Iterator[Placeholder]:
    """Yields all placeholders under and including this one."""
    yield self
class _JoinPathOperator(Placeholder):
  """Placeholder operator that applies os.path.join() to its arguments.

  Do not instantiate directly, use ph.join_path() instead.
  """

  def __init__(
      self,
      *args: str | Placeholder,
  ):
    # The joined path always renders as a string.
    super().__init__(expected_type=str)
    self._args = args

  def traverse(self) -> Iterator[Placeholder]:
    """Yields this placeholder and all placeholders among its arguments."""
    yield self
    for path_part in self._args:
      if isinstance(path_part, Placeholder):
        yield from path_part.traverse()

  def encode(
      self, component_spec: Optional[type['types.ComponentSpec']] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this operator as a join_path_op expression proto."""
    expression = placeholder_pb2.PlaceholderExpression()
    join_op = expression.operator.join_path_op
    for path_part in self._args:
      join_op.expressions.append(
          encode_value_like(path_part, component_spec)
      )
    return expression
The provided code snippet includes necessary dependencies for implementing the `join_path` function. Write a Python function `def join_path( *args: str | Placeholder, ) -> Placeholder` to solve the following problem:
Runs os.path.join() on placeholder arguments. Args: *args: (Placeholders that resolve to) strings which will be passed to os.path.join(). Returns: A placeholder that will resolve to the joined path.
Here is the function:
def join_path(
    *args: str | Placeholder,
) -> Placeholder:
  """Builds a placeholder that runs os.path.join() on its arguments.

  Args:
    *args: (Placeholders that resolve to) strings which will be passed to
      os.path.join().

  Returns:
    A placeholder that will resolve to the joined path.
  """
  return _JoinPathOperator(*args)
166,500 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
ValueLikeType = Union[ValueType, 'Placeholder']


class Placeholder(abc.ABC):
  """A placeholder value computed based on a tree of Placeholders and operators.

  This is the base class of the Python placeholder API. It allows users of the
  Tflex DSL to construct somewhat complex expressions with a convenient Python
  API (e.g. using the + operator for string concatenation). Every Placeholder
  instance represents an expression with certain (future) inputs that will yield
  a value of the `expected_type` during pipeline execution.

  Placeholder instances are immutable. (There is one, very well controlled
  exception in ChannelWrappedPlaceholder.set_key(), but the way it is called
  still allows users to treat placeholders as immutable.) Each Placeholder
  instance represents a tree of sub-expressions, which are also immutable, so
  the entire tree is immutable. Thus, newly created Placeholder instances
  can safely reference any pre-existing Placeholder instances (including their
  entire sub-trees) without having to worry about them being mutated. Any given
  Placeholder could be referenced by multiple parents.

  The ultimate purpose of a Placeholder expression tree is to be encoded into a
  PlaceholderExpression proto, which then becomes part of the intermediate
  representation (IR) shipped to the orchestrator for pipeline execution. So
  this Python API only allows building placeholder expressions and the runtime
  only knows how to evaluate encoded PlaceholderExpressions.
  """

  def __init__(self, expected_type: Optional[type[Any]]):
    """Creates a new Placeholder. Consider this private.

    Args:
      expected_type: The Python type (Union types are allowed) that this
        Placeholder will evaluate to. None means that we don't know the type.
    """
    self.expected_type = expected_type

  def __deepcopy__(self, memo):
    # Placeholders are immutable. While nobody should (want to) invoke deepcopy
    # on a placeholder itself, when they're being cloned as part of a larger
    # deepcopy operation, it is safe to just return the same instance.
    return self

  def _is_maybe_proto_valued(self) -> bool:
    """True if the Placeholder might evaluate to a proto."""
    return _is_maybe_subclass(self.expected_type, message.Message)

  # Functions that allow the Tflex DSL user to apply standard Python operators
  # on a Placeholder, obtaining new Placeholders for that expression.
  def __getitem__(self, key: Union[int, str]) -> Placeholder:
    # A string key on a (potentially) proto-valued placeholder is treated as a
    # proto map lookup; any other subscript becomes an index operation.
    if isinstance(key, str) and self._is_maybe_proto_valued():
      return _ProtoOperator(self, proto_field_path=[f'[{key!r}]'])
    return _IndexOperator(self, key, is_proto=self._is_maybe_proto_valued())

  def __getattr__(self, field_name: str) -> Placeholder:
    # NOTE: __getattr__ is only invoked for attributes that regular lookup did
    # not find, so methods defined on the class are unaffected. Dunder names
    # and non-proto placeholders fall through to the default lookup/error.
    if not field_name.startswith('__') and self._is_maybe_proto_valued():
      return _ProtoOperator(self, proto_field_path=[f'.{field_name}'])
    return super().__getattribute__(field_name)

  def __add__(self, right: Union[str, Placeholder]) -> _ConcatOperator:
    return _ConcatOperator([self, right])

  def __radd__(self, left: str) -> _ConcatOperator:
    return _ConcatOperator([left, self])

  def __eq__(self, other: ValueLikeType) -> 'Predicate':
    # NOTE: Unlike a regular __eq__, this builds a deferred comparison
    # Predicate rather than returning a bool (and, as a Python consequence of
    # defining __eq__ without __hash__, makes instances unhashable).
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.EQUAL, self, other)

  def __ne__(self, other: ValueLikeType) -> 'Predicate':
    return logical_not(self == other)

  def __lt__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.LESS_THAN, self, other)

  def __le__(self, other: ValueLikeType) -> 'Predicate':
    # a <= b is expressed as not (a > b).
    return logical_not(self > other)

  def __gt__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.GREATER_THAN, self, other)

  def __ge__(self, other: ValueLikeType) -> 'Predicate':
    # a >= b is expressed as not (a < b).
    return logical_not(self < other)

  # Additional functions that Tflex DSL users can apply to their Placeholders,
  # obtaining new Placeholders that represent these transformations.
  def __iter__(self) -> Iterator[Any]:
    # Guards against accidental unpacking, e.g. a forgotten trailing comma
    # turning an intended 1-tuple into iteration over the placeholder.
    raise RuntimeError(
        'Iterating over a placeholder is not supported. '
        'Did you miss the ending `,` in your tuple?'
    )

  def b64encode(self, url_safe: bool = True) -> _Base64EncodeOperator:
    """Encodes the value with URL-safe Base64 encoding."""
    return _Base64EncodeOperator(self, url_safe)

  def serialize(
      self,
      serialization_format: ProtoSerializationFormat,
  ) -> _ProtoOperator:
    """Serializes the proto-valued placeholder using the provided format.

    Args:
      serialization_format: The format of how the proto is serialized.

    Returns:
      A placeholder representing the serialized proto value.
    """
    assert self._is_maybe_proto_valued()
    return _ProtoOperator(self, [], serialization_format)

  # TODO(pke) Move this down to only the sub-classes that really support it, if
  # pytype allows.
  def serialize_list(
      self,
      serialization_format: ListSerializationFormat,
  ) -> Placeholder:
    """Serializes a list-valued placeholder to JSON or comma-separated string.

    Here list value includes repeated proto field. This function only
    supports primitive type list element (a.k.a bool, int, float or str) at the
    moment; throws runtime error otherwise.

    Args:
      serialization_format: The format of how the list is serialized.

    Returns:
      A placeholder representing the serialized list.
    """
    return _ListSerializationOperator(self, serialization_format)

  def encode(
      self, component_spec: Optional[type['types.ComponentSpec']] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Do not call this as a Tflex user.

    Encodes the Placeholder for later eval.

    Args:
      component_spec: A Tflex component spec whose PARAMETERS field will be used
        to determine the proto types of its inputs/outputs/parameters. This
        allows the encoded placeholder to include the proto descriptors.

    Returns:
      An encoded PlaceholderExpression, which when evaluated later at pipeline
      runtime will result in the value represented by this Placeholder.
    """
    raise NotImplementedError()

  def traverse(self) -> Iterator[Placeholder]:
    """Yields all placeholders under and including this one."""
    yield self
class DictPlaceholder(Placeholder):
  """Dict of multiple Placeholders. None values are dropped.

  Prefer to use ph.make_dict() to create DictPlaceholder.
  """

  def __init__(
      self,
      entries: Sequence[
          tuple[Union[str, Placeholder], Optional[ValueLikeType]]
      ],
  ):
    """Initializes the class. Consider this private."""
    super().__init__(expected_type=dict)
    self._entries = entries

  def __add__(self, right: DictPlaceholder) -> DictPlaceholder:
    raise NotImplementedError('Add operator not supported for DictPlaceholders')

  def __radd__(self, left: DictPlaceholder) -> DictPlaceholder:
    raise NotImplementedError('Add operator not supported for DictPlaceholders')

  def traverse(self) -> Iterator[Placeholder]:
    """Yields all placeholders under and including this one."""
    yield from super().traverse()
    for entry in self._entries:
      # Both the key and the value of an entry may themselves be placeholders.
      for part in entry:
        if isinstance(part, Placeholder):
          yield from part.traverse()

  def encode(
      self, component_spec: Optional[type['types.ComponentSpec']] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this placeholder as a make_dict_op proto expression."""
    result = placeholder_pb2.PlaceholderExpression()
    make_dict_op = result.operator.make_dict_op
    # Materialize the (possibly empty) op even when there are no entries.
    make_dict_op.SetInParent()
    for key, value in self._entries:
      if value is None:
        continue  # Drop None values
      make_dict_op.entries.add(
          key=encode_value_like(key, component_spec),
          value=encode_value_like(value, component_spec),
      )
    return result
The provided code snippet includes necessary dependencies for implementing the `make_dict` function. Write a Python function `def make_dict( entries: Union[ Mapping[str, Union[ValueLikeType, None]], Sequence[tuple[Union[str, Placeholder], Union[ValueLikeType, None]]], ], ) -> DictPlaceholder` to solve the following problem:
Returns a DictPlaceholder representing a dict of input placeholders. Args: entries: A mapping that will become the final dict after running placeholder resolution on each of the values. Values that resolve to None are dropped. If you also want placeholders in the keys, you need to pass the dict as a sequence of (k,v) tuples, whereby the key placeholder must evaluate to a string. Returns: A placeholder that will resolve to a dict with the given entries.
Here is the function:
def make_dict(
    entries: Union[
        Mapping[str, Union[ValueLikeType, None]],
        Sequence[tuple[Union[str, Placeholder], Union[ValueLikeType, None]]],
    ],
) -> DictPlaceholder:
  """Returns a DictPlaceholder representing a dict of input placeholders.

  Args:
    entries: A mapping that will become the final dict after running
      placeholder resolution on each of the values. Values that resolve to
      None are dropped. If you also want placeholders in the keys, pass the
      dict as a sequence of (k, v) tuples instead, whereby each key
      placeholder must evaluate to a string.

  Returns:
    A placeholder that will resolve to a dict with the given entries.
  """
  if isinstance(entries, Mapping):
    return DictPlaceholder(list(entries.items()))
  return DictPlaceholder(entries)
166,501 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
The provided code snippet includes necessary dependencies for implementing the `_is_maybe_subclass` function. Write a Python function `def _is_maybe_subclass( test_type: Optional[type[Any]], parent_type: type[Any] ) -> bool` to solve the following problem:
Like issubclass(), but supports Union types on the sub-class side. Args: test_type: A sub-type to test. Can be a Union or a plain class. parent_type: A parent type (class, type or tuple of classes/types). Returns: True if the test_type is a sub-type of the parent_type. If the test_type is a Union, any of them is allowed. If it's None, returns True.
Here is the function:
def _is_maybe_subclass(
test_type: Optional[type[Any]], parent_type: type[Any]
) -> bool:
"""Like issubclass(), but supports Union types on the sub-class side.
Args:
test_type: A sub-type to test. Can be a Union or a plain class.
parent_type: A parent type (class, type or tuple of classes/types).
Returns:
True if the test_type is a sub-type of the parent_type. If the test_type is
a Union, any of them is allowed. If it's None, returns True.
"""
if test_type is None:
return True
if typing.get_origin(test_type) == Union:
return any(
_is_maybe_subclass(t, parent_type) for t in typing.get_args(test_type)
)
assert typing.get_origin(test_type) is None
return issubclass(test_type, parent_type) | Like issubclass(), but supports Union types on the sub-class side. Args: test_type: A sub-type to test. Can be a Union or a plain class. parent_type: A parent type (class, type or tuple of classes/types). Returns: True if the test_type is a sub-type of the parent_type. If the test_type is a Union, any of them is allowed. If it's None, returns True. |
166,502 | from __future__ import annotations
import abc
import enum
import functools
import types
import typing
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
import attr
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
ValueLikeType = Union[ValueType, 'Placeholder']


class Placeholder(abc.ABC):
  """A placeholder value computed based on a tree of Placeholders and operators.

  This is the base class of the Python placeholder API. It allows users of the
  Tflex DSL to construct somewhat complex expressions with a convenient Python
  API (e.g. using the + operator for string concatenation). Every Placeholder
  instance represents an expression with certain (future) inputs that will yield
  a value of the `expected_type` during pipeline execution.

  Placeholder instances are immutable. (There is one, very well controlled
  exception in ChannelWrappedPlaceholder.set_key(), but the way it is called
  still allows users to treat placeholders as immutable.) Each Placeholder
  instance represents a tree of sub-expressions, which are also immutable, so
  the entire tree is immutable. Thus, newly created Placeholder instances
  can safely reference any pre-existing Placeholder instances (including their
  entire sub-trees) without having to worry about them being mutated. Any given
  Placeholder could be referenced by multiple parents.

  The ultimate purpose of a Placeholder expression tree is to be encoded into a
  PlaceholderExpression proto, which then becomes part of the intermediate
  representation (IR) shipped to the orchestrator for pipeline execution. So
  this Python API only allows building placeholder expressions and the runtime
  only knows how to evaluate encoded PlaceholderExpressions.
  """

  def __init__(self, expected_type: Optional[type[Any]]):
    """Creates a new Placeholder. Consider this private.

    Args:
      expected_type: The Python type (Union types are allowed) that this
        Placeholder will evaluate to. None means that we don't know the type.
    """
    self.expected_type = expected_type

  def __deepcopy__(self, memo):
    # Placeholders are immutable. While nobody should (want to) invoke deepcopy
    # on a placeholder itself, when they're being cloned as part of a larger
    # deepcopy operation, it is safe to just return the same instance.
    return self

  def _is_maybe_proto_valued(self) -> bool:
    """True if the Placeholder might evaluate to a proto."""
    return _is_maybe_subclass(self.expected_type, message.Message)

  # Functions that allow the Tflex DSL user to apply standard Python operators
  # on a Placeholder, obtaining new Placeholders for that expression.
  def __getitem__(self, key: Union[int, str]) -> Placeholder:
    # A string key on a (potentially) proto-valued placeholder is treated as a
    # proto map lookup; any other subscript becomes an index operation.
    if isinstance(key, str) and self._is_maybe_proto_valued():
      return _ProtoOperator(self, proto_field_path=[f'[{key!r}]'])
    return _IndexOperator(self, key, is_proto=self._is_maybe_proto_valued())

  def __getattr__(self, field_name: str) -> Placeholder:
    # NOTE: __getattr__ is only invoked for attributes that regular lookup did
    # not find, so methods defined on the class are unaffected. Dunder names
    # and non-proto placeholders fall through to the default lookup/error.
    if not field_name.startswith('__') and self._is_maybe_proto_valued():
      return _ProtoOperator(self, proto_field_path=[f'.{field_name}'])
    return super().__getattribute__(field_name)

  def __add__(self, right: Union[str, Placeholder]) -> _ConcatOperator:
    return _ConcatOperator([self, right])

  def __radd__(self, left: str) -> _ConcatOperator:
    return _ConcatOperator([left, self])

  def __eq__(self, other: ValueLikeType) -> 'Predicate':
    # NOTE: Unlike a regular __eq__, this builds a deferred comparison
    # Predicate rather than returning a bool (and, as a Python consequence of
    # defining __eq__ without __hash__, makes instances unhashable).
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.EQUAL, self, other)

  def __ne__(self, other: ValueLikeType) -> 'Predicate':
    return logical_not(self == other)

  def __lt__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.LESS_THAN, self, other)

  def __le__(self, other: ValueLikeType) -> 'Predicate':
    # a <= b is expressed as not (a > b).
    return logical_not(self > other)

  def __gt__(self, other: ValueLikeType) -> 'Predicate':
    # https://github.com/PyCQA/pylint/issues/5857 pylint: disable=too-many-function-args
    return _ComparisonPredicate(_CompareOp.GREATER_THAN, self, other)

  def __ge__(self, other: ValueLikeType) -> 'Predicate':
    # a >= b is expressed as not (a < b).
    return logical_not(self < other)

  # Additional functions that Tflex DSL users can apply to their Placeholders,
  # obtaining new Placeholders that represent these transformations.
  def __iter__(self) -> Iterator[Any]:
    # Guards against accidental unpacking, e.g. a forgotten trailing comma
    # turning an intended 1-tuple into iteration over the placeholder.
    raise RuntimeError(
        'Iterating over a placeholder is not supported. '
        'Did you miss the ending `,` in your tuple?'
    )

  def b64encode(self, url_safe: bool = True) -> _Base64EncodeOperator:
    """Encodes the value with URL-safe Base64 encoding."""
    return _Base64EncodeOperator(self, url_safe)

  def serialize(
      self,
      serialization_format: ProtoSerializationFormat,
  ) -> _ProtoOperator:
    """Serializes the proto-valued placeholder using the provided format.

    Args:
      serialization_format: The format of how the proto is serialized.

    Returns:
      A placeholder representing the serialized proto value.
    """
    assert self._is_maybe_proto_valued()
    return _ProtoOperator(self, [], serialization_format)

  # TODO(pke) Move this down to only the sub-classes that really support it, if
  # pytype allows.
  def serialize_list(
      self,
      serialization_format: ListSerializationFormat,
  ) -> Placeholder:
    """Serializes a list-valued placeholder to JSON or comma-separated string.

    Here list value includes repeated proto field. This function only
    supports primitive type list element (a.k.a bool, int, float or str) at the
    moment; throws runtime error otherwise.

    Args:
      serialization_format: The format of how the list is serialized.

    Returns:
      A placeholder representing the serialized list.
    """
    return _ListSerializationOperator(self, serialization_format)

  def encode(
      self, component_spec: Optional[type['types.ComponentSpec']] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Do not call this as a Tflex user.

    Encodes the Placeholder for later eval.

    Args:
      component_spec: A Tflex component spec whose PARAMETERS field will be used
        to determine the proto types of its inputs/outputs/parameters. This
        allows the encoded placeholder to include the proto descriptors.

    Returns:
      An encoded PlaceholderExpression, which when evaluated later at pipeline
      runtime will result in the value represented by this Placeholder.
    """
    raise NotImplementedError()

  def traverse(self) -> Iterator[Placeholder]:
    """Yields all placeholders under and including this one."""
    yield self
The provided code snippet includes necessary dependencies for implementing the `encode_value_like` function. Write a Python function `def encode_value_like( x: ValueLikeType, component_spec: Any = None ) -> placeholder_pb2.PlaceholderExpression` to solve the following problem:
Encodes x to a placeholder expression proto.
Here is the function:
def encode_value_like(
    x: ValueLikeType, component_spec: Any = None
) -> placeholder_pb2.PlaceholderExpression:
  """Encodes x to a placeholder expression proto."""
  if isinstance(x, Placeholder):
    return x.encode(component_spec)
  expression = placeholder_pb2.PlaceholderExpression()
  value = expression.value
  # bool must be tested before int because bool is a subclass of int.
  if isinstance(x, bool):
    value.bool_value = x
  elif isinstance(x, int):
    value.int_value = x
  elif isinstance(x, float):
    value.double_value = x
  elif isinstance(x, str):
    value.string_value = x
  else:
    raise ValueError(f'x must be an int, float, str, or Placeholder. x: {x}')
  return expression
166,503 | from __future__ import annotations
from typing import Any, Optional
from tfx.dsl.placeholder import placeholder_base
from tfx.proto.orchestration import placeholder_pb2
class ArtifactPlaceholder(placeholder_base.Placeholder):
  """Artifact Placeholder represents an input or an output artifact.

  Prefer to use ph.input(...) or ph.output(...) to create instances.
  """

  def __init__(
      self,
      key: str,
      is_input: bool,
      index: Optional[int] = None,
  ):
    """Initializes the class. Consider this private.

    Args:
      key: The input/output key the artifact(s) are registered under.
      is_input: True for an input artifact, False for an output artifact.
      index: Optional index into the artifact list for this key; when unset,
        encode() defaults to the first artifact.
    """
    # This should be tfx.types.Artifact, but it can't be due to a circular
    # dependency. See placeholder_base.py for details. TODO(b/191610358).
    super().__init__(expected_type=None)
    assert index is None or isinstance(index, int)
    self._key = key
    self._is_input = is_input
    self._index = index

  def is_input(self) -> bool:
    return self._is_input

  def is_output(self) -> bool:
    return not self._is_input

  def key(self) -> str:
    return self._key

  def uri(self) -> _ArtifactUriOperator:
    return _ArtifactUriOperator(self)

  def split_uri(self, split: str) -> _ArtifactUriOperator:
    return _ArtifactUriOperator(self, split)

  def value(self) -> _ArtifactValueOperator:
    # Bug fix: the original tested `if self.is_output:` — a bound method
    # object, which is always truthy — so value() raised even for input
    # artifacts. The method must be called to get the boolean.
    # NOTE(review): upstream these accessors may be @property-decorated —
    # confirm; with the plain methods defined here, the call is required.
    if self.is_output():
      raise ValueError('Calling ph.output(..).value is not supported.')
    return _ArtifactValueOperator(self)

  def __getitem__(self, index: int) -> ArtifactPlaceholder:
    # Selecting an index twice is not supported.
    assert self._index is None
    return ArtifactPlaceholder(self._key, self._is_input, index)

  def property(self, key: str) -> _PropertyOperator:
    return _PropertyOperator(self, key)

  def custom_property(self, key: str) -> _PropertyOperator:
    return _PropertyOperator(self, key, is_custom_property=True)

  def encode(
      self, component_spec: Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this placeholder as an index_op over an artifact placeholder."""
    del component_spec  # Unused: artifact placeholders need no spec context.
    result = placeholder_pb2.PlaceholderExpression()
    result.operator.index_op.index = self._index or 0
    artifact_result = result.operator.index_op.expression
    artifact_result.placeholder.type = (
        placeholder_pb2.Placeholder.INPUT_ARTIFACT
        if self._is_input
        else placeholder_pb2.Placeholder.OUTPUT_ARTIFACT
    )
    if self._key:
      artifact_result.placeholder.key = self._key
    return result
The provided code snippet includes necessary dependencies for implementing the `input` function. Write a Python function `def input(key: str) -> ArtifactPlaceholder` to solve the following problem:
Returns a Placeholder that represents an input artifact. Args: key: The key of the input artifact. Returns: A Placeholder that supports 1. Rendering the whole MLMD artifact proto as text_format. Example: input('model') 2. Accessing a specific index using [index], if multiple artifacts are associated with the given key. If not specified, default to the first artifact. Example: input('model')[0] 3. Getting the URI of an artifact through .uri property. Example: input('model').uri or input('model')[0].uri 4. Getting the URI of a specific split of an artifact using .split_uri(split_name) method. Example: input('examples')[0].split_uri('train') 5. Getting the value of a primitive artifact through .value property. Example: input('primitive').value 6. Concatenating with other placeholders or strings. Example: input('model').uri + '/model/' + exec_property('version')
Here is the function:
def input(key: str) -> ArtifactPlaceholder:  # pylint: disable=redefined-builtin
  """Returns a Placeholder that represents an input artifact.

  Args:
    key: The key of the input artifact.

  Returns:
    A Placeholder that supports
    1. Rendering the whole MLMD artifact proto as text_format, e.g.
       input('model').
    2. Selecting a specific artifact by index when multiple artifacts are
       associated with the key, e.g. input('model')[0]. Defaults to the first
       artifact when unspecified.
    3. Getting an artifact's URI through .uri, e.g. input('model').uri or
       input('model')[0].uri.
    4. Getting the URI of a specific split via .split_uri(split_name), e.g.
       input('examples')[0].split_uri('train').
    5. Getting the value of a primitive artifact through .value, e.g.
       input('primitive').value.
    6. Concatenating with other placeholders or strings, e.g.
       input('model').uri + '/model/' + exec_property('version').
  """
  return ArtifactPlaceholder(key=key, is_input=True)
166,504 | from __future__ import annotations
from typing import Any, Optional
from tfx.dsl.placeholder import placeholder_base
from tfx.proto.orchestration import placeholder_pb2
class ArtifactPlaceholder(placeholder_base.Placeholder):
  """Artifact Placeholder represents an input or an output artifact.

  Prefer to use ph.input(...) or ph.output(...) to create instances.
  """

  def __init__(
      self,
      key: str,
      is_input: bool,
      index: Optional[int] = None,
  ):
    """Initializes the class. Consider this private.

    Args:
      key: The input/output key the artifact(s) are registered under.
      is_input: True for an input artifact, False for an output artifact.
      index: Optional index into the artifact list for this key; when unset,
        encode() defaults to the first artifact.
    """
    # This should be tfx.types.Artifact, but it can't be due to a circular
    # dependency. See placeholder_base.py for details. TODO(b/191610358).
    super().__init__(expected_type=None)
    assert index is None or isinstance(index, int)
    self._key = key
    self._is_input = is_input
    self._index = index

  def is_input(self) -> bool:
    return self._is_input

  def is_output(self) -> bool:
    return not self._is_input

  def key(self) -> str:
    return self._key

  def uri(self) -> _ArtifactUriOperator:
    return _ArtifactUriOperator(self)

  def split_uri(self, split: str) -> _ArtifactUriOperator:
    return _ArtifactUriOperator(self, split)

  def value(self) -> _ArtifactValueOperator:
    # Bug fix: the original tested `if self.is_output:` — a bound method
    # object, which is always truthy — so value() raised even for input
    # artifacts. The method must be called to get the boolean.
    # NOTE(review): upstream these accessors may be @property-decorated —
    # confirm; with the plain methods defined here, the call is required.
    if self.is_output():
      raise ValueError('Calling ph.output(..).value is not supported.')
    return _ArtifactValueOperator(self)

  def __getitem__(self, index: int) -> ArtifactPlaceholder:
    # Selecting an index twice is not supported.
    assert self._index is None
    return ArtifactPlaceholder(self._key, self._is_input, index)

  def property(self, key: str) -> _PropertyOperator:
    return _PropertyOperator(self, key)

  def custom_property(self, key: str) -> _PropertyOperator:
    return _PropertyOperator(self, key, is_custom_property=True)

  def encode(
      self, component_spec: Any = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this placeholder as an index_op over an artifact placeholder."""
    del component_spec  # Unused: artifact placeholders need no spec context.
    result = placeholder_pb2.PlaceholderExpression()
    result.operator.index_op.index = self._index or 0
    artifact_result = result.operator.index_op.expression
    artifact_result.placeholder.type = (
        placeholder_pb2.Placeholder.INPUT_ARTIFACT
        if self._is_input
        else placeholder_pb2.Placeholder.OUTPUT_ARTIFACT
    )
    if self._key:
      artifact_result.placeholder.key = self._key
    return result
The provided code snippet includes necessary dependencies for implementing the `output` function. Write a Python function `def output(key: str) -> ArtifactPlaceholder` to solve the following problem:
Returns a Placeholder that represents an output artifact. It is the same as input(...) function, except it is for output artifacts. Args: key: The key of the output artifact. Returns: A Placeholder that supports 1. Rendering the whole artifact as text_format. Example: output('model') 2. Accessing a specific index using [index], if multiple artifacts are associated with the given key. If not specified, default to the first artifact. Example: output('model')[0] 3. Getting the URI of an artifact through .uri property. Example: output('model').uri or output('model')[0].uri 4. Getting the URI of a specific split of an artifact using .split_uri(split_name) method. Example: output('examples')[0].split_uri('train') 5. Getting the value of a primitive artifact through .value property. Example: output('primitive').value 6. Concatenating with other placeholders or strings. Example: output('model').uri + '/model/' + exec_property('version')
Here is the function:
def output(key: str) -> ArtifactPlaceholder:
  """Returns a Placeholder that represents an output artifact.

  It is the same as input(...) function, except it is for output artifacts.

  Args:
    key: The key of the output artifact.

  Returns:
    A Placeholder that supports
    1. Rendering the whole artifact as text_format, e.g. output('model').
    2. Selecting a specific artifact by index when multiple artifacts are
       associated with the key, e.g. output('model')[0]. Defaults to the
       first artifact when unspecified.
    3. Getting an artifact's URI through .uri, e.g. output('model').uri or
       output('model')[0].uri.
    4. Getting the URI of a specific split via .split_uri(split_name), e.g.
       output('examples')[0].split_uri('train').
    5. Getting the value of a primitive artifact through .value, e.g.
       output('primitive').value.
    6. Concatenating with other placeholders or strings, e.g.
       output('model').uri + '/model/' + exec_property('version').
  """
  return ArtifactPlaceholder(key=key, is_input=False)
166,505 | import abc
import copy
from typing import Any, Dict, Optional, Type
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec as executor_spec_module
from tfx.dsl.context_managers import dsl_context_registry
from tfx.dsl.experimental.node_execution_options import utils
from tfx.utils import deprecation_utils
from tfx.utils import doc_controls
from tfx.utils import json_utils
from tfx.utils import name_utils
The provided code snippet includes necessary dependencies for implementing the `_abstract_property` function. Write a Python function `def _abstract_property() -> Any` to solve the following problem:
Returns an abstract property for use in an ABC abstract class.
Here is the function:
def _abstract_property() -> Any:
"""Returns an abstract property for use in an ABC abstract class."""
return abc.abstractmethod(lambda: None) | Returns an abstract property for use in an ABC abstract class. |
166,506 | import os
from typing import Any, Dict, List
import absl
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import artifact_utils
from tfx.types import channel_utils
The provided code snippet includes necessary dependencies for implementing the `_generate_output_uri` function. Write a Python function `def _generate_output_uri(base_output_dir: str, name: str, execution_id: int, is_single_artifact: bool = True, index: int = 0) -> str` to solve the following problem:
Generate uri for output artifact.
Here is the function:
def _generate_output_uri(base_output_dir: str,
name: str,
execution_id: int,
is_single_artifact: bool = True,
index: int = 0) -> str:
"""Generate uri for output artifact."""
if is_single_artifact:
# TODO(b/145680633): Consider differentiating different types of uris.
return os.path.join(base_output_dir, name, str(execution_id))
return os.path.join(base_output_dir, name, str(execution_id), str(index)) | Generate uri for output artifact. |
166,507 | import os
from typing import Any, Dict, List
import absl
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import artifact_utils
from tfx.types import channel_utils
The provided code snippet includes necessary dependencies for implementing the `_prepare_output_paths` function. Write a Python function `def _prepare_output_paths(artifact: types.Artifact)` to solve the following problem:
Create output directories for output artifact.
Here is the function:
def _prepare_output_paths(artifact: types.Artifact):
  """Create output directories for output artifact."""
  uri = artifact.uri
  if fileio.exists(uri):
    absl.logging.warning('Output artifact uri %s already exists', uri)
    # TODO(b/158689199): We currently simply return as a short-term workaround
    # to unblock execution retires. A comprehensive solution to guarantee
    # idempotent executions is needed.
    return
  # TODO(b/147242148): Introduce principled artifact structure (directory
  # or file) definition.
  # Value artifacts are single files, so only the parent directory is
  # created; every other artifact is itself a directory.
  if isinstance(artifact, types.ValueArtifact):
    target_dir = os.path.dirname(uri)
  else:
    target_dir = uri
  # TODO(zhitaoli): Consider refactoring this out into something
  # which can handle permission bits.
  absl.logging.debug('Creating output artifact uri %s as directory',
                     target_dir)
  fileio.makedirs(target_dir)
166,508 | from typing import Any, Dict, List, Optional, Type, Union
import absl
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel_utils
from tfx.utils import doc_controls
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
IMPORT_RESULT_KEY = 'result'
def _prepare_artifact(
    metadata_handle: metadata.Metadata,
    uri: str,
    properties: Dict[str, Any],
    custom_properties: Dict[str, Any],
    reimport: bool,
    output_artifact_class: Type[types.Artifact],
    mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType],
) -> types.Artifact:
  """Prepares the Importer's output artifact.

  If there is already an artifact in MLMD with the same URI, properties /
  custom properties, and type, that artifact will be reused unless the
  `reimport` argument is set to True.

  Args:
    metadata_handle: The handler of MLMD.
    uri: The uri of the artifact.
    properties: The properties of the artifact, given as a dictionary from
      string keys to integer / string values. Must conform to the declared
      properties of the destination channel's output type.
    custom_properties: The custom properties of the artifact, given as a
      dictionary from string keys to integer / string values.
    reimport: If set to True, will register a new artifact even if it already
      exists in the database.
    output_artifact_class: The class of the output artifact.
    mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.

  Returns:
    An Artifact object representing the imported artifact.

  Raises:
    ValueError: If a custom property value is not an int, str, or bytes.
  """
  absl.logging.info(
      'Processing source uri: %s, properties: %s, custom_properties: %s' %
      (uri, properties, custom_properties))

  # Check types of custom properties.
  for key, value in custom_properties.items():
    if not isinstance(value, (int, str, bytes)):
      raise ValueError(
          ('Custom property value for key %r must be a string or integer '
           '(got %r instead)') % (key, value))

  # Every artifact already registered under this URI, regardless of type;
  # filtered down to reuse candidates below.
  unfiltered_previous_artifacts = metadata_handle.get_artifacts_by_uri(uri)

  # If the caller supplied a type without an id, resolve it from MLMD; a type
  # unknown to MLMD implies no existing artifact can possibly be reused.
  new_artifact_type = False
  if mlmd_artifact_type and not mlmd_artifact_type.id:
    try:
      mlmd_artifact_type = metadata_handle.store.get_artifact_type(
          mlmd_artifact_type.name
      )
    except errors.NotFoundError:
      # Artifact type is not registered, so it must be new.
      new_artifact_type = True

  result = output_artifact_class(mlmd_artifact_type)
  result.uri = uri
  result.is_external = True
  _set_artifact_properties(result, properties, custom_properties)

  # Only consider previous artifacts as candidates to reuse if:
  # * reimport is False
  # * the given artifact type is recognized by MLMD
  # * the type and properties match the imported artifact
  if not reimport and not new_artifact_type:
    previous_artifacts = []
    for candidate_mlmd_artifact in unfiltered_previous_artifacts:
      if mlmd_artifact_type and candidate_mlmd_artifact.type_id != mlmd_artifact_type.id:
        # If mlmd_artifact_type is defined, don't reuse existing artifacts if
        # they don't match the given type.
        continue
      is_candidate = True
      candidate_artifact = output_artifact_class(mlmd_artifact_type)
      candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
      # A candidate must match every declared property by exact value.
      for key, value in properties.items():
        if (
            not candidate_artifact.has_property(key)
            or getattr(candidate_artifact, key) != value
        ):
          is_candidate = False
          break
      # A candidate must also match every custom property; int and string
      # custom properties are read through separate accessors.
      for key, value in custom_properties.items():
        if isinstance(value, int):
          if (
              not candidate_artifact.has_custom_property(key)
              or candidate_artifact.get_int_custom_property(key) != value
          ):
            is_candidate = False
            break
        elif isinstance(value, (str, bytes)):
          if (
              not candidate_artifact.has_custom_property(key)
              or candidate_artifact.get_string_custom_property(key) != value
          ):
            is_candidate = False
            break
      if is_candidate:
        previous_artifacts.append(candidate_mlmd_artifact)

    # If a registered artifact has the same uri and properties and the user
    # does not explicitly ask for reimport, reuse that artifact.
    if previous_artifacts:
      absl.logging.info('Reusing existing artifact')
      # Prefer the most recently created matching artifact.
      result.set_mlmd_artifact(
          max(previous_artifacts, key=lambda m: m.create_time_since_epoch))

  return result
The provided code snippet includes necessary dependencies for implementing the `generate_output_dict` function. Write a Python function `def generate_output_dict( metadata_handle: metadata.Metadata, uri: str, properties: Dict[str, Any], custom_properties: Dict[str, Any], reimport: bool, output_artifact_class: Type[types.Artifact], mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None, output_key: Optional[str] = None, ) -> Dict[str, List[types.Artifact]]` to solve the following problem:
Generates Importer's output dict. If there is already an artifact in MLMD with the same URI and properties / custom properties, that artifact will be reused unless the `reimport` argument is set to True. Args: metadata_handle: The handler of MLMD. uri: The uri of the artifact. properties: The properties of the artifact, given as a dictionary from string keys to integer / string values. Must conform to the declared properties of the destination channel's output type. custom_properties: The custom properties of the artifact, given as a dictionary from string keys to integer / string values. reimport: If set to True, will register a new artifact even if it already exists in the database. output_artifact_class: The class of the output artifact. mlmd_artifact_type: The MLMD artifact type of the Artifact to be created. output_key: The key to use for the imported artifact in the Importer's output dictionary. Defaults to 'result'. Returns: A dictionary with the only key `output_key` whose value is the Artifact.
Here is the function:
def generate_output_dict(
    metadata_handle: metadata.Metadata,
    uri: str,
    properties: Dict[str, Any],
    custom_properties: Dict[str, Any],
    reimport: bool,
    output_artifact_class: Type[types.Artifact],
    mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None,
    output_key: Optional[str] = None,
) -> Dict[str, List[types.Artifact]]:
  """Generates Importer's output dict.

  If there is already an artifact in MLMD with the same URI and properties /
  custom properties, that artifact will be reused unless the `reimport`
  argument is set to True.

  Args:
    metadata_handle: The handler of MLMD.
    uri: The uri of the artifact.
    properties: The properties of the artifact, given as a dictionary from
      string keys to integer / string values. Must conform to the declared
      properties of the destination channel's output type.
    custom_properties: The custom properties of the artifact, given as a
      dictionary from string keys to integer / string values.
    reimport: If set to True, will register a new artifact even if it already
      exists in the database.
    output_artifact_class: The class of the output artifact.
    mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
    output_key: The key to use for the imported artifact in the Importer's
      output dictionary. Defaults to 'result'.

  Returns:
    A dictionary with the only key `output_key` whose value is the Artifact.
  """
  key = output_key if output_key else IMPORT_RESULT_KEY
  imported_artifact = _prepare_artifact(
      metadata_handle,
      uri=uri,
      properties=properties,
      custom_properties=custom_properties,
      output_artifact_class=output_artifact_class,
      mlmd_artifact_type=mlmd_artifact_type,
      reimport=reimport,
  )
  return {key: [imported_artifact]}
166,509 | from typing import Any, Callable, Iterable, List, Optional, Tuple, Type
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
import tfx.dsl.io.plugins.tensorflow_gfile
import tfx.dsl.io.plugins.local
def _get_filesystem(path) -> Type[filesystem.Filesystem]:
  """Looks up the filesystem plugin registered for the given path's scheme."""
  registry = filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY
  return registry.get_filesystem_for_path(path)
# Type alias for acceptable filesystem path arguments (byte or text paths).
PathType = Union[bytes, str]
The provided code snippet includes necessary dependencies for implementing the `mkdir` function. Write a Python function `def mkdir(path: PathType) -> None` to solve the following problem:
Make a directory at the given path; parent directory must exist.
Here is the function:
def mkdir(path: PathType) -> None:
  """Make a directory at the given path; parent directory must exist."""
  fs = _get_filesystem(path)
  fs.mkdir(path)
166,510 | from typing import Any, Callable, Iterable, List, Optional, Tuple, Type
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
import tfx.dsl.io.plugins.tensorflow_gfile
import tfx.dsl.io.plugins.local
def _get_filesystem(path) -> Type[filesystem.Filesystem]:
  # Resolve the filesystem plugin class for this path via the process-wide
  # default registry (dispatch is by path scheme).
  return (filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY
          .get_filesystem_for_path(path))
The provided code snippet includes necessary dependencies for implementing the `get_inline_filename` function. Write a Python function `def get_inline_filename(data: str, compressed: bool = False) -> str` to solve the following problem:
Return a path for an inline file with the given content.
Here is the function:
def get_inline_filename(data: str, compressed: bool = False) -> str:
  """Return a path for an inline file with the given content."""
  # '/inline' selects the inline-filesystem plugin from the registry.
  inline_fs = _get_filesystem('/inline')
  return inline_fs.get_inline_filename(data, compressed)
166,511 | import collections
from collections.abc import Iterable, Iterator
import contextlib
import threading
from typing import Any, Optional
from tfx.dsl.context_managers import dsl_context
class DslContextRegistry:
  """Registry for DslContexts and associated BaseNodes of a pipeline DSL.

  DslContextRegistry manages the active DslContexts, their orders, BaseNodes,
  and association between DslContext and BaseNodes during the pipeline DSL
  definition. DslContext and BaseNode always belong to exactly one
  DslContextRegistry (1:n relationship).
  """

  def __init__(self):
    super().__init__()
    # Frame of currently active DSL context. Ordered by parent -> child.
    self._active_contexts: list[dsl_context.DslContext] = []
    # All DSL contexts that have ever been defined so far.
    self._all_contexts: list[dsl_context.DslContext] = []
    self._all_nodes: list[_BaseNode] = []
    # Mapping from Context ID to a list of nodes that belong to each context.
    # Each list of node is sorted chronologically.
    self._nodes_by_context = collections.defaultdict(list)
    self._finalized = False

  def __copy__(self):
    # Shallow copy: the contained contexts and nodes are shared with the
    # source registry. The copy always starts out mutable, even when the
    # source registry has been finalized.
    result = DslContextRegistry()
    result._active_contexts = list(self._active_contexts)
    result._all_contexts = list(self._all_contexts)
    result._all_nodes = list(self._all_nodes)
    result._nodes_by_context.update({
        k: list(v) for k, v in self._nodes_by_context.items()
    })
    result._finalized = False
    return result

  def all_contexts(self) -> list[dsl_context.DslContext]:
    """All contexts defined during the lifespan of the registry."""
    return list(self._all_contexts)

  def active_contexts(self) -> list[dsl_context.DslContext]:
    """All active context frame in parent -> child order."""
    return list(self._active_contexts)

  def all_nodes(self):
    # NOTE: returns the internal list (not a copy); callers must not mutate.
    return self._all_nodes

  def finalize(self):
    """Finalize and make the instance immutable."""
    self._finalized = True

  def _check_mutable(self):
    # Guard invoked by every mutating method.
    if self._finalized:
      raise RuntimeError('Cannot mutate DslContextRegistry after finalized.')

  @contextlib.contextmanager
  def temporary_mutable(self):
    """Temporarily make the registry mutable.

    Restores the previous finalization state on exit. The decorator is
    required for the `with registry.temporary_mutable():` usage below.
    """
    is_finalized = self._finalized
    self._finalized = False
    try:
      yield
    finally:
      self._finalized = is_finalized

  def push_context(self, context: dsl_context.DslContext):
    """Pushes the context to the top of active context frames."""
    assert context not in self._active_contexts
    self._check_mutable()
    self._active_contexts.append(context)
    self._all_contexts.append(context)

  def pop_context(self) -> dsl_context.DslContext:
    """Removes the top context from the active context frame."""
    self._check_mutable()
    assert self._active_contexts, (
        'Internal assertion error; no active contexts to remove.')
    return self._active_contexts.pop()

  def peek_context(self) -> Optional[dsl_context.DslContext]:
    """Returns the top context of the active context frame."""
    return self._active_contexts[-1] if self._active_contexts else None

  def put_node(self, node: _BaseNode) -> None:
    """Associates the node to all active contexts."""
    self._check_mutable()
    self._all_nodes.append(node)
    for context in self._active_contexts:
      self._nodes_by_context[context].append(node)

  def remove_nodes(self, node_ids: Iterable[str]) -> None:
    """Removes nodes from the registry.

    This is only intended to be used for invasive pipeline modification during
    pipeline lowering and compilation. Do not use it directly.

    Args:
      node_ids: Node IDs to remove.
    """
    self._check_mutable()
    self._all_nodes = [n for n in self._all_nodes if n.id not in node_ids]
    for context in self._active_contexts:
      self._nodes_by_context[context] = [
          n for n in self._nodes_by_context[context] if n.id not in node_ids
      ]

  def replace_node(self, node_from: _BaseNode, node_to: _BaseNode) -> None:
    """Replaces one node instance to another in a registry."""
    self._check_mutable()
    if node_from not in self._all_nodes:
      raise ValueError(
          f'{node_from.id} does not exist in pipeline registry. Valid:'
          f' {[n.id for n in self._all_nodes]}'
      )
    self._all_nodes[self._all_nodes.index(node_from)] = node_to
    for context in self._all_contexts:
      nodes = self._nodes_by_context[context]
      if node_from in nodes:
        nodes[nodes.index(node_from)] = node_to
        context.replace_node(node_from, node_to)
        context.validate(nodes)

  def get_nodes(self, context: dsl_context.DslContext) -> list[_BaseNode]:
    """Gets all BaseNodes that belongs to the context.

    Args:
      context: A DslContext that has been put to the registry.

    Raises:
      ValueError: If the context is unknown to the registry.

    Returns:
      Nodes that belong to the context, possibly empty list.
    """
    if context not in self._all_contexts:
      raise ValueError(f'Context {context} does not exist in the registry.')
    return list(self._nodes_by_context[context])

  def get_contexts(self, node: _BaseNode) -> list[dsl_context.DslContext]:
    """Gets all dsl_context.DslContexts that the node belongs to.

    Args:
      node: A BaseNode that has been put to the registry.

    Raises:
      ValueError: If the node is unknown to the registry.

    Returns:
      List of DslContexts that wraps the node, ordered by outer-most to
      inner-most, possibly empty list.
    """
    # This is O(N^2), but not performance critical.
    if node not in self._all_nodes:
      # Fixed: stray ')' removed from the message so it matches the parallel
      # message in replace_node().
      raise ValueError(
          f'Node {node.id} does not exist in the registry. Valid:'
          f' {[n.id for n in self._all_nodes]}'
      )
    result = []
    for context in self._all_contexts:
      if node in self._nodes_by_context[context]:
        result.append(context)
    return result

  def extract_for_pipeline(
      self, nodes: Iterable[_BaseNode]
  ) -> 'DslContextRegistry':
    """Creates new registry with pipeline level contexts filtered out.

    This function should be called in the pipeline constructor, i.e., where
    pipeline scope ends, to persist contexts defined within the pipeline scope
    in the pipeline object.

    After the extraction, self no longer contains the extracted nodes and the
    contexts from subpipeline registry.

    Args:
      nodes: List of nodes that the pipeline contains. The pipeline itself does
        not belong to the list.

    Returns:
      A new DSL context registry that contains the argument nodes.
    """
    # pylint:disable=protected-access
    result = DslContextRegistry()
    if not nodes:
      return result
    self._check_mutable()

    # Move nodes from self to result.
    # We're preserving the original _all_nodes order (it matters).
    # TODO: b/321881540 - Should raise error if the node does not exist in
    # _all_nodes.
    nodes_set = set(nodes)
    result._all_nodes = [n for n in self._all_nodes if n in nodes_set]
    self._all_nodes = [n for n in self._all_nodes if n not in nodes_set]

    # Move contexts and associations from self to result.
    # Iterate over a snapshot: self._all_contexts is mutated inside the loop
    # (remove() during direct iteration would skip elements).
    for c in list(self._all_contexts):
      # Outer contexts that should stay in self. Removing nodes context
      # association is enough.
      if c in self._active_contexts:
        for n in result._all_nodes:
          self._nodes_by_context[c].remove(n)
        continue
      # Other contexts may subject to extraction if it has association with
      # nodes, otherwise irrelevant.
      for n in nodes:
        if n in self._nodes_by_context[c]:
          result._nodes_by_context[c].append(n)
      # Extracted contexts should be pruned from the self.
      # NOTE(review): extracted nodes appear to remain in
      # self._nodes_by_context for non-empty extracted contexts — confirm
      # against upstream intent.
      if result._nodes_by_context[c]:
        result._all_contexts.append(c)
      if not self._nodes_by_context[c]:
        del self._nodes_by_context[c]
        self._all_contexts.remove(c)

    # Remove dangling context.parent.
    for c in result._all_contexts:
      if c.parent not in result._all_contexts:
        c.parent = None

    result._finalized = True
    # pylint:enable=protected-access
    return result
@contextlib.contextmanager
def use_registry(registry: DslContextRegistry) -> Iterator[DslContextRegistry]:
  """Use the given registry as a global scope.

  Installs `registry` as the current thread-local registry (temporarily made
  mutable) for the duration of the `with` block, then restores the previous
  one. The `@contextlib.contextmanager` decorator is required: the function
  body yields, and callers (e.g. new_registry) use it in a `with` statement.
  """
  old_registry = get()
  _registry_holder.current = registry
  try:
    with registry.temporary_mutable():
      yield registry
  finally:
    # Always restore the previously installed registry, even on error.
    _registry_holder.current = old_registry
The provided code snippet includes necessary dependencies for implementing the `new_registry` function. Write a Python function `def new_registry() -> Iterator[DslContextRegistry]` to solve the following problem:
Push the new registry to the global scope.
Here is the function:
@contextlib.contextmanager
def new_registry() -> Iterator[DslContextRegistry]:
  """Push the new registry to the global scope.

  Creates a fresh DslContextRegistry and installs it for the duration of the
  `with` block via use_registry(). The `@contextlib.contextmanager` decorator
  is required because the body yields.
  """
  with use_registry(DslContextRegistry()) as result:
    yield result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.