id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
166,212 | import collections
import copy
import os
from typing import Any, Callable, Dict, List, Optional, Type, cast, MutableMapping
from absl import logging
from kfp import compiler
from kfp import dsl
from kfp import gcp
from kubernetes import client as k8s_client
from tfx import version
from tfx.dsl.compiler import compiler as tfx_compiler
from tfx.dsl.components.base import base_component as tfx_base_component
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow import base_component
from tfx.orchestration.kubeflow import utils
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import telemetry_utils
def get_default_output_filename(pipeline_name: str) -> str:
  """Returns the default compiled-pipeline archive name for `pipeline_name`."""
  return '{}.tar.gz'.format(pipeline_name)
166,213 | import argparse
import copy
import json
import logging
import os
import sys
import textwrap
from typing import cast, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
from tfx import types
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import kubernetes_executor_operator
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact
from tfx.types import channel
from tfx.types import standard_artifacts
from tfx.utils import telemetry_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
_KFP_POD_NAME_ENV_KEY = 'KFP_POD_NAME'
_KFP_POD_NAME_PROPERTY_KEY = 'kfp_pod_name'
The provided code snippet includes necessary dependencies for implementing the `_register_execution` function. Write a Python function `def _register_execution( metadata_handle: metadata.Metadata, execution_type: metadata_store_pb2.ExecutionType, contexts: List[metadata_store_pb2.Context], input_artifacts: MutableMapping[str, Sequence[types.Artifact]], exec_properties: Mapping[str, types.Property], ) -> metadata_store_pb2.Execution` to solve the following problem:
Registers an execution in MLMD.
Here is the function:
def _register_execution(
    metadata_handle: metadata.Metadata,
    execution_type: metadata_store_pb2.ExecutionType,
    contexts: List[metadata_store_pb2.Context],
    input_artifacts: MutableMapping[str, Sequence[types.Artifact]],
    exec_properties: Mapping[str, types.Property],
) -> metadata_store_pb2.Execution:
  """Registers an execution in MLMD, tagging it with the KFP pod name.

  Args:
    metadata_handle: handle to the MLMD store.
    execution_type: type proto of the execution to register.
    contexts: MLMD contexts the execution belongs to.
    input_artifacts: input artifacts keyed by input name.
    exec_properties: execution properties; not mutated by this call.

  Returns:
    The registered metadata_store_pb2.Execution.
  """
  # Work on a private deep copy so the caller's mapping is never mutated.
  properties = cast(MutableMapping[str, types.Property],
                    copy.deepcopy(exec_properties))
  pod_name = os.environ.get(_KFP_POD_NAME_ENV_KEY)
  if pod_name:
    # Recording the pod name lets a run in MLMD be traced back to its KFP pod.
    logging.info('Adding KFP pod name %s to execution', pod_name)
    properties[_KFP_POD_NAME_PROPERTY_KEY] = pod_name
  return execution_publish_utils.register_execution(
      metadata_handle=metadata_handle,
      execution_type=execution_type,
      contexts=contexts,
      input_artifacts=input_artifacts,
      exec_properties=properties,
  )
166,214 | import argparse
import copy
import json
import logging
import os
import sys
import textwrap
from typing import cast, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
from tfx import types
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import kubernetes_executor_operator
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact
from tfx.types import channel
from tfx.types import standard_artifacts
from tfx.utils import telemetry_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
def _get_config_value(config_value: kubeflow_pb2.ConfigValue) -> Optional[str]:
  """Resolves a ConfigValue to a string, reading the environment if needed."""
  source = config_value.WhichOneof('value_from')
  if source is None:
    raise ValueError('No value set in config value: {}'.format(config_value))
  # Either an inline literal or an environment-variable indirection.
  return (config_value.value if source == 'value' else
          os.getenv(config_value.environment_variable))
def _get_grpc_metadata_connection_config(
    kubeflow_metadata_config: kubeflow_pb2.KubeflowGrpcMetadataConfig
) -> metadata_store_pb2.MetadataStoreClientConfig:
  """Constructs a metadata grpc connection config.

  Args:
    kubeflow_metadata_config: Configuration parameters to use for constructing
      a valid metadata connection config in a Kubeflow cluster.

  Returns:
    A metadata_store_pb2.MetadataStoreClientConfig object.
  """
  host = _get_config_value(kubeflow_metadata_config.grpc_service_host)
  port = int(_get_config_value(kubeflow_metadata_config.grpc_service_port))
  config = metadata_store_pb2.MetadataStoreClientConfig()
  config.host = host
  config.port = port
  return config
The provided code snippet includes necessary dependencies for implementing the `_get_metadata_connection_config` function. Write a Python function `def _get_metadata_connection_config( kubeflow_metadata_config: kubeflow_pb2.KubeflowMetadataConfig ) -> Union[metadata_store_pb2.ConnectionConfig, metadata_store_pb2.MetadataStoreClientConfig]` to solve the following problem:
Constructs a metadata connection config. Args: kubeflow_metadata_config: Configuration parameters to use for constructing a valid metadata connection config in a Kubeflow cluster. Returns: A Union of metadata_store_pb2.ConnectionConfig and metadata_store_pb2.MetadataStoreClientConfig object.
Here is the function:
def _get_metadata_connection_config(
    kubeflow_metadata_config: kubeflow_pb2.KubeflowMetadataConfig
) -> Union[metadata_store_pb2.ConnectionConfig,
           metadata_store_pb2.MetadataStoreClientConfig]:
  """Constructs a metadata connection config.

  Args:
    kubeflow_metadata_config: Configuration parameters to use for constructing
      a valid metadata connection config in a Kubeflow cluster.

  Returns:
    A Union of metadata_store_pb2.ConnectionConfig and
    metadata_store_pb2.MetadataStoreClientConfig object.
  """
  config_type = kubeflow_metadata_config.WhichOneof('connection_config')
  if config_type is None:
    # Legacy path: no oneof set, so fall back to the flat mysql_* fields.
    # Bug fix: the original warning string was missing the space between
    # 'or' and 'KubeflowMySqlMetadataConfig', logging a garbled class name.
    logging.warning(
        'Providing mysql configuration through KubeflowMetadataConfig will be '
        'deprecated soon. Use one of KubeflowGrpcMetadataConfig or '
        'KubeflowMySqlMetadataConfig instead')
    connection_config = metadata_store_pb2.ConnectionConfig()
    connection_config.mysql.host = _get_config_value(
        kubeflow_metadata_config.mysql_db_service_host)
    connection_config.mysql.port = int(
        _get_config_value(kubeflow_metadata_config.mysql_db_service_port))
    connection_config.mysql.database = _get_config_value(
        kubeflow_metadata_config.mysql_db_name)
    connection_config.mysql.user = _get_config_value(
        kubeflow_metadata_config.mysql_db_user)
    connection_config.mysql.password = _get_config_value(
        kubeflow_metadata_config.mysql_db_password)
    return connection_config
  # Only the grpc oneof is supported going forward.
  assert config_type == 'grpc_config', ('expected oneof grpc_config')
  return _get_grpc_metadata_connection_config(
      kubeflow_metadata_config.grpc_config)
166,215 | import argparse
import copy
import json
import logging
import os
import sys
import textwrap
from typing import cast, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
from tfx import types
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import kubernetes_executor_operator
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact
from tfx.types import channel
from tfx.types import standard_artifacts
from tfx.utils import telemetry_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
def _sanitize_underscore(name: str) -> Optional[str]:
"""Sanitize the underscore in pythonic name for markdown visualization."""
if name:
return str(name).replace('_', '\\_')
else:
return None
def _render_artifact_as_mdstr(single_artifact: artifact.Artifact) -> str:
  """Render an artifact as markdown string with the following format.

  **Artifact: artifact1**
  **Properties**:
  **key1**: value1
  **key2**: value2
  ......

  Args:
    single_artifact: the artifact to be rendered.

  Returns:
    a md-formatted string representation of the artifact.
  """
  # 'span' and 'split_names' are only declared on some artifact types;
  # default to the literal string 'None' when the type does not define them.
  span_str = 'None'
  split_names_str = 'None'
  if single_artifact.PROPERTIES:
    if 'span' in single_artifact.PROPERTIES:
      span_str = str(single_artifact.span)
    if 'split_names' in single_artifact.PROPERTIES:
      split_names_str = str(single_artifact.split_names)
  # Underscores are escaped so markdown does not render them as emphasis.
  return textwrap.dedent("""\
      **Artifact: {name}**
      **Properties**:
      **uri**: {uri}
      **id**: {id}
      **span**: {span}
      **type_id**: {type_id}
      **type_name**: {type_name}
      **state**: {state}
      **split_names**: {split_names}
      **producer_component**: {producer_component}
      """.format(
      name=_sanitize_underscore(single_artifact.name) or 'None',
      uri=_sanitize_underscore(single_artifact.uri) or 'None',
      id=str(single_artifact.id),
      span=_sanitize_underscore(span_str),
      type_id=str(single_artifact.type_id),
      type_name=_sanitize_underscore(single_artifact.type_name),
      state=_sanitize_underscore(single_artifact.state) or 'None',
      split_names=_sanitize_underscore(split_names_str),
      producer_component=_sanitize_underscore(
          single_artifact.producer_component) or 'None'))
The provided code snippet includes necessary dependencies for implementing the `_render_channel_as_mdstr` function. Write a Python function `def _render_channel_as_mdstr(input_channel: channel.Channel) -> str` to solve the following problem:
Render a Channel as markdown string with the following format. **Type**: input_channel.type_name **Artifact: artifact1** **Properties**: **key1**: value1 **key2**: value2 ...... Args: input_channel: the channel to be rendered. Returns: a md-formatted string representation of the channel.
Here is the function:
def _render_channel_as_mdstr(input_channel: channel.Channel) -> str:
  """Render a Channel as a markdown string.

  The output starts with the channel type followed by one rendered section
  per artifact currently in the channel:

  **Type**: input_channel.type_name
  **Artifact: artifact1**
  **Properties**:
  **key1**: value1
  **key2**: value2
  ......

  Args:
    input_channel: the channel to be rendered.

  Returns:
    a md-formatted string representation of the channel.
  """
  header = '**Type**: {}\n\n'.format(
      _sanitize_underscore(input_channel.type_name))
  # Render every materialized artifact currently held by the channel.
  artifact_sections = [
      _render_artifact_as_mdstr(art) for art in input_channel.get()
  ]
  return header + '\n\n'.join(artifact_sections)
166,216 | import argparse
import copy
import json
import logging
import os
import sys
import textwrap
from typing import cast, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
from tfx import types
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import kubernetes_executor_operator
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact
from tfx.types import channel
from tfx.types import standard_artifacts
from tfx.utils import telemetry_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
def _sanitize_underscore(name: str) -> Optional[str]:
"""Sanitize the underscore in pythonic name for markdown visualization."""
if name:
return str(name).replace('_', '\\_')
else:
return None
def _render_artifact_as_mdstr(single_artifact: artifact.Artifact) -> str:
  """Render an artifact as markdown string with the following format.

  **Artifact: artifact1**
  **Properties**:
  **key1**: value1
  **key2**: value2
  ......

  Args:
    single_artifact: the artifact to be rendered.

  Returns:
    a md-formatted string representation of the artifact.
  """
  # 'span' and 'split_names' are only declared on some artifact types;
  # default to the literal string 'None' when the type does not define them.
  span_str = 'None'
  split_names_str = 'None'
  if single_artifact.PROPERTIES:
    if 'span' in single_artifact.PROPERTIES:
      span_str = str(single_artifact.span)
    if 'split_names' in single_artifact.PROPERTIES:
      split_names_str = str(single_artifact.split_names)
  # Underscores are escaped so markdown does not render them as emphasis.
  return textwrap.dedent("""\
      **Artifact: {name}**
      **Properties**:
      **uri**: {uri}
      **id**: {id}
      **span**: {span}
      **type_id**: {type_id}
      **type_name**: {type_name}
      **state**: {state}
      **split_names**: {split_names}
      **producer_component**: {producer_component}
      """.format(
      name=_sanitize_underscore(single_artifact.name) or 'None',
      uri=_sanitize_underscore(single_artifact.uri) or 'None',
      id=str(single_artifact.id),
      span=_sanitize_underscore(span_str),
      type_id=str(single_artifact.type_id),
      type_name=_sanitize_underscore(single_artifact.type_name),
      state=_sanitize_underscore(single_artifact.state) or 'None',
      split_names=_sanitize_underscore(split_names_str),
      producer_component=_sanitize_underscore(
          single_artifact.producer_component) or 'None'))
The provided code snippet includes necessary dependencies for implementing the `_dump_ui_metadata` function. Write a Python function `def _dump_ui_metadata( node: pipeline_pb2.PipelineNode, execution_info: data_types.ExecutionInfo, ui_metadata_path: str = '/mlpipeline-ui-metadata.json') -> None` to solve the following problem:
Dump KFP UI metadata json file for visualization purpose. For general components we just render a simple Markdown file for exec_properties/inputs/outputs. If the file already exists and is a valid format(have a list of dictionaries in outputs key), we append the existing UI metadata items to our output json file. Args: node: associated TFX node. execution_info: runtime execution info for this component, including materialized inputs/outputs/execution properties and id. ui_metadata_path: path to dump ui metadata.
Here is the function:
def _dump_ui_metadata(
    node: pipeline_pb2.PipelineNode,
    execution_info: data_types.ExecutionInfo,
    ui_metadata_path: str = '/mlpipeline-ui-metadata.json') -> None:
  """Dump KFP UI metadata json file for visualization purpose.

  For general components we just render a simple Markdown file for
  exec_properties/inputs/outputs.

  If the file already exists and has a valid format (a list of dictionaries
  under the 'outputs' key), the existing UI metadata items are appended to
  our output json file.

  Args:
    node: associated TFX node.
    execution_info: runtime execution info for this component, including
      materialized inputs/outputs/execution properties and id.
    ui_metadata_path: path to dump ui metadata.
  """
  # Render each exec property as a "**name**: value" markdown line.
  exec_properties_list = [
      '**{}**: {}'.format(
          _sanitize_underscore(name), _sanitize_underscore(exec_property))
      for name, exec_property in execution_info.exec_properties.items()
  ]
  src_str_exec_properties = '# Execution properties:\n{}'.format(
      '\n\n'.join(exec_properties_list) or 'No execution property.')

  def _dump_input_populated_artifacts(
      node_inputs: MutableMapping[str, pipeline_pb2.InputSpec],
      name_to_artifacts: Dict[str, List[artifact.Artifact]]) -> List[str]:
    """Dump artifacts markdown string for inputs.

    Args:
      node_inputs: maps from input name to input spec proto.
      name_to_artifacts: maps from input key to list of populated artifacts.

    Returns:
      A list of dumped markdown string, each of which represents a channel.
    """
    rendered_list = []
    for name, spec in node_inputs.items():
      # Need to look for materialized artifacts in the execution decision.
      rendered_artifacts = ''.join([
          _render_artifact_as_mdstr(single_artifact)
          for single_artifact in name_to_artifacts.get(name, [])
      ])
      # TODO(b/255869994): Use InputSpec.artifact_type field instead.
      if spec.channels:
        artifact_type = spec.channels[0].artifact_query.type.name
      else:
        artifact_type = '_Unknown_'
      rendered_list.append(
          '## {name}\n\n**Type**: {channel_type}\n\n{artifacts}'.format(
              name=_sanitize_underscore(name),
              channel_type=_sanitize_underscore(artifact_type),
              artifacts=rendered_artifacts))
    return rendered_list

  def _dump_output_populated_artifacts(
      node_outputs: MutableMapping[str, pipeline_pb2.OutputSpec],
      name_to_artifacts: Dict[str, List[artifact.Artifact]]) -> List[str]:
    """Dump artifacts markdown string for outputs.

    Args:
      node_outputs: maps from output name to output spec proto.
      name_to_artifacts: maps from output key to list of populated artifacts.

    Returns:
      A list of dumped markdown string, each of which represents a channel.
    """
    rendered_list = []
    for name, spec in node_outputs.items():
      # Need to look for materialized artifacts in the execution decision.
      rendered_artifacts = ''.join([
          _render_artifact_as_mdstr(single_artifact)
          for single_artifact in name_to_artifacts.get(name, [])
      ])
      # There must be at least a channel in an input, and all channels in an
      # input share the same artifact type.
      artifact_type = spec.artifact_spec.type.name
      rendered_list.append(
          '## {name}\n\n**Type**: {channel_type}\n\n{artifacts}'.format(
              name=_sanitize_underscore(name),
              channel_type=_sanitize_underscore(artifact_type),
              artifacts=rendered_artifacts))
    return rendered_list

  src_str_inputs = '# Inputs:\n{}'.format(''.join(
      _dump_input_populated_artifacts(
          node_inputs=node.inputs.inputs,
          name_to_artifacts=execution_info.input_dict or {})) or 'No input.')

  src_str_outputs = '# Outputs:\n{}'.format(''.join(
      _dump_output_populated_artifacts(
          node_outputs=node.outputs.outputs,
          name_to_artifacts=execution_info.output_dict or {})) or 'No output.')

  # First item is the inline markdown summary of this execution.
  outputs = [{
      'storage':
          'inline',
      'source':
          '{exec_properties}\n\n{inputs}\n\n{outputs}'.format(
              exec_properties=src_str_exec_properties,
              inputs=src_str_inputs,
              outputs=src_str_outputs),
      'type':
          'markdown',
  }]
  # Add Tensorboard view for ModelRun outputs.
  for name, spec in node.outputs.outputs.items():
    if spec.artifact_spec.type.name == standard_artifacts.ModelRun.TYPE_NAME:
      output_model = execution_info.output_dict[name][0]
      # Add Tensorboard view.
      tensorboard_output = {'type': 'tensorboard', 'source': output_model.uri}
      outputs.append(tensorboard_output)
  # Add existing KFP UI Metadata if the file exists and is a valid format
  if os.path.isfile(ui_metadata_path):

    def _read_validated_ui_metadata(
        ui_metadata_path: str) -> List[Dict[str, str]]:
      """Read validated existing KFP UI Metadata file.

      Args:
        ui_metadata_path: path for ui metadata

      Returns:
        A list of UI metadata if the file is valid. An empty list otherwise.
      """
      result = []
      try:
        with open(ui_metadata_path, 'r') as f:
          metadata_dict = json.load(f)
          # Only keep well-formed entries: dicts inside an 'outputs' list.
          if ('outputs' in metadata_dict and
              isinstance(metadata_dict['outputs'], list)):
            for ui_metadata in metadata_dict['outputs']:
              if isinstance(ui_metadata, dict):
                result.append(ui_metadata)
      except json.JSONDecodeError:
        # A corrupt file is treated as empty rather than failing the step.
        pass
      return result

    outputs += _read_validated_ui_metadata(ui_metadata_path)
  metadata_dict = {'outputs': outputs}
  with open(ui_metadata_path, 'w') as f:
    json.dump(metadata_dict, f)
166,217 | import argparse
import copy
import json
import logging
import os
import sys
import textwrap
from typing import cast, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
from tfx import types
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import kubernetes_executor_operator
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact
from tfx.types import channel
from tfx.types import standard_artifacts
from tfx.utils import telemetry_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `_get_pipeline_node` function. Write a Python function `def _get_pipeline_node(pipeline: pipeline_pb2.Pipeline, node_id: str)` to solve the following problem:
Gets node of a certain node_id from a pipeline.
Here is the function:
def _get_pipeline_node(pipeline: pipeline_pb2.Pipeline, node_id: str):
  """Gets node of a certain node_id from a pipeline."""
  found = None
  # Note: intentionally no early break — the last matching node wins,
  # matching the original behavior.
  for candidate in pipeline.nodes:
    is_pipeline_node = candidate.WhichOneof('node') == 'pipeline_node'
    if is_pipeline_node and candidate.pipeline_node.node_info.id == node_id:
      found = candidate.pipeline_node
  if not found:
    logging.error('pipeline ir = %s\n', pipeline)
    raise RuntimeError(f'Cannot find node with id {node_id} in pipeline ir.')
  return found
166,218 | import argparse
import copy
import json
import logging
import os
import sys
import textwrap
from typing import cast, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
from tfx import types
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import kubernetes_executor_operator
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact
from tfx.types import channel
from tfx.types import standard_artifacts
from tfx.utils import telemetry_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
def _parse_runtime_parameter_str(param: str) -> Tuple[str, types.Property]:
  """Parses runtime parameter string in command line argument."""
  # Runtime parameter format: "{name}=(INT|DOUBLE|STRING):{value}"
  name, value_and_type = param.split('=', 1)
  value_type, value = value_and_type.split(':', 1)
  int_type_name = pipeline_pb2.RuntimeParameter.Type.Name(
      pipeline_pb2.RuntimeParameter.INT)
  double_type_name = pipeline_pb2.RuntimeParameter.Type.Name(
      pipeline_pb2.RuntimeParameter.DOUBLE)
  if value_type == int_type_name:
    value = int(value)
  elif value_type == double_type_name:
    value = float(value)
  # Any other declared type (e.g. STRING) keeps the raw string value.
  return (name, value)
The provided code snippet includes necessary dependencies for implementing the `_resolve_runtime_parameters` function. Write a Python function `def _resolve_runtime_parameters(tfx_ir: pipeline_pb2.Pipeline, parameters: Optional[List[str]]) -> None` to solve the following problem:
Resolve runtime parameters in the pipeline proto inplace.
Here is the function:
def _resolve_runtime_parameters(tfx_ir: pipeline_pb2.Pipeline,
                                parameters: Optional[List[str]]) -> None:
  """Resolve runtime parameters in the pipeline proto inplace."""
  if parameters is None:
    return
  # The pipeline run id is substituted with the concrete workflow id taken
  # from the Argo environment rather than from the command line.
  bindings = {
      constants.PIPELINE_RUN_ID_PARAMETER_NAME: os.environ['WORKFLOW_ID'],
  }
  # Argo will fill runtime parameter values in the parameters.
  bindings.update(_parse_runtime_parameter_str(p) for p in parameters)
  runtime_parameter_utils.substitute_runtime_parameter(tfx_ir, bindings)
166,219 | import functools
from typing import Any, Dict, List, Type
from airflow import models
from airflow.operators import python_operator
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration.config import base_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.utils import telemetry_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `_airflow_component_launcher` function. Write a Python function `def _airflow_component_launcher( component: base_node.BaseNode, component_launcher_class: Type[ base_component_launcher.BaseComponentLauncher], pipeline_info: data_types.PipelineInfo, driver_args: data_types.DriverArgs, metadata_connection_config: metadata_store_pb2.ConnectionConfig, beam_pipeline_args: List[str], additional_pipeline_args: Dict[str, Any], component_config: base_component_config.BaseComponentConfig, exec_properties: Dict[str, Any], **kwargs) -> None` to solve the following problem:
Helper function to launch TFX component execution. This helper function will be called with Airflow env objects which contains run_id that we need to pass into TFX ComponentLauncher. Args: component: TFX BaseComponent instance. This instance holds all inputs and outputs placeholders as well as component properties. component_launcher_class: The class of the launcher to launch the component. pipeline_info: A data_types.PipelineInfo instance that holds pipeline properties driver_args: Component specific args for driver. metadata_connection_config: Configuration for how to connect to metadata. beam_pipeline_args: Pipeline arguments for Beam powered Components. additional_pipeline_args: A dict of additional pipeline args. component_config: Component config to launch the component. exec_properties: Execution properties from the ComponentSpec. **kwargs: Context arguments that will be passed in by Airflow, including: - ti: TaskInstance object from which we can get run_id of the running pipeline. For more details, please refer to the code: https://github.com/apache/airflow/blob/master/airflow/operators/python_operator.py
Here is the function:
def _airflow_component_launcher(
    component: base_node.BaseNode, component_launcher_class: Type[
        base_component_launcher.BaseComponentLauncher],
    pipeline_info: data_types.PipelineInfo, driver_args: data_types.DriverArgs,
    metadata_connection_config: metadata_store_pb2.ConnectionConfig,
    beam_pipeline_args: List[str], additional_pipeline_args: Dict[str, Any],
    component_config: base_component_config.BaseComponentConfig,
    exec_properties: Dict[str, Any], **kwargs) -> None:
  """Launch a single TFX component from within an Airflow task.

  Airflow invokes this callable with its context kwargs, from which the
  current DAG run id is extracted and propagated into the TFX component
  launcher.

  Args:
    component: TFX BaseComponent instance. This instance holds all inputs and
      outputs placeholders as well as component properties.
    component_launcher_class: The class of the launcher to launch the
      component.
    pipeline_info: A data_types.PipelineInfo instance that holds pipeline
      properties.
    driver_args: Component specific args for driver.
    metadata_connection_config: Configuration for how to connect to metadata.
    beam_pipeline_args: Pipeline arguments for Beam powered Components.
    additional_pipeline_args: A dict of additional pipeline args.
    component_config: Component config to launch the component.
    exec_properties: Execution properties from the ComponentSpec.
    **kwargs: Context arguments passed in by Airflow; 'ti' (TaskInstance) is
      used to look up the run_id of the running pipeline. For details see
      https://github.com/apache/airflow/blob/master/airflow/operators/python_operator.py
  """
  component.exec_properties.update(exec_properties)
  # The concrete run id is only known at task runtime, via the TaskInstance.
  pipeline_info.run_id = kwargs['ti'].get_dagrun().run_id
  component_launcher = component_launcher_class.create(
      component=component,
      pipeline_info=pipeline_info,
      driver_args=driver_args,
      metadata_connection=metadata.Metadata(metadata_connection_config),
      beam_pipeline_args=beam_pipeline_args,
      additional_pipeline_args=additional_pipeline_args,
      component_config=component_config)
  # Tag telemetry for this launch with the Airflow runner label.
  with telemetry_utils.scoped_labels(
      {telemetry_utils.LABEL_TFX_RUNNER: 'airflow'}):
    component_launcher.launch()
166,220 | from typing import Optional
from absl import logging
from tfx.proto.orchestration import local_deployment_config_pb2
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from google.protobuf import any_pb2
from google.protobuf import message
def _to_local_deployment(
    input_config: pipeline_pb2.IntermediateDeploymentConfig
) -> local_deployment_config_pb2.LocalDeploymentConfig:
  """Turns IntermediateDeploymentConfig to LocalDeploymentConfig.

  Executor specs, custom driver specs and node-level platform configs are
  converted entry by entry; the MLMD connection config is extracted last.

  Args:
    input_config: The intermediate (platform-agnostic) deployment config.

  Returns:
    An equivalent LocalDeploymentConfig.
  """
  result = local_deployment_config_pb2.LocalDeploymentConfig()
  for node_id, spec in input_config.executor_specs.items():
    result.executor_specs[node_id].CopyFrom(
        _build_executable_spec(node_id, spec))
  for node_id, spec in input_config.custom_driver_specs.items():
    result.custom_driver_specs[node_id].CopyFrom(
        _build_executable_spec(node_id, spec))
  for node_id, platform_config in (
      input_config.node_level_platform_configs.items()):
    result.node_level_platform_configs[node_id].CopyFrom(
        _build_local_platform_config(node_id, platform_config))
  result.metadata_connection_config.CopyFrom(
      extract_mlmd_connection(input_config.metadata_connection_config))
  if result.metadata_connection_config.WhichOneof('connection_config') is None:
    # Some users (e.g. Kubernetes) bring their own MLMD connection, so the IR
    # may legitimately omit it; warn rather than fail.
    logging.warning('metadata_connection_config is not provided by IR.')
  return result
The provided code snippet includes necessary dependencies for implementing the `extract_local_deployment_config` function. Write a Python function `def extract_local_deployment_config( pipeline: pipeline_pb2.Pipeline ) -> local_deployment_config_pb2.LocalDeploymentConfig` to solve the following problem:
Extracts the proto.Any pipeline.deployment_config to LocalDeploymentConfig.
Here is the function:
def extract_local_deployment_config(
    pipeline: pipeline_pb2.Pipeline
) -> local_deployment_config_pb2.LocalDeploymentConfig:
  """Extracts the proto.Any pipeline.deployment_config to LocalDeploymentConfig.

  Tries to unpack the Any payload directly as a LocalDeploymentConfig; if
  that fails, falls back to IntermediateDeploymentConfig and converts it.

  Raises:
    ValueError: If deployment_config is missing or of an unsupported type.
  """
  if not pipeline.deployment_config:
    raise ValueError('deployment_config is not available in the pipeline.')
  local_config = local_deployment_config_pb2.LocalDeploymentConfig()
  if pipeline.deployment_config.Unpack(local_config):
    return local_config
  intermediate_config = pipeline_pb2.IntermediateDeploymentConfig()
  if pipeline.deployment_config.Unpack(intermediate_config):
    return _to_local_deployment(intermediate_config)
  raise ValueError('deployment_config {} of type {} is not supported'.format(
      pipeline.deployment_config, type(pipeline.deployment_config)))
166,221 | from typing import Optional
from absl import logging
from tfx.proto.orchestration import local_deployment_config_pb2
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from google.protobuf import any_pb2
from google.protobuf import message
def _unwrap_executable_spec(
    executable_spec: Optional['local_deployment_config_pb2.ExecutableSpec']
) -> Optional['message.Message']:
  """Unwraps the one-of `spec` field from an ExecutableSpec wrapper.

  Returns None when no wrapper is provided (or it is falsy).
  """
  if not executable_spec:
    return None
  populated_field = executable_spec.WhichOneof('spec')
  return getattr(executable_spec, populated_field)
def extract_executor_spec(
    deployment_config: 'local_deployment_config_pb2.LocalDeploymentConfig',
    node_id: str
) -> Optional['message.Message']:
  """Returns the node's unwrapped executor spec, or None if absent."""
  wrapped = deployment_config.executor_specs.get(node_id)
  # Unwrap the one-of `spec` field in place (same semantics as
  # _unwrap_executable_spec, inlined here).
  return getattr(wrapped, wrapped.WhichOneof('spec')) if wrapped else None
166,222 | from typing import Optional
from absl import logging
from tfx.proto.orchestration import local_deployment_config_pb2
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from google.protobuf import any_pb2
from google.protobuf import message
def _unwrap_executable_spec(
    executable_spec: Optional[local_deployment_config_pb2.ExecutableSpec]
) -> Optional[message.Message]:
  """Unwraps the one of spec from ExecutableSpec.

  Returns the concrete message stored in the populated `spec` oneof field,
  or None when no (or a falsy) ExecutableSpec is given.
  """
  return (getattr(executable_spec, executable_spec.WhichOneof('spec'))
          if executable_spec else None)
def extract_custom_driver_spec(
    deployment_config: 'local_deployment_config_pb2.LocalDeploymentConfig',
    node_id: str
) -> Optional['message.Message']:
  """Returns the node's unwrapped custom driver spec, or None if absent."""
  wrapped = deployment_config.custom_driver_specs.get(node_id)
  # Unwrap the one-of `spec` field in place (same semantics as
  # _unwrap_executable_spec, inlined here).
  return getattr(wrapped, wrapped.WhichOneof('spec')) if wrapped else None
166,223 | from typing import Optional, Tuple, Type
from tfx.dsl.components.base import base_component
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.launcher import base_component_launcher
The provided code snippet includes necessary dependencies for implementing the `find_component_launch_info` function. Write a Python function `def find_component_launch_info( p_config: pipeline_config.PipelineConfig, component: base_component.BaseComponent, ) -> Tuple[Type[base_component_launcher.BaseComponentLauncher], Optional[base_component_config.BaseComponentConfig]]` to solve the following problem:
Find a launcher and component config to launch the component. The default lookup logic goes through the `supported_launcher_classes` in sequence for each config from the `default_component_configs`. User can override a single component setting by `component_config_overrides`. The method returns the first component config and launcher which together can launch the executor_spec of the component. Subclass may customize the logic by overriding the method. Args: p_config: the pipeline config. component: the component to launch. Returns: The found tuple of component launcher class and the compatible component config. Raises: RuntimeError: if no supported launcher is found.
Here is the function:
def find_component_launch_info(
    p_config: 'pipeline_config.PipelineConfig',
    component: 'base_component.BaseComponent',
) -> Tuple[Type['base_component_launcher.BaseComponentLauncher'],
           Optional['base_component_config.BaseComponentConfig']]:
  """Find a launcher and component config to launch the component.

  A per-component override in `component_config_overrides` wins outright;
  otherwise each config from `default_component_configs` (and finally no
  config at all) is tried against each class in `supported_launcher_classes`
  in order. The first (launcher, config) pair whose `can_launch` accepts the
  component's executor_spec is returned.

  Args:
    p_config: the pipeline config.
    component: the component to launch.

  Returns:
    A tuple of (component launcher class, compatible component config).

  Raises:
    RuntimeError: if no supported launcher is found.
  """
  if component.id in p_config.component_config_overrides:
    candidate_configs = [p_config.component_config_overrides[component.id]]
  else:
    # The trailing None lets a launcher match with no component config.
    candidate_configs = p_config.default_component_configs + [None]

  for candidate_config in candidate_configs:
    for launcher_cls in p_config.supported_launcher_classes:
      if launcher_cls.can_launch(component.executor_spec, candidate_config):
        return (launcher_cls, candidate_config)

  raise RuntimeError('No launcher info can be found for component "%s".' %
                     component.component_id)
166,224 | from typing import Dict, Iterable, List, Mapping, Optional
from tfx import types
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from tfx.utils import json_utils
from tfx.utils import proto_utils
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
from ml_metadata.proto import metadata_store_service_pb2
The provided code snippet includes necessary dependencies for implementing the `build_artifact_dict` function. Write a Python function `def build_artifact_dict( proto_dict: Mapping[str, metadata_store_service_pb2.ArtifactStructList] ) -> Dict[str, List[types.Artifact]]` to solve the following problem:
Converts input/output artifact dict.
Here is the function:
def build_artifact_dict(
    proto_dict: Mapping[str, 'metadata_store_service_pb2.ArtifactStructList']
) -> Dict[str, List['types.Artifact']]:
  """Converts input/output artifact dict.

  Each ArtifactStructList entry is deserialized element by element; only the
  `artifact` oneof variant of ArtifactStruct is supported.

  Raises:
    RuntimeError: If an element uses a oneof variant other than `artifact`.
  """
  result = {}
  for key, struct_list in proto_dict.items():
    deserialized = []
    for artifact_struct in struct_list.elements:
      if not artifact_struct.HasField('artifact'):
        raise RuntimeError('Only support artifact oneof field')
      artifact_and_type = artifact_struct.artifact
      deserialized.append(
          artifact_utils.deserialize_artifact(artifact_and_type.type,
                                              artifact_and_type.artifact))
    result[key] = deserialized
  return result
166,225 | from typing import Dict, Iterable, List, Mapping, Optional
from tfx import types
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from tfx.utils import json_utils
from tfx.utils import proto_utils
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
from ml_metadata.proto import metadata_store_service_pb2
The provided code snippet includes necessary dependencies for implementing the `build_value_dict` function. Write a Python function `def build_value_dict( metadata_value_dict: Mapping[str, metadata_store_pb2.Value] ) -> Dict[str, types.ExecPropertyTypes]` to solve the following problem:
Converts MLMD value dict into plain value dict.
Here is the function:
def build_value_dict(
    metadata_value_dict: Mapping[str, 'metadata_store_pb2.Value']
) -> Dict[str, 'types.ExecPropertyTypes']:
  """Converts MLMD value dict into plain value dict.

  Each MLMD Value has exactly one populated `value` oneof field; the raw
  Python payload of that field is extracted for every key.
  """
  return {
      key: getattr(value, value.WhichOneof('value'))
      for key, value in metadata_value_dict.items()
  }
166,226 | import copy
import enum
from typing import Any, Collection, Dict, Iterable, Iterator, List, Optional, Tuple, Union, cast
import warnings
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.base import executor_spec
from tfx.dsl.context_managers import dsl_context_registry as dsl_context_registry_lib
from tfx.dsl.experimental.conditionals import conditional
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel
from tfx.types import channel_utils
from tfx.utils import doc_controls
from tfx.utils import topsort
from google.protobuf import message
def add_beam_pipeline_args_to_component(component, beam_pipeline_args):
  """Prepends pipeline-level Beam args onto a Beam component's own args.

  Pipeline-level args are placed first so that component-level args, coming
  later, override them. Components without a BeamExecutorSpec are untouched.
  """
  spec = component.executor_spec
  if not isinstance(spec, executor_spec.BeamExecutorSpec):
    return
  beam_spec = cast(executor_spec.BeamExecutorSpec, spec)
  beam_spec.beam_pipeline_args = (
      beam_pipeline_args + beam_spec.beam_pipeline_args)
166,227 | import copy
import enum
from typing import Any, Collection, Dict, Iterable, Iterator, List, Optional, Tuple, Union, cast
import warnings
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_node
from tfx.dsl.components.base import executor_spec
from tfx.dsl.context_managers import dsl_context_registry as dsl_context_registry_lib
from tfx.dsl.experimental.conditionals import conditional
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel
from tfx.types import channel_utils
from tfx.utils import doc_controls
from tfx.utils import topsort
from google.protobuf import message
class Pipeline(base_node.BaseNode):
  """Logical TFX pipeline object.

  Pipeline object represents the DAG of TFX components, which can be run using
  one of the pipeline orchestration systems that TFX supports. For details,
  please refer to the
  [guide](https://github.com/tensorflow/tfx/blob/master/docs/guide/build_tfx_pipeline.md).

  Attributes:
    components: A deterministic list of logical components of this pipeline,
      which are deduped and topologically sorted.
    enable_cache: Whether or not cache is enabled for this run.
    metadata_connection_config: The config to connect to ML metadata.
    execution_mode: Execution mode of the pipeline. Currently only support
      synchronous execution mode.
    beam_pipeline_args: Pipeline arguments for Beam powered Components. Use
      `with_beam_pipeline_args` to set component level Beam args.
    platform_config: Pipeline level platform config, in proto form.
  """

  def __init__(
      self,
      pipeline_name: str,
      pipeline_root: Optional[Union[str, ph.Placeholder]] = '',
      metadata_connection_config: Optional[
          metadata.ConnectionConfigType
      ] = None,
      components: Iterable[base_node.BaseNode] = (),
      enable_cache: bool = False,
      beam_pipeline_args: Optional[List[Union[str, ph.Placeholder]]] = None,
      platform_config: Optional[message.Message] = None,
      execution_mode: ExecutionMode = ExecutionMode.SYNC,
      inputs: Optional[PipelineInputs] = None,
      outputs: Optional[Dict[str, channel.OutputChannel]] = None,
      dsl_context_registry: Optional[
          dsl_context_registry_lib.DslContextRegistry
      ] = None,
  ):
    """Initialize pipeline.

    Args:
      pipeline_name: Name of the pipeline;
      pipeline_root: Path to root directory of the pipeline. This will most
        often be just a string. Some orchestrators may have limited support for
        constructing this from a Placeholder, e.g. a RuntimeInfoPlaceholder that
        refers to fields from the platform config. pipeline_root is optional
        only if the pipeline is composed within another parent pipeline, in
        which case it will inherit its parent pipeline's root.
      metadata_connection_config: The config to connect to ML metadata.
      components: Optional list of components to construct the pipeline.
      enable_cache: Whether or not cache is enabled for this run.
      beam_pipeline_args: Pipeline arguments for Beam powered Components.
      platform_config: Pipeline level platform config, in proto form.
      execution_mode: The execution mode of the pipeline, can be SYNC or ASYNC.
      inputs: Optional inputs of a pipeline.
      outputs: Optional outputs of a pipeline.
      dsl_context_registry: DslContextRegistry to use for this pipeline, if not
        provided then the current context (potentially a new DslContext) will be
        used.
    """
    if len(pipeline_name) > _MAX_PIPELINE_NAME_LENGTH:
      raise ValueError(
          f'pipeline {pipeline_name} exceeds maximum allowed length: {_MAX_PIPELINE_NAME_LENGTH}.'
      )
    self.pipeline_name = pipeline_name

    # Registry extraction should come before super().__init__() which put self
    # to the active DslContextRegistry.
    self._dsl_context_registry = dsl_context_registry
    if self._dsl_context_registry is None:
      parent_reg = dsl_context_registry_lib.get()
      self._dsl_context_registry = parent_reg.extract_for_pipeline(components)

    # Initialize pipeline as a node.
    super().__init__()

    if inputs:
      inputs.pipeline = self
    self._inputs = inputs
    if outputs:
      # Wrap raw output channels so that consumers in an outer pipeline see
      # them as outputs of this pipeline node.
      self._outputs = {
          k: channel.PipelineOutputChannel(v, pipeline=self, output_key=k)
          for k, v in outputs.items()
      }
    else:
      self._outputs = {}
    self._id = pipeline_name

    # Once pipeline is finalized, this instance is regarded as immutable and
    # any detectable mutation will raise an error.
    self._finalized = False

    # TODO(b/183621450): deprecate PipelineInfo.
    self.pipeline_info = data_types.PipelineInfo(  # pylint: disable=g-missing-from-attributes
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root)

    self.enable_cache = enable_cache
    self.metadata_connection_config = metadata_connection_config
    self.execution_mode = execution_mode
    self._beam_pipeline_args = beam_pipeline_args or []
    self.platform_config = platform_config
    # TODO: b/324635891 - Remove all references and clean this up.
    self.additional_pipeline_args = {}

    # TODO(b/216581002): Use self._dsl_context_registry to obtain components.
    self._components = []
    if components:
      self._set_components(components)

  def _check_mutable(self):
    # Mutators call this first; finalize() flips self._finalized to True.
    if self._finalized:
      raise RuntimeError('Cannot mutate Pipeline after finalize.')

  # NOTE(review): this no-arg accessor (and several below) reads like a
  # @property whose decorator was lost in extraction — confirm against the
  # canonical source before relying on attribute vs. call syntax.
  def beam_pipeline_args(self):
    """Beam pipeline args used for all components in the pipeline."""
    return self._beam_pipeline_args

  def dsl_context_registry(self) -> dsl_context_registry_lib.DslContextRegistry:  # pylint: disable=g-missing-from-attributes
    # The registry is only guaranteed to be persisted after finalize().
    if self._dsl_context_registry is None:
      raise RuntimeError('DslContextRegistry is not persisted yet. Run '
                         'pipeline.finalize() first.')
    return self._dsl_context_registry

  def id(self):
    # Node id of the pipeline; identical to pipeline_name (set in __init__).
    return self._id

  def components(self):
    """A deterministic list of logical components that are deduped and topologically sorted."""
    return self._components

  # NOTE(review): same name as the accessor above — presumably a
  # @components.setter whose decorator was stripped; confirm.
  def components(self, components: List[base_node.BaseNode]):
    self._set_components(components)

  def _set_components(self, components: Iterable[base_node.BaseNode]) -> None:
    """Set a full list of components of the pipeline."""
    self._check_mutable()
    deduped_components = set(components)
    # Wire implicit (data-dependency) edges before topological sorting.
    for upstream_component, component in enumerate_implicit_dependencies(
        list(deduped_components),
        registry=self._dsl_context_registry,
        pipeline=self,
    ):
      component.add_upstream_node(upstream_component)
    layers = topsort.topsorted_layers(
        list(deduped_components),
        get_node_id_fn=lambda c: c.id,
        get_parent_nodes=lambda c: c.upstream_nodes,
        get_child_nodes=lambda c: c.downstream_nodes)
    self._components = []
    for layer in layers:
      for component in layer:
        self._components.append(component)

    # Propagate pipeline-level Beam args into each Beam-powered component.
    if self.beam_pipeline_args:
      for component in self._components:
        add_beam_pipeline_args_to_component(component, self.beam_pipeline_args)

  def finalize(self):
    # After this call the pipeline is immutable (_check_mutable will raise).
    self._persist_dsl_context_registry()
    self._finalized = True

  def _persist_dsl_context_registry(self):
    """Persist the DslContextRegistry to the pipeline."""
    assert self._dsl_context_registry is not None
    self._dsl_context_registry = copy.copy(self._dsl_context_registry)
    self._dsl_context_registry.finalize()
    # Warn about components the registry never saw (typically reuse across
    # pipelines or interleaved pipeline definitions).
    given_components = set(self._components)
    registry_components = set(self._dsl_context_registry.all_nodes)
    for unseen_component in given_components - registry_components:
      warnings.warn(
          f'Component {unseen_component.id} is not found from the registry. '
          'This is probably due to reusing component from another pipeline '
          'or interleaved pipeline definitions. Make sure each component '
          'belong to exactly one pipeline, and pipeline definitions are '
          'separated.')

  def inputs(self) -> Dict[str, Any]:
    # If we view a Pipeline as a Node, its inputs should be unwrapped (raw)
    # channels that are provided through PipelineInputs, and consumed by nodes
    # in the inner pipeline.
    if self._inputs:
      return self._inputs.raw_inputs
    else:
      return {}

  def outputs(self) -> Dict[str, Any]:
    # If we view a Pipeline as a Node, its outputs should be wrapped channels
    # that will be consumed by nodes in the outer pipeline.
    return self._outputs

  def exec_properties(self) -> Dict[str, Any]:
    # A pipeline-as-node exposes no execution properties.
    return {}
The provided code snippet includes necessary dependencies for implementing the `enumerate_implicit_dependencies` function. Write a Python function `def enumerate_implicit_dependencies( components: Collection[base_node.BaseNode], registry: dsl_context_registry_lib.DslContextRegistry, pipeline: Optional[Pipeline] = None, ) -> Iterator[Tuple[base_node.BaseNode, base_node.BaseNode]]` to solve the following problem:
Enumerate component dependencies arising from data deps between them. Args: components: Components to consider. registry: DslContextRegistry to use for looking up conditional predicates. pipeline: Pipeline object if calling from the context of one. Yields: Pairs of the form (upstream_component, component). If a component has no upstream components within `components` then it will not be present as the first element of any tuple in the output. A warning is generated if an `upstream_component` of some node in `components` is not part of the supplied pipeline's components. Raises: RuntimeError: When duplicate components are detected.
Here is the function:
def enumerate_implicit_dependencies(
    components: Collection[base_node.BaseNode],
    registry: dsl_context_registry_lib.DslContextRegistry,
    pipeline: Optional[Pipeline] = None,
) -> Iterator[Tuple[base_node.BaseNode, base_node.BaseNode]]:
  """Enumerate component dependencies arising from data deps between them.

  Args:
    components: Components to consider.
    registry: DslContextRegistry to use for looking up conditional predicates.
    pipeline: Pipeline object if calling from the context of one.

  Yields:
    Pairs of the form (upstream_component, component). If a component has no
    upstream components within `components` then it will not be present as the
    first element of any tuple in the output. A warning is generated if an
    `upstream_component` of some node in `components` is not part of the
    supplied pipeline's components.

  Raises:
    RuntimeError: When duplicate components are detected.
  """
  node_by_id = {}
  # Fills in producer map.
  for component in components:
    # Checks every node has an unique id.
    if component.id in node_by_id:
      # NOTE(review): the two f-strings below concatenate without a space
      # ("...component typeFoo..."); likely a missing trailing space —
      # confirm before changing the user-visible message.
      raise RuntimeError(
          f'Duplicated node_id {component.id} for component type'
          f'{component.type}. Try setting a different node_id using '
          '`.with_id()`.'
      )
    if pipeline and component.id == pipeline.pipeline_name:
      # A node may not share its id with the enclosing pipeline.
      raise RuntimeError(
          f'node id {component.id} is the same as its enclosing pipeline id.'
          'Try setting a different node_id using `.with_id()`.'
      )
    node_by_id[component.id] = component
  # Deduce upstream nodes based on producer map.
  for component in components:
    # Collect every channel this component consumes: declared inputs, plus
    # channels referenced by placeholder exec properties, plus channels used
    # by conditional predicates guarding this component.
    channels = list(component.inputs.values())
    for exec_property in component.exec_properties.values():
      if isinstance(exec_property, ph.Placeholder):
        channels.extend(channel_utils.get_dependent_channels(exec_property))
    if component in registry.all_nodes:
      # Backward compatibility; component might not be part of the current
      # pipeline registry in the case
      # NOTE(review): the original comment above ends mid-sentence —
      # presumably "...of components reused from another pipeline"; confirm.
      for predicate in conditional.get_predicates(component, registry):
        channels.extend(channel_utils.get_dependent_channels(predicate))
    pipeline_component_ids = set(
        (component.id for component in pipeline.components)
    ) if pipeline else set()
    for input_channel in channels:
      for upstream_node_id in input_channel.get_data_dependent_node_ids():
        if pipeline and upstream_node_id == pipeline.id:
          # If a component's input channel depends on the (self) pipeline,
          # it means that component consumes pipeline-level inputs. No need to
          # add upstream node here. Pipeline-level inputs will be handled
          # during compilation.
          continue
        upstream_node = node_by_id.get(upstream_node_id)
        if upstream_node:
          yield (upstream_node, component)
        elif pipeline and upstream_node_id not in pipeline_component_ids:
          warnings.warn(
              f'Node {component.id} depends on the output of node'
              f' {upstream_node_id}, but {upstream_node_id} is not included in'
              ' the components of pipeline. Did you forget to add it?'
          )
166,228 | from typing import Any, Dict, List, Optional, Union
import jinja2
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import executor_spec
def _render_items(items: List[str], context: Dict[str, Any]) -> List[str]:
if not items:
return items
return [_render_text(item, context) for item in items]
def _render_text(text: str, context: Dict[str, Any]) -> str:
  """Renders a single Jinja2 template string against `context`."""
  template = jinja2.Template(text)
  return template.render(context)
def _resolve_container_command_line(
    cmd_args: Optional[List['placeholders.CommandlineArgumentType']],
    input_dict: Dict[str, List['types.Artifact']],
    output_dict: Dict[str, List['types.Artifact']],
    exec_properties: Dict[str, Any],
) -> List[str]:
  """Resolves placeholders in the command line of a container.

  Args:
    cmd_args: command line args to resolve.
    input_dict: Dictionary of input artifacts consumed by this component.
    output_dict: Dictionary of output artifacts produced by this component.
    exec_properties: Dictionary of execution properties.

  Returns:
    Resolved command line.

  Raises:
    TypeError: On unsupported placeholder types or non-string expansions.
  """

  def _expand(arg: 'placeholders.CommandlineArgumentType') -> str:
    """Resolves a single argument, recursing into concatenations."""
    if isinstance(arg, str):
      return arg
    if isinstance(arg, placeholders.InputValuePlaceholder):
      # Exec properties take precedence over value artifacts of the same name.
      if arg.input_name in exec_properties:
        return str(exec_properties[arg.input_name])
      return str(input_dict[arg.input_name][0].value)
    if isinstance(arg, placeholders.InputUriPlaceholder):
      return input_dict[arg.input_name][0].uri
    if isinstance(arg, placeholders.OutputUriPlaceholder):
      return output_dict[arg.output_name][0].uri
    if isinstance(arg, placeholders.ConcatPlaceholder):
      pieces = [_expand(item) for item in arg.items]
      for piece in pieces:
        if not isinstance(piece, str):
          raise TypeError('Expanded item "{}" has incorrect type "{}"'.format(
              piece, type(piece)))
      return ''.join(pieces)
    raise TypeError(
        ('Unsupported type of command-line arguments: "{}".'
         ' Supported types are {}.')
        .format(type(arg), str(executor_specs.CommandlineArgumentType)))

  resolved_command_line = []
  for arg in (cmd_args or []):
    expanded = _expand(arg)
    if not isinstance(expanded, str):
      raise TypeError(
          'Resolved argument "{}" (type="{}") is not a string.'.format(
              expanded, type(expanded)))
    resolved_command_line.append(expanded)
  return resolved_command_line
The provided code snippet includes necessary dependencies for implementing the `resolve_container_template` function. Write a Python function `def resolve_container_template( container_spec_tmpl: Union[executor_spec.ExecutorContainerSpec, executor_specs.TemplatedExecutorContainerSpec], input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> executor_spec.ExecutorContainerSpec` to solve the following problem:
Resolves Jinja2 template languages from an executor container spec. Args: container_spec_tmpl: the container spec template to be resolved. input_dict: Dictionary of input artifacts consumed by this component. output_dict: Dictionary of output artifacts produced by this component. exec_properties: Dictionary of execution properties. Returns: A resolved container spec.
Here is the function:
def resolve_container_template(
    container_spec_tmpl: Union['executor_spec.ExecutorContainerSpec',
                               'executor_specs.TemplatedExecutorContainerSpec'],
    input_dict: Dict[str, List['types.Artifact']],
    output_dict: Dict[str, List['types.Artifact']],
    exec_properties: Dict[str, Any]) -> 'executor_spec.ExecutorContainerSpec':
  """Resolves Jinja2 template languages from an executor container spec.

  Placeholder-based (templated) specs resolve their command line
  programmatically and keep the image verbatim; plain container specs have
  image, command and args rendered as Jinja2 templates.

  Args:
    container_spec_tmpl: the container spec template to be resolved.
    input_dict: Dictionary of input artifacts consumed by this component.
    output_dict: Dictionary of output artifacts produced by this component.
    exec_properties: Dictionary of execution properties.

  Returns:
    A resolved container spec.
  """
  if isinstance(container_spec_tmpl,
                executor_specs.TemplatedExecutorContainerSpec):
    resolved_command = _resolve_container_command_line(
        cmd_args=container_spec_tmpl.command,
        input_dict=input_dict,
        output_dict=output_dict,
        exec_properties=exec_properties,
    )
    return executor_spec.ExecutorContainerSpec(
        image=container_spec_tmpl.image,
        command=resolved_command,
    )
  # Jinja2-template specs render image/command/args against this context.
  context = {
      'input_dict': input_dict,
      'output_dict': output_dict,
      'exec_properties': exec_properties,
  }
  return executor_spec.ExecutorContainerSpec(
      image=_render_text(container_spec_tmpl.image, context),
      command=_render_items(container_spec_tmpl.command, context),
      args=_render_items(container_spec_tmpl.args, context))
166,229 | from typing import Any, Dict, List, Optional, Union
import jinja2
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import executor_spec
The provided code snippet includes necessary dependencies for implementing the `to_swagger_dict` function. Write a Python function `def to_swagger_dict(config: Any) -> Any` to solve the following problem:
Converts a config object to a swagger API dict. This utility method recursively converts swagger code generated configs into a valid swagger dictionary. This method is trying to workaround a bug (https://github.com/swagger-api/swagger-codegen/issues/8948) from swagger generated code Args: config: The config object. It can be one of List, Dict or a Swagger code generated object, which has a `attribute_map` attribute. Returns: The original object with all Swagger generated object replaced with dictionary object.
Here is the function:
def to_swagger_dict(config: Any) -> Any:
  """Converts a config object to a swagger API dict.

  This utility method recursively converts swagger code generated configs into
  a valid swagger dictionary. This method is trying to workaround a bug
  (https://github.com/swagger-api/swagger-codegen/issues/8948)
  from swagger generated code

  Args:
    config: The config object. It can be one of List, Dict or a Swagger code
      generated object, which has a `attribute_map` attribute.

  Returns:
    The original object with all Swagger generated object replaced with
    dictionary object.
  """
  if isinstance(config, list):
    return [to_swagger_dict(entry) for entry in config]
  if hasattr(config, 'attribute_map'):
    converted = {}
    for attr_name, swagger_name in config.attribute_map.items():
      # Falsy attributes are omitted, mirroring swagger's unset-field handling.
      if getattr(config, attr_name):
        converted[swagger_name] = to_swagger_dict(getattr(config, attr_name))
    return converted
  if isinstance(config, dict):
    return {key: to_swagger_dict(value) for key, value in config.items()}
  return config
166,230 | from collections.abc import Container, MutableSequence, Sequence
import inspect
from typing import Any, Callable, TypeVar, get_args, get_origin, Optional
from tfx.dsl.component.experimental import json_compat
from tfx.orchestration.portable import data_types
from tfx.types import artifact
from tfx.types import standard_artifacts
from tfx.utils import pure_typing_utils
from tfx.utils.di import errors
from tfx.utils.di import providers
def _type_check(value: Any, type_hint: Any) -> bool:
if type_hint is None:
return True
try:
return pure_typing_utils.is_compatible(value, type_hint)
except NotImplementedError:
return True | null |
166,231 | from collections.abc import Container, MutableSequence, Sequence
import inspect
from typing import Any, Callable, TypeVar, get_args, get_origin, Optional
from tfx.dsl.component.experimental import json_compat
from tfx.orchestration.portable import data_types
from tfx.types import artifact
from tfx.types import standard_artifacts
from tfx.utils import pure_typing_utils
from tfx.utils.di import errors
from tfx.utils.di import providers
# Short alias for the base TFX artifact class used in annotations below.
_TfxArtifact = artifact.Artifact
def _is_valid_artifact_type(artifact_type: Any) -> bool:
return (
inspect.isclass(artifact_type)
and issubclass(artifact_type, _TfxArtifact)
and artifact_type.TYPE_NAME
)
def _try_infer(
    type_hint: Any,
) -> Optional[type[standard_artifacts.ValueArtifact]]:
  """Maps a type hint to a ValueArtifact class, if one can be inferred.

  Args:
    type_hint: A primitive or JSON-compatible typing annotation.

  Returns:
    The matching ValueArtifact subclass, or None if nothing matches.
  """
  try:
    return _PRIMITIVE_TO_ARTIFACT[type_hint]
  except KeyError:
    pass
  if json_compat.is_json_compatible(type_hint):
    return standard_artifacts.JsonValue
  return None
def _deserialize_artifact(
    target_type: type[_AT], artifacts: list[_TfxArtifact]
) -> list[_AT]:
  """Re-wraps already deserialized artifacts as `target_type` instances.

  Unlike `artifact_utils.deserialize_artifacts`, which searches globally
  imported classes for the artifact class, the target class is passed in
  explicitly here, so it is guaranteed to already be imported.

  Args:
    target_type: TFX artifact class to convert each artifact into.
    artifacts: Already deserialized artifacts given from ExecutionInfo.

  Returns:
    The artifacts as instances of `target_type`.

  Raises:
    errors.InvalidTypeHintError: If an artifact's type name does not match
      `target_type.TYPE_NAME`.
  """
  converted = []
  for src in artifacts:
    if src.type_name != target_type.TYPE_NAME:
      raise errors.InvalidTypeHintError(
          f'type_hint uses {target_type.TYPE_NAME} but the resolved artifacts'
          f' have type_name = {src.type_name}'
      )
    if type(src) is target_type:  # pylint: disable=unidiomatic-typecheck
      converted.append(src)
      continue
    # Wrap the underlying MLMD data in a fresh instance of the target class.
    rewrapped = target_type()
    rewrapped.set_mlmd_artifact_type(src.artifact_type)
    rewrapped.set_mlmd_artifact(src.mlmd_artifact)
    converted.append(rewrapped)
  return converted
The provided code snippet includes necessary dependencies for implementing the `_transform_artifacts` function. Write a Python function `def _transform_artifacts( artifacts: list[_TfxArtifact], type_hint: Any, is_input: bool = False ) -> Any` to solve the following problem:
Transforms raw list[Artifact] to target type_hint with type checking.
Here is the function:
def _transform_artifacts(
    artifacts: list[_TfxArtifact], type_hint: Any, is_input: bool = False
) -> Any:
  """Transforms raw list[Artifact] to target type_hint with type checking.

  Supported hints: None (pass-through), list/Sequence/MutableSequence of an
  artifact class, Optional[ArtifactClass], a bare artifact class, and — for
  inputs only — a primitive/JSON-compatible hint for a value artifact.

  Args:
    artifacts: Resolved artifacts for a single input/output key.
    type_hint: The user-declared annotation to coerce the artifacts into.
    is_input: Whether the artifacts are inputs; only inputs may be coerced to
      primitive values, since that requires reading the artifact payload.

  Returns:
    The artifacts reshaped to fit `type_hint`: a list, a single artifact,
    None, or a primitive value.

  Raises:
    errors.InvalidTypeHintError: If the hint is unsupported or the artifact
      count does not fit the hint's cardinality.
  """
  if type_hint is None:
    return artifacts
  origin = get_origin(type_hint)
  args = get_args(type_hint)
  if origin and args:
    # List[T]: return all artifacts re-wrapped as T.
    if (
        origin in (list, Sequence, MutableSequence)
        and len(args) == 1
        and _is_valid_artifact_type(args[0])
    ):
      return _deserialize_artifact(args[0], artifacts)
  # Optional[T]: zero artifacts -> None, one artifact -> unwrapped value.
  is_opt, unwrapped_type = pure_typing_utils.maybe_unwrap_optional(type_hint)
  if is_opt and _is_valid_artifact_type(unwrapped_type):
    artifact_type = unwrapped_type
    artifacts = _deserialize_artifact(artifact_type, artifacts)
    if not artifacts:
      return None
    elif len(artifacts) == 1:
      return artifacts[0]
    else:
      raise errors.InvalidTypeHintError(
          f'type_hint = {type_hint} but got {len(artifacts)} artifacts. Please'
          f' use list[{artifact_type.__name__}] annotation instead.'
      )
  # Just T: exactly one artifact required.
  if _is_valid_artifact_type(unwrapped_type):
    artifact_type = unwrapped_type
    artifacts = _deserialize_artifact(artifact_type, artifacts)
    if len(artifacts) == 1:
      return artifacts[0]
    else:
      raise errors.InvalidTypeHintError(
          f'type_hint = {type_hint} but got {len(artifacts)} artifacts. Please'
          f' use list[{artifact_type.__name__}] or'
          f' Optional[{artifact_type.__name__}] annotation instead.'
      )
  # Primitive or jsonable type_hint for a value artifact: read the payload and
  # return the decoded value itself (inputs only).
  if (
      is_input
      and (artifact_type := _try_infer(unwrapped_type)) is not None
  ):
    artifacts = _deserialize_artifact(artifact_type, artifacts)
    if is_opt and not artifacts:
      return None
    if len(artifacts) == 1:
      artifacts[0].read()
      return artifacts[0].value
    else:
      raise errors.InvalidTypeHintError(
          f'type_hint = {type_hint} but got {len(artifacts)} artifacts.'
          ' Please use a single value artifact for primitive types.'
      )
  raise errors.InvalidTypeHintError(f'Unsupported annotation: {type_hint}')
166,232 | from typing import Mapping, Optional, Sequence
import uuid
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import merge_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `publish_cached_executions` function. Write a Python function `def publish_cached_executions( metadata_handle: metadata.Metadata, contexts: Sequence[metadata_store_pb2.Context], executions: Sequence[metadata_store_pb2.Execution], output_artifacts_maps: Optional[ Sequence[typing_utils.ArtifactMultiMap] ] = None, ) -> None` to solve the following problem:
Marks an existing execution as using cached outputs from a previous execution. Args: metadata_handle: A handler to access MLMD. contexts: MLMD contexts to associated with the execution. executions: Executions that will be published as CACHED executions. output_artifacts_maps: A list of output artifacts of the executions. Each artifact will be linked with the execution through an event of type OUTPUT
Here is the function:
def publish_cached_executions(
    metadata_handle: metadata.Metadata,
    contexts: Sequence[metadata_store_pb2.Context],
    executions: Sequence[metadata_store_pb2.Execution],
    output_artifacts_maps: Optional[
        Sequence[typing_utils.ArtifactMultiMap]
    ] = None,
) -> None:
  """Publishes executions as CACHED, reusing outputs of earlier executions.

  Args:
    metadata_handle: A handler to access MLMD.
    contexts: MLMD contexts to associate with the executions.
    executions: Executions that will be published as CACHED executions.
    output_artifacts_maps: A list of output artifacts of the executions. Each
      artifact will be linked with its execution through an OUTPUT event.
  """
  cached_state = metadata_store_pb2.Execution.CACHED
  for execution in executions:
    execution.last_known_state = cached_state
  execution_lib.put_executions(
      metadata_handle,
      executions,
      contexts,
      output_artifacts_maps=output_artifacts_maps,
  )
166,233 | from typing import Mapping, Optional, Sequence
import uuid
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import merge_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
def set_execution_result_if_not_empty(
    executor_output: Optional[execution_result_pb2.ExecutorOutput],
    execution: metadata_store_pb2.Execution,
) -> None:
  """Sets execution result as a custom property of the execution.

  The result is only recorded when the executor output carries at least one
  of: a result message, metadata details, or a non-zero result code.

  Args:
    executor_output: The output returned by an executor, possibly None.
    execution: The MLMD execution to annotate (mutated in place).
  """
  if not executor_output:
    return
  result = executor_output.execution_result
  if result.result_message or result.metadata_details or result.code:
    execution_lib.set_execution_result(result, execution)
The provided code snippet includes necessary dependencies for implementing the `publish_failed_execution` function. Write a Python function `def publish_failed_execution( metadata_handle: metadata.Metadata, contexts: Sequence[metadata_store_pb2.Context], execution_id: int, executor_output: Optional[execution_result_pb2.ExecutorOutput] = None, ) -> None` to solve the following problem:
Marks an existing execution as failed. Args: metadata_handle: A handler to access MLMD. contexts: MLMD contexts to associated with the execution. execution_id: The id of the execution. executor_output: The output of executor.
Here is the function:
def publish_failed_execution(
    metadata_handle: metadata.Metadata,
    contexts: Sequence[metadata_store_pb2.Context],
    execution_id: int,
    executor_output: Optional[execution_result_pb2.ExecutorOutput] = None,
) -> None:
  """Marks an existing execution as FAILED in MLMD.

  Args:
    metadata_handle: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
    execution_id: The id of the execution.
    executor_output: The output of executor, used to attach the failure
      details to the execution when present.
  """
  # Unpacking asserts exactly one execution exists for the given id.
  (execution,) = metadata_handle.store.get_executions_by_id([execution_id])
  execution.last_known_state = metadata_store_pb2.Execution.FAILED
  set_execution_result_if_not_empty(executor_output, execution)
  execution_lib.put_execution(metadata_handle, execution, contexts)
166,234 | import sys
import traceback
from typing import Any, Dict, List, Mapping, Optional, Type, TypeVar
from absl import logging
import attr
import grpc
import portpicker
from tfx import types
from tfx.dsl.compiler import placeholder_utils
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import base_driver_operator
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import beam_executor_operator
from tfx.orchestration.portable import cache_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import docker_executor_operator
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import execution_watcher
from tfx.orchestration.portable import importer_node_handler
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable import python_driver_operator
from tfx.orchestration.portable import python_executor_operator
from tfx.orchestration.portable import resolver_node_handler
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import driver_output_pb2
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import typing_utils
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `_register_execution` function. Write a Python function `def _register_execution( metadata_handle: metadata.Metadata, execution_type: metadata_store_pb2.ExecutionType, contexts: List[metadata_store_pb2.Context], input_artifacts: Optional[typing_utils.ArtifactMultiMap] = None, exec_properties: Optional[Mapping[str, types.Property]] = None, ) -> metadata_store_pb2.Execution` to solve the following problem:
Registers an execution in MLMD.
Here is the function:
def _register_execution(
    metadata_handle: metadata.Metadata,
    execution_type: metadata_store_pb2.ExecutionType,
    contexts: List[metadata_store_pb2.Context],
    input_artifacts: Optional[typing_utils.ArtifactMultiMap] = None,
    exec_properties: Optional[Mapping[str, types.Property]] = None,
) -> metadata_store_pb2.Execution:
  """Registers an execution in MLMD.

  Thin wrapper that forwards all arguments unchanged to
  `execution_publish_utils.register_execution`.
  """
  register_kwargs = dict(
      metadata_handle=metadata_handle,
      execution_type=execution_type,
      contexts=contexts,
      input_artifacts=input_artifacts,
      exec_properties=exec_properties,
  )
  return execution_publish_utils.register_execution(**register_kwargs)
166,235 | import collections
import copy
import hashlib
from typing import Any, Dict, List, Mapping, Optional, Sequence
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `get_cache_context` function. Write a Python function `def get_cache_context( metadata_handle: metadata.Metadata, pipeline_node: pipeline_pb2.PipelineNode, pipeline_info: pipeline_pb2.PipelineInfo, executor_spec: Optional[message.Message] = None, input_artifacts: Optional[Mapping[str, Sequence[types.Artifact]]] = None, output_artifacts: Optional[Mapping[str, Sequence[types.Artifact]]] = None, parameters: Optional[Mapping[str, Any]] = None, ) -> metadata_store_pb2.Context` to solve the following problem:
Gets cache context for a potential node execution. The cache key is generated by applying SHA-256 hashing function on: - Serialized pipeline info. - Serialized node_info of the PipelineNode. - Serialized executor spec - Serialized input artifacts if any. - Serialized output artifacts if any. The uri was removed during the process. - Serialized parameters if any. - Serialized module file content if module file is present in parameters. Args: metadata_handle: A handler to access MLMD store. pipeline_node: A pipeline_pb2.PipelineNode instance to represent the node. pipeline_info: Information of the pipeline. executor_spec: A proto message representing the executor specification. input_artifacts: Input artifacts of the potential execution. The order of the artifacts under a key matters when calculating the cache key. output_artifacts: Output artifacts skeleton of the potential execution. The order of the artifacts under a key matters when calculating the cache key. parameters: Parameters of the potential execution. Returns: A metadata_store_pb2.Context for the cache key.
Here is the function:
def get_cache_context(
    metadata_handle: metadata.Metadata,
    pipeline_node: pipeline_pb2.PipelineNode,
    pipeline_info: pipeline_pb2.PipelineInfo,
    executor_spec: Optional[message.Message] = None,
    input_artifacts: Optional[Mapping[str, Sequence[types.Artifact]]] = None,
    output_artifacts: Optional[Mapping[str, Sequence[types.Artifact]]] = None,
    parameters: Optional[Mapping[str, Any]] = None,
) -> metadata_store_pb2.Context:
  """Gets cache context for a potential node execution.

  The cache key is generated by applying SHA-256 hashing function on:
  - Serialized pipeline info.
  - Serialized node_info of the PipelineNode.
  - Serialized executor spec.
  - Serialized input artifacts if any.
  - Serialized output artifacts if any. The uri is removed during the process.
  - Serialized parameters if any.
  - Serialized module file content if module file is present in parameters.

  Args:
    metadata_handle: A handler to access MLMD store.
    pipeline_node: A pipeline_pb2.PipelineNode instance to represent the node.
    pipeline_info: Information of the pipeline.
    executor_spec: A proto message representing the executor specification.
    input_artifacts: Input artifacts of the potential execution. The order of
      the artifacts under a key matters when calculating the cache key.
    output_artifacts: Output artifacts skeleton of the potential execution. The
      order of the artifacts under a key matters when calculating the cache
      key.
    parameters: Parameters of the potential execution.

  Returns:
    A metadata_store_pb2.Context for the cache key.
  """
  h = hashlib.sha256()
  # Deterministic serialization keeps the hash stable across proto map-field
  # orderings; the order in which fields feed the hash below must not change.
  h.update(pipeline_info.SerializeToString(deterministic=True))
  h.update(pipeline_node.node_info.SerializeToString(deterministic=True))
  if executor_spec:
    h.update(executor_spec.SerializeToString(deterministic=True))
  # Keys are sorted so dict iteration order cannot change the hash; artifact
  # order under each key is preserved and intentionally affects the key.
  for key in sorted(input_artifacts or {}):
    h.update(key.encode())
    for artifact in input_artifacts[key]:
      h.update(artifact.mlmd_artifact.SerializeToString(deterministic=True))
  for key in sorted(output_artifacts or {}):
    h.update(key.encode())
    for artifact in output_artifacts[key]:
      stateless_artifact = copy.deepcopy(artifact)
      # Output uri and name should not be taken into consideration as cache key.
      stateless_artifact.uri = ''
      stateless_artifact.name = ''
      h.update(
          stateless_artifact.mlmd_artifact.SerializeToString(
              deterministic=True))
  parameters = parameters or {}
  for key, value in sorted(parameters.items()):
    h.update(key.encode())
    h.update(str(value).encode())
  # Special treatment for module files as they will be used as part of the logic
  # for processing. Currently this pattern is employed by Trainer and
  # Transform.
  if ('module_file' in parameters and parameters['module_file'] and
      fileio.exists(parameters['module_file'])):
    with fileio.open(parameters['module_file'], 'r') as f:
      h.update(f.read().encode())
  return context_lib.register_context_if_not_exists(
      metadata_handle=metadata_handle,
      context_type_name=context_lib.CONTEXT_TYPE_EXECUTION_CACHE,
      context_name=h.hexdigest(),
  )
166,236 | import collections
import copy
import hashlib
from typing import Any, Dict, List, Mapping, Optional, Sequence
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
def _get_outputs_of_execution(
    metadata_handle: metadata.Metadata, execution_id: int
) -> Optional[Dict[str, List[types.Artifact]]]:
  """Fetches outputs produced by a historical execution.

  Args:
    metadata_handle: A handler to access MLMD store.
    execution_id: The id of the execution that produced the outputs.

  Returns:
    A dict of key -> List[Artifact] as the result if qualified outputs found.
    Otherwise returns None.
  """
  result = collections.defaultdict(list)
  output_events = [
      event
      for event in metadata_handle.store.get_events_by_execution_ids(
          [execution_id]
      )
      if event.type == metadata_store_pb2.Event.OUTPUT
  ]
  cached_output_artifacts = metadata_handle.store.get_artifacts_by_id(
      [e.artifact_id for e in output_events]
  )
  for artifact in cached_output_artifacts:
    # Non-live artifact means partial result, will not be used.
    if artifact.state != metadata_store_pb2.Artifact.LIVE:
      return None
  artifact_types = metadata_handle.store.get_artifact_types_by_id(
      [a.type_id for a in cached_output_artifacts]
  )
  # NOTE(review): the zip below assumes get_artifacts_by_id and
  # get_artifact_types_by_id return results aligned one-to-one with the
  # requested id lists — confirm against the MLMD store contract.
  for event, mlmd_artifact, artifact_type in zip(output_events,
                                                 cached_output_artifacts,
                                                 artifact_types):
    # The first path step's key is the output-dict key the artifact was
    # published under.
    key = event.path.steps[0].key
    tfx_artifact = artifact_utils.deserialize_artifact(artifact_type,
                                                       mlmd_artifact)
    result[key].append(tfx_artifact)
  return result
The provided code snippet includes necessary dependencies for implementing the `get_cached_outputs` function. Write a Python function `def get_cached_outputs( metadata_handle: metadata.Metadata, cache_context: metadata_store_pb2.Context, ) -> Optional[Dict[str, List[types.Artifact]]]` to solve the following problem:
Tries to get the cached output artifacts given a cache context. Args: metadata_handle: A handler to access MLMD store. cache_context: The context representing the cache key. Returns: The cached output artifacts in a dict format. None if no qualified cache result is found.
Here is the function:
def get_cached_outputs(
    metadata_handle: metadata.Metadata,
    cache_context: metadata_store_pb2.Context,
) -> Optional[Dict[str, List[types.Artifact]]]:
  """Tries to get the cached output artifacts given a cache context.

  Args:
    metadata_handle: A handler to access MLMD store.
    cache_context: The context representing the cache key.

  Returns:
    The cached output artifacts in a dict format. None if no qualified cache
    result is found.
  """
  # Only success executions should be the producer of cached outputs.
  # Materialize into a list: the previous `filter(...)` iterator was always
  # truthy, so the empty-result early return below could never trigger.
  cached_executions = [
      e
      for e in metadata_handle.store.get_executions_by_context(cache_context.id)
      if execution_lib.is_execution_successful(e)
  ]
  if not cached_executions:
    return None
  # Sorts the candidate executions from newer to older.
  cached_executions = execution_lib.sort_executions_newest_to_oldest(
      cached_executions)
  # Defensively traverses candidate executions and returns once we find an
  # execution with valid outputs that can be confirmed to still exist.
  for execution in cached_executions:
    cached_output_artifacts = _get_outputs_of_execution(
        metadata_handle, execution.id)
    if cached_output_artifacts is None:
      continue
    try:
      artifact_utils.verify_artifacts(cached_output_artifacts)
      return cached_output_artifacts
    except RuntimeError:
      # Outputs no longer exist on storage; keep looking at older executions.
      pass
  return None
166,237 | import copy
import sys
from typing import Optional, cast
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import outputs_utils
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types.value_artifact import ValueArtifact
from tfx.utils import import_utils
from google.protobuf import message
class ValueArtifact(Artifact):
  """Artifacts of small scalar-values that can be easily loaded into memory.

  Value artifacts are stored to a file located at the `uri` of the artifact.
  This is different from other kinds of artifact types that has a directory at
  the `uri`. The payload of the file will be determined by each value artifact
  types which is a subclass of this class.

  The content of a value artifact can be read or written using `.value`
  property.
  """

  def __init__(self, *args, **kwargs):
    """Initializes ValueArtifact."""
    self._has_value = False  # True once the value was read or assigned.
    self._modified = False  # True once the value was set via the setter.
    self._value = None  # In-memory cache of the decoded payload.
    super().__init__(*args, **kwargs)

  def read(self):
    """Reads, caches and returns the decoded value stored at `self.uri`."""
    if not self._has_value:
      file_path = self.uri
      # Assert there is a file exists.
      if not fileio.exists(file_path):
        raise RuntimeError(
            'Given path does not exist or is not a valid file: %s' % file_path)
      self._has_value = True
      # A custom property flags an explicit None value; in that case the file
      # is empty and no decoding happens, leaving self._value as None.
      if not self.get_int_custom_property(_IS_NULL_KEY):
        serialized_value = fileio.open(file_path, 'rb').read()
        self._value = self.decode(serialized_value)
    return self._value

  def write(self, value):
    """Encodes `value` (b'' for None) and writes it to the file at `self.uri`."""
    if value is None:
      self.set_int_custom_property(_IS_NULL_KEY, 1)
      serialized_value = b''
    else:
      self.set_int_custom_property(_IS_NULL_KEY, 0)
      serialized_value = self.encode(value)
    with fileio.open(self.uri, 'wb') as f:
      f.write(serialized_value)

  # NOTE(review): looks like the `value` @property getter — the decorator
  # appears to have been lost in extraction; confirm against upstream source.
  def value(self):
    """Value stored in the artifact."""
    if not self._has_value:
      raise ValueError('The artifact value has not yet been read from storage.')
    return self._value

  # NOTE(review): looks like the `@value.setter` — as written it redefines
  # `value` above; confirm the missing decorator against upstream source.
  def value(self, value):
    self._modified = True
    self._value = value
    self._has_value = True
    self.write(value)

  # Note: behavior of decode() method should not be changed to provide
  # backward/forward compatibility.
  def decode(self, serialized_value) -> bytes:
    """Method decoding the file content. Implemented by subclasses."""
    pass

  # Note: behavior of encode() method should not be changed to provide
  # backward/forward compatibility.
  def encode(self, value) -> Any:
    """Method encoding the file content. Implemented by subclasses."""
    pass

  # NOTE(review): uses `cls` — presumably a @classmethod whose decorator was
  # lost in extraction; confirm against upstream source.
  def annotate_as(cls, type_annotation: Optional[Type[SystemArtifact]] = None):
    """Annotate the value artifact type with a system artifact class.

    Example usage:

    ```python
    from tfx import v1 as tfx
    OutputArtifact = tfx.dsl.components.OutputArtifact
    String = tfx.types.standard_artifacts.String
    Model = tfx.dsl.standard_annotations.Model

    def MyTrainer(
        model: OutputArtifact[String.annotate_as(Model)]
    ):
      ...
    ```

    Args:
      type_annotation: the standard annotations used to annotate the value
        artifact type. The possible values are in
        `tfx.v1.dsl.standard_annotations`.

    Returns:
      A subclass of the method caller class (e.g., standard_artifacts.String,
      standard_artifacts.Float) with TYPE_ANNOTATION attribute set to be
      `type_annotation`; returns the original class if`type_annotation` is None.
    """
    if not type_annotation:
      return cls
    if not issubclass(type_annotation, SystemArtifact):
      raise ValueError(
          'type_annotation %s is not a subclass of SystemArtifact.' %
          type_annotation)
    type_annotation_str = str(type_annotation.__name__)
    # Dynamically derive a new class whose TYPE_NAME embeds the annotation.
    return type(
        str(cls.__name__) + '_' + type_annotation_str,
        (cls,),
        {
            'TYPE_NAME': str(cls.TYPE_NAME) + '_' + type_annotation_str,
            'TYPE_ANNOTATION': type_annotation,
            '__module__': cls.__module__,
        },
    )
The provided code snippet includes necessary dependencies for implementing the `run_with_executor` function. Write a Python function `def run_with_executor( execution_info: data_types.ExecutionInfo, executor: base_executor.BaseExecutor ) -> execution_result_pb2.ExecutorOutput` to solve the following problem:
Invokes executors given an executor instance and input from the Launcher. Args: execution_info: A wrapper of the details of this execution. executor: An executor instance. Returns: The output from executor.
Here is the function:
def run_with_executor(
    execution_info: data_types.ExecutionInfo,
    executor: base_executor.BaseExecutor
) -> execution_result_pb2.ExecutorOutput:
  """Invokes executors given an executor instance and input from the Launcher.

  Args:
    execution_info: A wrapper of the details of this execution.
    executor: An executor instance.

  Returns:
    The output from executor.
  """
  # Materialize value artifacts so the executor can use `.value` directly.
  for artifact_list in execution_info.input_dict.values():
    for input_artifact in artifact_list:
      if isinstance(input_artifact, ValueArtifact):
        input_artifact.read()

  output_dict = copy.deepcopy(execution_info.output_dict)
  executor_output = executor.Do(
      execution_info.input_dict, output_dict, execution_info.exec_properties)
  if executor_output:
    return executor_output

  # No output was returned from Do(); try reading it from the
  # executor_output_uri file instead.
  if fileio.exists(execution_info.execution_output_uri):
    return execution_result_pb2.ExecutorOutput.FromString(
        fileio.open(execution_info.execution_output_uri, 'rb').read())

  # Old style TFX executor doesn't return executor_output, but modifies
  # output_dict and exec_properties in place. For backward compatibility,
  # construct an ExecutorOutput from those mutated values.
  executor_output = execution_result_pb2.ExecutorOutput()
  outputs_utils.populate_output_artifact(executor_output, output_dict)
  outputs_utils.populate_exec_properties(executor_output,
                                         execution_info.exec_properties)
  return executor_output
166,238 | import collections
import contextlib
from typing import Dict, List, Optional, Sequence, Tuple, TypeVar
from tfx import types
from ml_metadata.proto import metadata_store_pb2
# Event types that mark an artifact as an *output* of an execution.
_VALID_OUTPUT_EVENT_TYPES = frozenset([
    metadata_store_pb2.Event.OUTPUT, metadata_store_pb2.Event.INTERNAL_OUTPUT,
    metadata_store_pb2.Event.DECLARED_OUTPUT
])
# Event types that mark an artifact as an *input* of an execution.
_VALID_INPUT_EVENT_TYPES = frozenset([
    metadata_store_pb2.Event.INPUT, metadata_store_pb2.Event.INTERNAL_INPUT,
    metadata_store_pb2.Event.DECLARED_INPUT
])
# Either the raw MLMD proto artifact or its TFX wrapper; APIs here accept both.
_Artifact = TypeVar(
    '_Artifact', metadata_store_pb2.Artifact, types.Artifact)
# Mapping from an input/output key to the artifacts published under that key.
_ArtifactMultiDict = Dict[str, List[_Artifact]]
def reconstruct_artifact_multimap(
    artifacts: Sequence[_Artifact],
    events: Sequence[metadata_store_pb2.Event]) -> _ArtifactMultiDict:
  """Reconstructs input or output artifact maps from events.

  Args:
    artifacts: Artifacts referenced by `events`.
    events: Events of a single execution.

  Returns:
    A key -> list-of-artifacts mapping reconstructed from the events.

  Raises:
    ValueError: If the events span more than one execution, or the artifact
      types or event types under a single key are heterogeneous.
  """
  execution_ids = {e.execution_id for e in events}
  if len(execution_ids) > 1:
    raise ValueError(
        'All events should be from the same execution but got: '
        f'{execution_ids}.')
  events_by_artifact_id = {e.artifact_id: e for e in events}
  artifacts_by_id = {a.id: a for a in artifacts}
  artifact_id_multimap = reconstruct_artifact_id_multimap(events)
  result = {
      key: [artifacts_by_id[i] for i in artifact_ids]
      for key, artifact_ids in artifact_id_multimap.items()
  }
  # Validate per-key homogeneity. The loop variable is named keyed_artifacts
  # (was `artifacts`) to avoid shadowing the function parameter.
  for key, keyed_artifacts in result.items():
    artifact_types = {a.type_id for a in keyed_artifacts}
    if len(artifact_types) != 1:
      raise ValueError(
          f'Artifact type of key "{key}" is heterogeneous: {artifact_types}')
    event_types = {events_by_artifact_id[a.id].type for a in keyed_artifacts}
    if len(event_types) != 1:
      raise ValueError(
          f'Event type of key "{key}" is heterogeneous: {event_types}')
  return result
The provided code snippet includes necessary dependencies for implementing the `reconstruct_inputs_and_outputs` function. Write a Python function `def reconstruct_inputs_and_outputs( artifacts: Sequence[_Artifact], events: Sequence[metadata_store_pb2.Event], ) -> Tuple[_ArtifactMultiDict, _ArtifactMultiDict]` to solve the following problem:
Reconstructs input and output artifact maps from events.
Here is the function:
def reconstruct_inputs_and_outputs(
    artifacts: Sequence[_Artifact],
    events: Sequence[metadata_store_pb2.Event],
) -> Tuple[_ArtifactMultiDict, _ArtifactMultiDict]:
  """Reconstructs the (inputs, outputs) artifact maps of a single execution.

  Args:
    artifacts: Artifacts referenced by `events`.
    events: Events of a single execution.

  Returns:
    A (input multimap, output multimap) tuple.

  Raises:
    ValueError: If the events span more than one execution.
  """
  execution_ids = {event.execution_id for event in events}
  if len(execution_ids) > 1:
    raise ValueError(
        'All events should be from the same execution but got: '
        f'{execution_ids}.')
  input_events = [e for e in events if e.type in _VALID_INPUT_EVENT_TYPES]
  output_events = [e for e in events if e.type in _VALID_OUTPUT_EVENT_TYPES]
  inputs = reconstruct_artifact_multimap(artifacts, input_events)
  outputs = reconstruct_artifact_multimap(artifacts, output_events)
  return inputs, outputs
166,239 | import collections
import contextlib
from typing import Dict, List, Optional, Sequence, Tuple, TypeVar
from tfx import types
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `is_pending_output_event` function. Write a Python function `def is_pending_output_event(event: metadata_store_pb2.Event) -> bool` to solve the following problem:
Returns true if the event represents a pending (not finalized) output.
Here is the function:
def is_pending_output_event(event: metadata_store_pb2.Event) -> bool:
  """Returns true if the event represents a pending (not finalized) output."""
  pending_type = metadata_store_pb2.Event.PENDING_OUTPUT
  return event.type == pending_type
166,240 | from __future__ import annotations
import collections
import copy
import itertools
import re
from typing import Any, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple
from absl import logging
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.utils import metrics_utils
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import typing_utils
from tfx.utils import telemetry_utils
from google.protobuf import json_format
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def is_internal_key(key: str) -> bool:
  """Returns `True` if the key is an internal-only execution property key."""
  # Internal-only execution property keys are marked by a '__' prefix.
  return key[:2] == '__'
def remove_internal_keys(d: Dict[str, Any]) -> Dict[str, Any]:
  """Returns a copy of `d` with internal-only ('__'-prefixed) keys dropped."""
  return {key: value for key, value in d.items() if not key.startswith('__')}
166,241 | from __future__ import annotations
import collections
import copy
import itertools
import re
from typing import Any, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple
from absl import logging
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.utils import metrics_utils
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import typing_utils
from tfx.utils import telemetry_utils
from google.protobuf import json_format
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def is_execution_running(execution: metadata_store_pb2.Execution) -> bool:
  """Checks whether the given MLMD execution is in the RUNNING state.

  Args:
    execution: An execution message.

  Returns:
    True iff the execution's last known state is RUNNING.
  """
  running_state = metadata_store_pb2.Execution.RUNNING
  return execution.last_known_state == running_state
def _register_reference_output_artifacts(
    metadata_handle: metadata.Metadata,
    execution: metadata_store_pb2.Execution,
    output_artifacts: typing_utils.ArtifactMultiMap,
) -> None:
  """Registers REFERENCE output artifacts for a given execution in MLMD.

  Only artifacts whose state is REFERENCE are handled here; all others are
  ignored. If REFERENCE artifacts with a PENDING_OUTPUT event are already
  registered for `execution` (e.g. from a previous call), they are re-used
  in place of a new registration; otherwise the artifacts are registered and
  linked to `execution` with PENDING_OUTPUT events.

  Args:
    metadata_handle: A handle to access MLMD.
    execution: The MLMD execution that owns the output artifacts.
    output_artifacts: Candidate output artifacts keyed by output key.
  """
  reference_output_artifacts = collections.defaultdict(list)
  for key, artifacts in output_artifacts.items():
    for artifact in artifacts:
      # Only consider artifacts in state REFERENCE.
      if artifact.state == ArtifactState.REFERENCE:
        reference_output_artifacts[key].append(artifact)
  # Find existing registered REFERENCE artifacts with a PENDING_OUTPUT event and
  # re-use them.
  valid_artifact_states = [metadata_store_pb2.Artifact.State.REFERENCE]
  existing_pending_output_artifacts = get_pending_output_artifacts(
      metadata_handle, execution.id, valid_artifact_states
  )
  if existing_pending_output_artifacts:
    _reuse_existing_artifacts(
        reference_output_artifacts, existing_pending_output_artifacts
    )
  elif reference_output_artifacts:
    # Register the reference output artifacts for this execution.
    contexts = metadata_handle.store.get_contexts_by_execution(execution.id)
    put_execution(
        metadata_handle,
        execution,
        contexts,
        output_artifacts=reference_output_artifacts,
        output_event_type=metadata_store_pb2.Event.PENDING_OUTPUT,
    )
def _register_pending_output_artifacts(
    metadata_handle: metadata.Metadata,
    execution: metadata_store_pb2.Execution,
    output_artifacts: typing_utils.ArtifactMultiMap,
) -> None:
  """Registers PENDING output artifacts for a given execution in MLMD.

  Artifacts whose state is REFERENCE are ignored here; every other artifact
  is moved to state PENDING in place. If matching non-REFERENCE artifacts
  with a PENDING_OUTPUT event are already registered for `execution`, they
  are re-used in place of a new registration; otherwise the artifacts are
  registered and linked to `execution` with PENDING_OUTPUT events.

  Args:
    metadata_handle: A handle to access MLMD.
    execution: The MLMD execution that owns the output artifacts.
    output_artifacts: Candidate output artifacts keyed by output key. Mutated
      in place: non-REFERENCE artifacts get state PENDING.
  """
  pending_output_artifacts = collections.defaultdict(list)
  for key, artifacts in output_artifacts.items():
    for artifact in artifacts:
      # Only consider artifacts not in state REFERENCE.
      if artifact.state != ArtifactState.REFERENCE:
        artifact.state = ArtifactState.PENDING
        pending_output_artifacts[key].append(artifact)
  # Find existing registered artifacts (that are not in state REFERENCE) with a
  # PENDING_OUTPUT event and re-use them.
  valid_artifact_states = metadata_store_pb2.Artifact.State.values()
  valid_artifact_states.remove(metadata_store_pb2.Artifact.State.REFERENCE)
  existing_pending_output_artifacts = get_pending_output_artifacts(
      metadata_handle, execution.id, valid_artifact_states
  )
  if existing_pending_output_artifacts:
    _reuse_existing_artifacts(
        pending_output_artifacts, existing_pending_output_artifacts
    )
  elif pending_output_artifacts:
    # Register the pending output artifacts for this execution.
    contexts = metadata_handle.store.get_contexts_by_execution(execution.id)
    put_execution(
        metadata_handle,
        execution,
        contexts,
        output_artifacts=pending_output_artifacts,
        output_event_type=metadata_store_pb2.Event.PENDING_OUTPUT,
    )
The provided code snippet includes necessary dependencies for implementing the `register_output_artifacts` function. Write a Python function `def register_output_artifacts( metadata_handle: metadata.Metadata, execution_id: int, output_artifacts: typing_utils.ArtifactMultiMap, )` to solve the following problem:
Registers REFERENCE and PENDING output artifacts with the execution. Artifacts in output_artifacts not in state REFERENCE will be given state PENDING. Artifacts in state REFERENCE will keep their state. Each output artifact will be linked to the execution with a PENDING_OUTPUT event. The artifacts will be modified in-place to add IDs as registered by MLMD. This function is idempotent if called more than once with the same output artifact dict. In that case, the function will find the already registered output artifacts for the execution and reuse their IDs. This function cannot, however, be called for the same execution more than once with different output artifact dicts. Args: metadata_handle: A handle to access MLMD execution_id: The ID of an existing execution to apply output artifacts to. output_artifacts: The output artifact dict to register. Artifacts will be modified in place to add IDs after creation in MLMD. Raises: ValueError if the specified execution is not active, or if this function was called once before for the same execution but with a different output artifact dict.
Here is the function:
def register_output_artifacts(
    metadata_handle: metadata.Metadata,
    execution_id: int,
    output_artifacts: typing_utils.ArtifactMultiMap,
):
  """Registers REFERENCE and PENDING output artifacts with the execution.

  Artifacts whose state is not REFERENCE are moved to state PENDING; artifacts
  already in state REFERENCE keep their state. Every output artifact is linked
  to the execution through a PENDING_OUTPUT event, and each artifact is
  modified in place to carry the ID assigned by MLMD.

  Calling this function again with the same output artifact dict is
  idempotent: the already-registered output artifacts for the execution are
  found and their IDs re-used. It must not, however, be called for the same
  execution with a *different* output artifact dict.

  Args:
    metadata_handle: A handle to access MLMD
    execution_id: The ID of an existing execution to apply output artifacts to.
    output_artifacts: The output artifact dict to register. Artifacts will be
      modified in place to add IDs after creation in MLMD.

  Raises:
    ValueError if the specified execution is not active, or if this function
    was called once before for the same execution but with a different output
    artifact dict.
  """
  (execution,) = metadata_handle.store.get_executions_by_id([execution_id])
  if not is_execution_running(execution):
    raise ValueError(
        f'Cannot register output artifacts on inactive execution '
        f'ID={execution_id} with state = {execution.last_known_state}'
    )
  _register_pending_output_artifacts(metadata_handle, execution,
                                     output_artifacts)
  _register_reference_output_artifacts(metadata_handle, execution,
                                       output_artifacts)
166,242 | from __future__ import annotations
import collections
import copy
import itertools
import re
from typing import Any, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple
from absl import logging
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.utils import metrics_utils
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import typing_utils
from tfx.utils import telemetry_utils
from google.protobuf import json_format
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `get_executions_associated_with_all_contexts` function. Write a Python function `def get_executions_associated_with_all_contexts( metadata_handle: metadata.Metadata, contexts: Iterable[metadata_store_pb2.Context], ) -> List[metadata_store_pb2.Execution]` to solve the following problem:
Returns executions that are associated with all given contexts. Args: metadata_handle: A handler to access MLMD. contexts: MLMD contexts for which to fetch associated executions. Returns: A list of executions associated with all given contexts.
Here is the function:
def get_executions_associated_with_all_contexts(
    metadata_handle: metadata.Metadata,
    contexts: Iterable[metadata_store_pb2.Context],
) -> List[metadata_store_pb2.Execution]:
  """Returns executions that are associated with all given contexts.

  Args:
    metadata_handle: A handler to access MLMD.
    contexts: MLMD contexts for which to fetch associated executions.

  Returns:
    A list of executions associated with all given contexts.
  """
  # Build one equality clause per context; ANDing them restricts the result
  # to executions linked to every context.
  clauses = []
  for index, context in enumerate(contexts):
    clauses.append('contexts_%s.id = %s' % (index, context.id))
  execution_query = q.And(clauses)
  return metadata_handle.store.get_executions(
      list_options=execution_query.list_options())
166,243 | import collections
import itertools
from typing import Callable, Mapping, Optional, Sequence, Union
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.orchestration.experimental.core import constants as orchestration_constants
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
import ml_metadata as mlmd
def _maybe_clause(clause: Optional[str]) -> Sequence[str]:
  """Wraps `clause` in a one-element list, or returns [] when it is None."""
  if clause is None:
    return []
  return [clause]
166,244 | import collections
import itertools
from typing import Callable, Mapping, Optional, Sequence, Union
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.orchestration.experimental.core import constants as orchestration_constants
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
import ml_metadata as mlmd
def get_live_output_artifacts_of_node_by_output_key(
    store: mlmd.MetadataStore,
    *,
    pipeline_id: str,
    node_id: str,
    pipeline_run_id: Optional[str] = None,
    execution_states: Optional[Sequence['mlmd.proto.Execution.State']] = None,
) -> Mapping[str, Sequence[Sequence[mlmd.proto.Artifact]]]:
  """Get LIVE output artifacts of the given node grouped by output key.

  The LIVE output artifacts associated with an output key are represented as a
  list of a list of artifacts.
  1. The outer list represents artifacts generated across all executions.
  2. The inner list represents artifacts generated by one execution.
  3. Elements in the outer list are returned in descending order of the creation
  time of the execution associated with them.
  4. Elements in the inner list have no order guarantee.
  5. If no LIVE output artifacts found for one execution, an empty list will be
  returned.

  Args:
    store: A MetadataStore object.
    pipeline_id: A pipeline ID.
    node_id: A node ID.
    pipeline_run_id: The pipeline run ID that the node belongs to. Only
      artifacts from the specified pipeline run are returned if specified.
    execution_states: The MLMD execution state(s) to pull LIVE artifacts from.
      If not specified or is empty, will consider all MLMD execution states.

  Returns:
    A mapping from output key to all output artifacts from the given node.
  """
  # Step 1: Get LIVE artifacts attributed to node with `node_id`.
  live_artifacts = _get_node_live_artifacts(
      store,
      pipeline_id=pipeline_id,
      node_id=node_id,
      pipeline_run_id=pipeline_run_id,
  )
  # Short-circuit: no LIVE artifacts means nothing to group.
  if not live_artifacts:
    return {}
  # Step 2: Get executions associated with node that created `live_artifacts`
  # ordered by execution creation time in descending order.
  # These executions should satisfy the constraint:
  # min (execution update time) >= min (artifact create time)
  min_live_artifact_create_time = min(
      [a.create_time_since_epoch for a in live_artifacts], default=0
  )
  # Within one transaction that updates both artifacts and execution, the
  # timestamp of execution is larger or equal than that of the artifacts.
  # Apply time skew for the artifacts created before cl/574333630 is rolled out.
  # TODO(b/275231956): Remove the following 2 lines if we are sure that there
  # are no more artifacts older than the timestamp.
  if min_live_artifact_create_time < orchestration_constants.TIME_SKEW_DATE:
    min_live_artifact_create_time -= 24 * 3600 * 1000
  executions_ordered_by_desc_creation_time = get_node_executions(
      store,
      pipeline_id=pipeline_id,
      node_id=node_id,
      pipeline_run_id=pipeline_run_id,
      order_by=mlmd.OrderByField.CREATE_TIME,
      is_asc=False,
      execution_states=execution_states,
      min_last_update_time_since_epoch=min_live_artifact_create_time,
  )
  if not executions_ordered_by_desc_creation_time:
    return {}
  # Step 3: Get output events by executions obtained in step 2.
  events_by_executions = store.get_events_by_execution_ids(
      _ids(executions_ordered_by_desc_creation_time)
  )
  output_events = [
      e for e in events_by_executions if event_lib.is_valid_output_event(e)
  ]
  # Step 4: Construct and return `output_artifacts_by_output_key` from events.
  #
  # Create a mapping from execution_id to an empty list first to make sure
  # iteration orders of output_events_by_execution_id and
  # output_artifacts_map_by_execution_id are both in desc order of execution's
  # creation time.
  #
  # The desc order is guaranteed by execution_ids and dict is guaranteed to be
  # iterated in the insertion order of keys.
  output_events_by_execution_id = {
      execution.id: [] for execution in executions_ordered_by_desc_creation_time
  }
  for event in output_events:
    output_events_by_execution_id[event.execution_id].append(event)
  artifact_ids_by_output_key_map_by_execution_id = {}
  for exec_id, events in output_events_by_execution_id.items():
    output_artifacts_map = event_lib.reconstruct_artifact_id_multimap(events)
    artifact_ids_by_output_key_map_by_execution_id[exec_id] = (
        output_artifacts_map
    )
  output_artifacts_by_output_key = collections.defaultdict(list)
  # Keep only LIVE output artifacts when constructing the result.
  # Artifact ids referenced by events but absent from the LIVE set are
  # silently dropped by the membership check below.
  live_artifacts_by_id = {a.id: a for a in live_artifacts}
  for (
      artifact_ids_by_output_key
  ) in artifact_ids_by_output_key_map_by_execution_id.values():
    for output_key, artifact_ids in artifact_ids_by_output_key.items():
      live_output_artifacts = [
          live_artifacts_by_id[artifact_id]
          for artifact_id in artifact_ids
          if artifact_id in live_artifacts_by_id
      ]
      output_artifacts_by_output_key[output_key].append(live_output_artifacts)
  return output_artifacts_by_output_key
The provided code snippet includes necessary dependencies for implementing the `get_live_output_artifacts_of_node` function. Write a Python function `def get_live_output_artifacts_of_node( store: mlmd.MetadataStore, *, pipeline_id: str, node_id: str, ) -> Sequence[mlmd.proto.Artifact]` to solve the following problem:
Gets LIVE output artifacts of the given node. The function query is composed of 3 MLMD API calls: 1. Call get_artifacts() to get LIVE artifacts attributed to the given node. 2. Call get_executions() to get executions that created artifacts from step 1. 3. Call get_events_by_execution_ids() and filter artifacts on whether they are output artifacts of executions from step 2. Args: store: A MetadataStore object. pipeline_id: A pipeline ID. node_id: A node ID. Returns: A list of output artifacts from the given node.
Here is the function:
def get_live_output_artifacts_of_node(
    store: mlmd.MetadataStore,
    *,
    pipeline_id: str,
    node_id: str,
) -> Sequence[mlmd.proto.Artifact]:
  """Gets LIVE output artifacts of the given node.

  This is a flattened view of
  `get_live_output_artifacts_of_node_by_output_key`: the LIVE output artifacts
  of every output key and every execution are concatenated into one list.

  Args:
    store: A MetadataStore object.
    pipeline_id: A pipeline ID.
    node_id: A node ID.

  Returns:
    A list of output artifacts from the given node.
  """
  artifacts_by_output_key = get_live_output_artifacts_of_node_by_output_key(
      store, pipeline_id=pipeline_id, node_id=node_id
  )
  flattened = []
  for per_execution_lists in artifacts_by_output_key.values():
    for artifact_list in per_execution_lists:
      flattened.extend(artifact_list)
  return flattened
166,245 | from typing import Any, Dict, List, cast
from absl import logging
from tfx import types
from tfx.dsl.components.common import importer
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import system_node_handler
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import pipeline_pb2
def _is_artifact_reimported(output_artifacts: Dict[str, List[types.Artifact]],
                            output_key: str) -> bool:
  """Returns True iff the artifacts under `output_key` were reimported.

  Reimported artifacts are present in the output dict and already carry
  MLMD-assigned (truthy) ids.
  """
  artifacts = output_artifacts[output_key]
  if not artifacts:
    return False
  return all(bool(artifact.id) for artifact in artifacts)
166,246 | import abc
from typing import Any, Optional
from tfx.dsl.compiler import compiler
from tfx.dsl.components.base import base_component
from tfx.orchestration import pipeline as pipeline_py
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import doc_controls
The provided code snippet includes necessary dependencies for implementing the `_make_pipeline_proto` function. Write a Python function `def _make_pipeline_proto( pipeline: pipeline_py.Pipeline) -> pipeline_pb2.Pipeline` to solve the following problem:
Resolve pip dependencies and compile Pipeline object.
Here is the function:
def _make_pipeline_proto(
    pipeline: pipeline_py.Pipeline) -> pipeline_pb2.Pipeline:
  """Resolves pip dependencies and compiles a Pipeline object to its IR.

  Args:
    pipeline: A DSL Pipeline object (not an already-compiled IR proto).

  Returns:
    The compiled pipeline IR proto.

  Raises:
    ValueError: If `pipeline` is already a pipeline IR proto.
  """
  if isinstance(pipeline, pipeline_pb2.Pipeline):
    raise ValueError(
        'The "run" method, which is only meant for running Pipeline objects, '
        'was called with a Pipeline IR. Did you mean to call the '
        '"run_with_ir" method instead?')
  for component in pipeline.components:
    # TODO(b/187122662): Pass through pip dependencies as a first-class
    # component flag.
    if not isinstance(component, base_component.BaseComponent):
      continue
    component._resolve_pip_dependencies(  # pylint: disable=protected-access
        pipeline.pipeline_info.pipeline_root)
  return compiler.Compiler().compile(pipeline)
166,247 | import abc
from typing import Any, Optional
from tfx.dsl.compiler import compiler
from tfx.dsl.components.base import base_component
from tfx.orchestration import pipeline as pipeline_py
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import doc_controls
The provided code snippet includes necessary dependencies for implementing the `_run_opts_to_proto` function. Write a Python function `def _run_opts_to_proto( run_options: pipeline_py.RunOptions, ) -> pipeline_pb2.RunOptions` to solve the following problem:
Converts a RunOptions dataclass to proto.
Here is the function:
def _run_opts_to_proto(
    run_options: pipeline_py.RunOptions,
) -> pipeline_pb2.RunOptions:
  """Converts a RunOptions dataclass to its RunOptions proto equivalent."""
  proto = pipeline_pb2.RunOptions()
  partial_run = proto.partial_run
  if run_options.base_pipeline_run_id is None:
    # No explicit base run: snapshot from the latest pipeline run.
    partial_run.snapshot_settings.latest_pipeline_run_strategy.SetInParent()
  else:
    partial_run.snapshot_settings.base_pipeline_run_strategy.base_run_id = (
        run_options.base_pipeline_run_id)
  if run_options.from_nodes:
    partial_run.from_nodes.extend(run_options.from_nodes)
  if run_options.to_nodes:
    partial_run.to_nodes.extend(run_options.to_nodes)
  return proto
166,248 | from typing import Dict, Sequence, Union
from absl import logging
from tfx import types
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.input_resolution import node_inputs_resolver
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import typing_utils
The provided code snippet includes necessary dependencies for implementing the `resolve_parameters` function. Write a Python function `def resolve_parameters( node_parameters: pipeline_pb2.NodeParameters) -> Dict[str, types.Property]` to solve the following problem:
Resolves parameters given parameter spec. Args: node_parameters: The spec to get parameters. Returns: A Dict of parameters. Raises: RuntimeError: When there is at least one parameter still in runtime parameter form.
Here is the function:
def resolve_parameters(
    node_parameters: pipeline_pb2.NodeParameters) -> Dict[str, types.Property]:
  """Resolves parameters given parameter spec.

  Args:
    node_parameters: The spec to get parameters.

  Returns:
    A Dict of parameters.

  Raises:
    RuntimeError: When there is at least one parameter still in runtime
      parameter form.
  """
  resolved = {}
  for name, parameter in node_parameters.parameters.items():
    if not parameter.HasField('field_value'):
      raise RuntimeError('Parameter value not ready for %s' % name)
    field_value = parameter.field_value
    resolved[name] = getattr(field_value, field_value.WhichOneof('value'))
  return resolved
166,249 | import collections
import copy
import datetime
import os
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import uuid
from absl import logging
from tfx import types
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact as tfx_artifact
from tfx.types import artifact_utils
from tfx.types.value_artifact import ValueArtifact
from tfx.utils import proto_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `remove_output_dirs` function. Write a Python function `def remove_output_dirs( output_dict: Mapping[str, Sequence[types.Artifact]]) -> None` to solve the following problem:
Remove dirs of output artifacts' URI.
Here is the function:
def remove_output_dirs(
    output_dict: Mapping[str, Sequence[types.Artifact]]) -> None:
  """Removes the filesystem payloads behind the output artifacts' URIs.

  Args:
    output_dict: A mapping from output key to lists of output artifacts whose
      URI payloads should be deleted. External artifacts are skipped because
      their lifecycle is not managed here.
  """
  # Keys are irrelevant for deletion; iterate the artifact lists directly.
  for artifact_list in output_dict.values():
    for artifact in artifact_list:
      # Omit lifecycle management for external artifacts.
      if artifact.is_external:
        continue
      # An artifact URI may point at a directory or a single file.
      if fileio.isdir(artifact.uri):
        fileio.rmtree(artifact.uri)
      else:
        fileio.remove(artifact.uri)
166,250 | import collections
import copy
import datetime
import os
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import uuid
from absl import logging
from tfx import types
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact as tfx_artifact
from tfx.types import artifact_utils
from tfx.types.value_artifact import ValueArtifact
from tfx.utils import proto_utils
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `remove_stateful_working_dir` function. Write a Python function `def remove_stateful_working_dir(stateful_working_dir: str) -> None` to solve the following problem:
Remove stateful_working_dir.
Here is the function:
def remove_stateful_working_dir(stateful_working_dir: str) -> None:
  """Best-effort removal of the given stateful working directory."""
  try:
    fileio.rmtree(stateful_working_dir)
  except fileio.NotFoundError:
    # Nothing to delete; log and move on.
    logging.warning(
        'stateful_working_dir %s is not found, not going to delete it.',
        stateful_working_dir)
  else:
    logging.info('Deleted stateful_working_dir %s', stateful_working_dir)
166,251 | import collections
import copy
import datetime
import os
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import uuid
from absl import logging
from tfx import types
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact as tfx_artifact
from tfx.types import artifact_utils
from tfx.types.value_artifact import ValueArtifact
from tfx.utils import proto_utils
from ml_metadata.proto import metadata_store_pb2
def get_executor_output_dir(execution_info: data_types.ExecutionInfo) -> str:
  """Returns the directory containing the execution's executor output file."""
  executor_output_uri = execution_info.execution_output_uri
  return os.path.dirname(executor_output_uri)
The provided code snippet includes necessary dependencies for implementing the `migrate_executor_output_dir_from_stateful_working_directory` function. Write a Python function `def migrate_executor_output_dir_from_stateful_working_directory( execution_info: data_types.ExecutionInfo, files: collections.abc.Sequence[str], )` to solve the following problem:
Copies files from stateful working dir to executor output dir. Will not overwrite any files already existing in the executor output dir. Args: execution_info: Information for the execution that should have its files migrated. files: The relative file paths to be migrated.
Here is the function:
def migrate_executor_output_dir_from_stateful_working_directory(
    execution_info: data_types.ExecutionInfo,
    files: collections.abc.Sequence[str],
):
  """Copies files from stateful working dir to executor output dir.

  Will not overwrite any files already existing in the executor output dir.

  Args:
    execution_info: Information for the execution that should have its files
      migrated.
    files: The relative file paths to be migrated.
  """
  executor_output_dir = get_executor_output_dir(execution_info)
  stateful_working_dir = execution_info.stateful_working_dir
  # Source paths that were actually copied, for the summary log below.
  found_paths = []
  for file in files:
    stateful_working_file = os.path.join(stateful_working_dir, file)
    executor_output_file = os.path.join(executor_output_dir, file)
    # Copy only when the source exists and the destination does not, so
    # existing executor outputs are never overwritten.
    if fileio.exists(stateful_working_file) and not fileio.exists(
        executor_output_file
    ):
      # We may need to make the parent directories for the executor output dir.
      executor_output_file_dir = os.path.dirname(executor_output_file)
      if not fileio.exists(executor_output_file_dir):
        fileio.makedirs(executor_output_file_dir)
      found_paths.append(stateful_working_file)
      fileio.copy(stateful_working_file, executor_output_file)
  if found_paths:
    logging.info(
        'Executor output dir %s has had the following files migrated to it. %s',
        executor_output_dir,
        found_paths,
    )
166,252 | import collections
import copy
import datetime
import os
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import uuid
from absl import logging
from tfx import types
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact as tfx_artifact
from tfx.types import artifact_utils
from tfx.types.value_artifact import ValueArtifact
from tfx.utils import proto_utils
from ml_metadata.proto import metadata_store_pb2
_SYSTEM = '.system'
_ORCHESTRATOR_GENERATED_BCL_DIR = 'orchestrator_generated_bcl'
def get_node_dir(
    pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec, node_id: str
) -> str:
  """Returns `<pipeline_root>/<node_id>`, the directory for the given node."""
  pipeline_root = (
      pipeline_runtime_spec.pipeline_root.field_value.string_value)
  return os.path.join(pipeline_root, node_id)
The provided code snippet includes necessary dependencies for implementing the `get_orchestrator_generated_bcl_dir` function. Write a Python function `def get_orchestrator_generated_bcl_dir( pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec, node_id: str ) -> str` to solve the following problem:
Generates a root directory to hold orchestrator generated BCLs for the given node. Args: pipeline_runtime_spec: pipeline runtime specifications. node_id: unique id of the node within the pipeline. Returns: Path to orchestrator generated bcl root dir, which has the format `<node_dir>/.system/orchestrator_generated_bcl`
Here is the function:
def get_orchestrator_generated_bcl_dir(
    pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec, node_id: str
) -> str:
  """Returns (creating it if needed) the orchestrator-generated BCL root dir.

  Args:
    pipeline_runtime_spec: pipeline runtime specifications.
    node_id: unique id of the node within the pipeline.

  Returns:
    Path to orchestrator generated bcl root dir, which has the format
    `<node_dir>/.system/orchestrator_generated_bcl`.

  Raises:
    Whatever `fileio.makedirs` raises if the directory cannot be created
    (the error is logged before re-raising).
  """
  bcl_dir = os.path.join(
      get_node_dir(pipeline_runtime_spec, node_id),
      _SYSTEM,
      _ORCHESTRATOR_GENERATED_BCL_DIR,
  )
  if fileio.exists(bcl_dir):
    return bcl_dir
  try:
    fileio.makedirs(bcl_dir)
  except Exception:  # pylint: disable=broad-except
    logging.exception(
        'Failed to make orchestrator generated bcl dir: %s',
        bcl_dir,
    )
    raise
  return bcl_dir
166,253 | from concurrent import futures
from typing import Optional
from absl import logging
import grpc
from tfx.orchestration import metadata
from tfx.proto.orchestration import execution_watcher_pb2
from tfx.proto.orchestration import execution_watcher_pb2_grpc
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `generate_service_stub` function. Write a Python function `def generate_service_stub( address: str, creds: Optional[grpc.ChannelCredentials] = None, ) -> execution_watcher_pb2_grpc.ExecutionWatcherServiceStub` to solve the following problem:
Generates a gRPC service stub for a given server address.
Here is the function:
def generate_service_stub(
    address: str,
    creds: Optional[grpc.ChannelCredentials] = None,
) -> execution_watcher_pb2_grpc.ExecutionWatcherServiceStub:
  """Generates a gRPC service stub for a given server address.

  Args:
    address: Target server address, e.g. 'localhost:50051'.
    creds: Optional channel credentials; when omitted, an insecure channel
      is used.

  Returns:
    An ExecutionWatcherService stub bound to `address`.
  """
  if creds:
    channel = grpc.secure_channel(address, creds)
  else:
    channel = grpc.insecure_channel(address)
  return execution_watcher_pb2_grpc.ExecutionWatcherServiceStub(channel)
166,254 | import threading
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator import kubernetes_job_runner
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_invocation_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
The provided code snippet includes necessary dependencies for implementing the `_create_execution_invocation_proto` function. Write a Python function `def _create_execution_invocation_proto( pipeline: pipeline_pb2.Pipeline, task: task_lib.ExecNodeTask, node: pipeline_pb2.PipelineNode ) -> execution_invocation_pb2.ExecutionInvocation` to solve the following problem:
Creates an ExecutionInvocation proto with some initial info.
Here is the function:
def _create_execution_invocation_proto(
    pipeline: pipeline_pb2.Pipeline, task: task_lib.ExecNodeTask,
    node: pipeline_pb2.PipelineNode
) -> execution_invocation_pb2.ExecutionInvocation:
  """Creates an ExecutionInvocation proto with some initial info.

  Args:
    pipeline: Pipeline IR supplying `pipeline_info` and the run id.
    task: ExecNodeTask carrying exec properties, input/output artifacts,
      working directories and the execution id.
    node: The pipeline node this invocation is for.

  Returns:
    An ExecutionInvocation populated from `pipeline`, `task` and `node`.
  """
  return execution_invocation_pb2.ExecutionInvocation(
      # Exec properties are serialized twice: once as plain metadata values
      # and once with schema information preserved.
      execution_properties=(data_types_utils.build_metadata_value_dict(
          task.exec_properties)),
      execution_properties_with_schema=(
          data_types_utils.build_pipeline_value_dict(task.exec_properties)),
      output_metadata_uri=task.executor_output_uri,
      input_dict=data_types_utils.build_artifact_struct_dict(
          task.input_artifacts),
      output_dict=data_types_utils.build_artifact_struct_dict(
          task.output_artifacts),
      stateful_working_dir=task.stateful_working_dir,
      tmp_dir=task.tmp_dir,
      pipeline_info=pipeline.pipeline_info,
      pipeline_node=node,
      execution_id=task.execution_id,
      pipeline_run_id=pipeline.runtime_spec.pipeline_run_id.field_value
      .string_value)
166,255 | import threading
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator import kubernetes_job_runner
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_invocation_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
The provided code snippet includes necessary dependencies for implementing the `_get_pipeline_node` function. Write a Python function `def _get_pipeline_node(pipeline: pipeline_pb2.Pipeline, node_id: str) -> pipeline_pb2.PipelineNode` to solve the following problem:
Gets corresponding pipeline node from IR given the node_id.
Here is the function:
def _get_pipeline_node(pipeline: pipeline_pb2.Pipeline,
                       node_id: str) -> pipeline_pb2.PipelineNode:
  """Gets corresponding pipeline node from IR given the node_id.

  Args:
    pipeline: Pipeline IR to search.
    node_id: Id of the node to look up.

  Returns:
    The first `PipelineNode` whose `node_info.id` matches `node_id`.

  Raises:
    status_lib.StatusNotOkError: If no matching node exists in the IR.
  """
  match = next(
      (node.pipeline_node
       for node in pipeline.nodes
       if node.pipeline_node and node.pipeline_node.node_info.id == node_id),
      None)
  if match is None:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.INVALID_ARGUMENT,
        message=f'Failed to find corresponding node in the IR, node id: {node_id}'
    )
  return match
166,256 | from concurrent import futures
import contextlib
import time
from absl import app
from absl import flags
from absl import logging
import grpc
from tfx.orchestration import metadata
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator import kubernetes_task_scheduler
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator.service import kubernetes_orchestrator_service
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator.service.proto import service_pb2_grpc
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import task_manager as tm
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import task_scheduler as ts
# Caps how many task schedulers may be active concurrently in TaskManager.
_MAX_ACTIVE_TASK_SCHEDULERS_FLAG = flags.DEFINE_integer(
    'tflex_max_active_task_schedulers', 100,
    'Maximum number of active task schedulers.')
# When set, the orchestrator loop exits after this many seconds without
# orchestration activity (see _run below).
_INACTIVITY_TTL_SECS_FLAG = flags.DEFINE_float(
    'tflex_inactivity_ttl_secs', 30, 'Orchestrator inactivity TTL. If set, '
    'orchestrator will exit after ttl seconds of no orchestration activity.')
# Seconds to wait between orchestration iterations; the wait is
# short-circuited when a pipeline state change is detected.
_DEFAULT_POLLING_INTERVAL_SECS_FLAG = flags.DEFINE_float(
    'tflex_default_polling_interval_secs', 10.0,
    'Default orchestration polling interval.')
def _start_grpc_server(
    servicer: kubernetes_orchestrator_service.KubernetesOrchestratorServicer
) -> grpc.Server:
  """Starts GRPC server.

  Args:
    servicer: The orchestrator servicer implementation to register.

  Returns:
    The started `grpc.Server`.
  """
  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
  service_pb2_grpc.add_KubernetesOrchestratorServicer_to_server(
      servicer, server)
  # Local credentials: only connections from the same host are accepted.
  server_creds = grpc.local_server_credentials()
  # NOTE(review): _SERVER_PORT_FLAG is not defined in this chunk — presumably
  # declared alongside the other flags in this module; confirm.
  server.add_secure_port(f'[::]:{_SERVER_PORT_FLAG.value}', server_creds)
  server.start()
  return server
def _create_mlmd_connection():
  """Creates connection for MLMD.

  Returns:
    A `metadata.Metadata` handle configured for the MySQL-backed MLMD
    instance described by the command-line flags.
  """
  # Connection parameters come from the _MYSQL_*_FLAG flags; the flag
  # definitions are not visible in this chunk — presumably declared above.
  connection_config = metadata.mysql_metadata_connection_config(
      host=_MYSQL_HOST_FLAG.value,
      port=_MYSQL_PORT_FLAG.value,
      username=_MYSQL_USERNAME_FLAG.value,
      database=_MYSQL_DATABASE_FLAG.value,
      password=_MYSQL_PASSWORD_FLAG.value)
  return metadata.Metadata(connection_config=connection_config)
def _sleep_tick_duration_secs(time_budget: float) -> float:
  """Sleeps and returns new time budget; standalone fn to mock in tests."""
  # _TICK_DURATION_SECS is defined elsewhere in the module (not visible in
  # this chunk).
  time.sleep(_TICK_DURATION_SECS)
  return time_budget - _TICK_DURATION_SECS
)
The provided code snippet includes necessary dependencies for implementing the `_run` function. Write a Python function `def _run() -> None` to solve the following problem:
Runs the main orchestration loop.
Here is the function:
def _run() -> None:
  """Runs the main orchestration loop.

  Starts the orchestrator gRPC service and task manager, then repeatedly
  calls `pipeline_ops.orchestrate`. When `--tflex_inactivity_ttl_secs` is
  set, the loop exits after that many seconds without any orchestration
  activity.
  """
  with contextlib.ExitStack() as stack:
    stack.enter_context(event_observer.init())
    mlmd_handle = stack.enter_context(_create_mlmd_connection())
    orchestrator_servicer = kubernetes_orchestrator_service.KubernetesOrchestratorServicer(
        mlmd_handle)
    server = _start_grpc_server(orchestrator_servicer)
    # Ensure the gRPC server is stopped when the stack unwinds.
    stack.callback(server.stop, grace=None)
    task_queue = tq.TaskQueue()
    service_job_manager = service_jobs.DummyServiceJobManager()
    task_manager = stack.enter_context(
        tm.TaskManager(
            mlmd_handle,
            task_queue,
            max_active_task_schedulers=_MAX_ACTIVE_TASK_SCHEDULERS_FLAG.value))
    last_active = time.time()
    iteration = 0
    while not _INACTIVITY_TTL_SECS_FLAG.value or time.time(
    ) - last_active <= _INACTIVITY_TTL_SECS_FLAG.value:
      try:
        iteration += 1
        logging.info('Orchestration loop: iteration #%d (since process start).',
                     iteration)
        event_observer.check_active()

        # Last pipeline state change time is useful to decide if wait period
        # between iterations can be short-circuited.
        last_state_change_time_secs = (
            pipeline_state.last_state_change_time_secs())

        # Any orchestration work performed counts as activity for the
        # inactivity-TTL exit check.
        if pipeline_ops.orchestrate(mlmd_handle, task_queue,
                                    service_job_manager):
          last_active = time.time()

        time_budget = _DEFAULT_POLLING_INTERVAL_SECS_FLAG.value
        logging.info(
            'Orchestration loop: waiting %s seconds before next iteration.',
            time_budget)
        while time_budget > 0.0:
          # Task manager should never be "done" unless there was an error.
          if task_manager.done():
            if task_manager.exception():
              raise task_manager.exception()
            else:
              raise RuntimeError(
                  'Task manager unexpectedly stalled due to an internal error.')

          # Short-circuit if state change is detected.
          if (pipeline_state.last_state_change_time_secs() >
              last_state_change_time_secs):
            last_state_change_time_secs = (
                pipeline_state.last_state_change_time_secs())
            logging.info(
                'Orchestration loop: detected state change, exiting wait period '
                'early (with %s of %s seconds remaining).', time_budget,
                _DEFAULT_POLLING_INTERVAL_SECS_FLAG.value)
            break
          time_budget = _sleep_tick_duration_secs(time_budget)
      except Exception:  # pylint: disable=broad-except
        logging.exception('Exception in main orchestration loop!')
        raise
    logging.info('Exiting due to no pipeline run in %s seconds',
                 _INACTIVITY_TTL_SECS_FLAG.value)
166,257 | from concurrent import futures
import contextlib
import time
from absl import app
from absl import flags
from absl import logging
import grpc
from tfx.orchestration import metadata
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator import kubernetes_task_scheduler
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator.service import kubernetes_orchestrator_service
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator.service.proto import service_pb2_grpc
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import task_manager as tm
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import task_scheduler as ts
The provided code snippet includes necessary dependencies for implementing the `_register_task_schedulers` function. Write a Python function `def _register_task_schedulers() -> None` to solve the following problem:
Registers task schedulers.
Here is the function:
def _register_task_schedulers() -> None:
  """Registers the Kubernetes task scheduler for supported executable specs."""
  supported_spec_urls = (
      'type.googleapis.com/tfx.orchestration.executable_spec.PythonClassExecutableSpec',
      'type.googleapis.com/tfx.orchestration.executable_spec.BeamExecutableSpec',
  )
  for spec_url in supported_spec_urls:
    ts.TaskSchedulerRegistry.register(
        spec_url, kubernetes_task_scheduler.KubernetesTaskScheduler)
166,258 | from absl import app
from absl import flags
from absl import logging
from tfx import v1 as tfx
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator import kubernetes_job_runner
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import pipeline_pb2
from google.protobuf import text_format
The provided code snippet includes necessary dependencies for implementing the `_prepare_sample_execution_info` function. Write a Python function `def _prepare_sample_execution_info(bucket, artifact_path, output_path, data_path)` to solve the following problem:
Prepare sample ImportSchemaGen execution info.
Here is the function:
def _prepare_sample_execution_info(bucket, artifact_path, output_path,
                                   data_path):
  """Prepare sample ImportSchemaGen execution info.

  Args:
    bucket: GCS bucket name (without the 'gs://' prefix).
    artifact_path: Path suffix under the bucket for the output Schema
      artifact.
    output_path: Path suffix for the execution output metadata.
    data_path: Path suffix of the schema file to import.

  Returns:
    A `data_types.ExecutionInfo` populated for a sample ImportSchemaGen run.
  """
  pipeline_root = f'gs://{bucket}'
  sample_artifact = tfx.types.standard_artifacts.Schema()
  sample_artifact.uri = pipeline_root + artifact_path
  execution_output_uri = pipeline_root + output_path
  # NOTE(review): 'workding' looks like a typo for 'working'; it is only a
  # sample path value, so it is left unchanged here — confirm before fixing.
  stateful_working_dir = pipeline_root + '/workding/dir'
  exec_properties = {
      'schema_file': pipeline_root + data_path,
  }
  pipeline_info = pipeline_pb2.PipelineInfo(id='my_pipeline')
  pipeline_node = text_format.Parse(
      """
      node_info {
        id: 'my_node'
      }
      """, pipeline_pb2.PipelineNode())
  original = data_types.ExecutionInfo(
      input_dict={},
      output_dict={'schema': [sample_artifact]},
      exec_properties=exec_properties,
      execution_output_uri=execution_output_uri,
      stateful_working_dir=stateful_working_dir,
      pipeline_info=pipeline_info,
      pipeline_node=pipeline_node)
  return original
166,259 | from absl import app
from absl import flags
from absl import logging
from tfx import v1 as tfx
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator import kubernetes_job_runner
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import pipeline_pb2
from google.protobuf import text_format
The provided code snippet includes necessary dependencies for implementing the `_prepare_sample_executable_spec` function. Write a Python function `def _prepare_sample_executable_spec()` to solve the following problem:
Prepare sample ImportSchemaGen executable spec.
Here is the function:
def _prepare_sample_executable_spec():
  """Prepare sample ImportSchemaGen executable spec."""
  # Encode the component's executor spec into its proto representation.
  return tfx.components.ImportSchemaGen.EXECUTOR_SPEC.encode()
166,260 | from absl import app
from absl import flags
import grpc
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator.service.proto import service_pb2
from tfx.orchestration.experimental.centralized_kubernetes_orchestrator.service.proto import service_pb2_grpc
The provided code snippet includes necessary dependencies for implementing the `_echo_message` function. Write a Python function `def _echo_message(stub, request)` to solve the following problem:
Echoes user's message.
Here is the function:
def _echo_message(stub, request):
"""Echoes user's message."""
try:
response = stub.Echo(request)
print(response)
return 0
except grpc.RpcError as rpc_error:
print(rpc_error)
return -1 | Echoes user's message. |
166,261 | import abc
import datetime
import random
import string
import time
from absl import logging
from kubernetes import client as k8s_client
from tfx.orchestration.experimental.core import task_scheduler
from tfx.orchestration.python_execution_binary import python_execution_binary_utils
from tfx.utils import kube_utils
from tfx.utils import status as status_lib
def _generate_component_name_suffix() -> str:
letters = string.ascii_lowercase
return '-' + ''.join(random.choice(letters) for i in range(10)) | null |
166,262 | import absl
from tfx.orchestration import pipeline as pipeline_module
from tfx.orchestration.experimental.kubernetes import kubernetes_dag_runner
from tfx.orchestration.test_pipelines.download_grep_print_pipeline import create_pipeline_component_instances
# Name and artifact root used by this sample pipeline.
_pipeline_name = 'download_grep_print_pipeline'
_pipeline_root = 'gs://my-bucket'
def create_pipeline_component_instances(text_url: str, pattern: str):
  """Creates tasks for the download_grep_print pipeline.

  Args:
    text_url: URL of the text file to download.
    pattern: Pattern to grep for in the downloaded text.

  Returns:
    The list of wired component instances, in execution order.
  """
  download = downloader_component(url=text_url)
  grep = grep_component(text=download.outputs['data'], pattern=pattern)
  printer = print_component(text=grep.outputs['filtered_text'])
  return [download, grep, printer]
The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline() -> pipeline_module.Pipeline` to solve the following problem:
Create sample container component pipeline.
Here is the function:
def _create_pipeline() -> pipeline_module.Pipeline:
  """Builds the sample download-grep-print container component pipeline."""
  text_url = ('https://raw.githubusercontent.com/karpathy/char-rnn/370cbcd/'
              'data/tinyshakespeare/input.txt')
  components = create_pipeline_component_instances(text_url, 'art thou')
  # Use the default in-cluster MySql metadata config.
  metadata_config = (
      kubernetes_dag_runner.get_default_kubernetes_metadata_config())
  return pipeline_module.Pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      components=components,
      metadata_connection_config=metadata_config,
      enable_cache=False,
  )
166,263 | import os
from typing import List
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import pipeline
from tfx.orchestration.experimental.kubernetes import kubernetes_dag_runner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
class Model(_TfxArtifact):
  """Artifact that contains the actual persisted model.

  Training components store the trained model (e.g. a saved model) in this
  artifact. A `Model` artifact contains serialization of the trained model in
  one or more formats, each suitable for a different usage (e.g. serving,
  evaluation) and serving environment.

  * File structure:
    - `{uri}/`
      - `Format-Serving/`: Model exported for serving.
        - `saved_model.pb`
        - Other actual model files.
      - `Format-TFMA/`: Model exported for evaluation.
        - `saved_model.pb`
        - Other actual model files.

  * Commonly used custom properties of the Model artifact:
  """
  TYPE_NAME = 'Model'
  TYPE_ANNOTATION = SystemModel
class ModelBlessing(_TfxArtifact):
  """Artifact that contains the evaluation of a trained model.

  This artifact is usually used with Conditional when determining whether to
  push this model to serving or not.

  ```python
  # Run pusher if evaluator has blessed the model.
  with tfx.dsl.Cond(evaluator.outputs['blessing'].future()
                    [0].custom_property('blessed') == 1):
    pusher = Pusher(...)
  ```

  * File structure:
    - `{uri}/`
      - `BLESSED`: if the evaluator has blessed the model.
      - `NOT_BLESSED`: if the evaluator has not blessed the model.
      - See tfx/components/evaluator/executor.py for how to write
        ModelBlessing.

  * Commonly used custom properties of the ModelBlessing artifact:
    - `blessed`: int value that represents whether the evaluator has blessed
      its model or not.
  """
  TYPE_NAME = 'ModelBlessing'
The provided code snippet includes necessary dependencies for implementing the `create_pipeline` function. Write a Python function `def create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline` to solve the following problem:
Implements the chicago taxi pipeline with TFX.
Here is the function:
def create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
                    module_file: str, serving_model_dir: str,
                    beam_pipeline_args: List[str]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Path to the module file with the preprocessing and trainer
      code.
    serving_model_dir: Filesystem destination for the pushed model.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX `pipeline.Pipeline` wired for Kubernetes orchestration.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Uses user-provided Python function that implements a model.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))

  # Get the latest blessed model for model validation.
  model_resolver = resolver.Resolver(
      strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(
          type=ModelBlessing)).with_id('latest_blessed_model_resolver')

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  # Use the default in-cluster MySQL metadata config.
  config = kubernetes_dag_runner.get_default_kubernetes_metadata_config()
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=False,
      metadata_connection_config=config,
      beam_pipeline_args=beam_pipeline_args)
166,264 | import datetime
import json
from typing import List, Optional, Type
from absl import logging
from tfx.dsl.component.experimental import container_component
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.experimental.kubernetes import kubernetes_remote_runner
from tfx.orchestration.experimental.kubernetes import node_wrapper
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.utils import json_utils
from tfx.utils import kube_utils
from tfx.utils import name_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `launch_container_component` function. Write a Python function `def launch_container_component( component: base_node.BaseNode, component_launcher_class: Type[ base_component_launcher.BaseComponentLauncher], component_config: base_component_config.BaseComponentConfig, pipeline: tfx_pipeline.Pipeline)` to solve the following problem:
Use the kubernetes component launcher to launch the component. Args: component: Container component to be executed. component_launcher_class: The class of the launcher to launch the component. component_config: component config to launch the component. pipeline: Logical pipeline that contains pipeline related information.
Here is the function:
def launch_container_component(
    component: base_node.BaseNode,
    component_launcher_class: Type[
        base_component_launcher.BaseComponentLauncher],
    component_config: base_component_config.BaseComponentConfig,
    pipeline: tfx_pipeline.Pipeline):
  """Use the kubernetes component launcher to launch the component.

  Args:
    component: Container component to be executed.
    component_launcher_class: The class of the launcher to launch the
      component.
    component_config: component config to launch the component.
    pipeline: Logical pipeline that contains pipeline related information.
  """
  launcher = component_launcher_class.create(
      component=component,
      pipeline_info=pipeline.pipeline_info,
      driver_args=data_types.DriverArgs(enable_cache=pipeline.enable_cache),
      metadata_connection=metadata.Metadata(
          pipeline.metadata_connection_config),
      beam_pipeline_args=pipeline.beam_pipeline_args,
      additional_pipeline_args=pipeline.additional_pipeline_args,
      component_config=component_config)
  logging.info('Component %s is running.', component.id)
  launcher.launch()
  logging.info('Component %s is finished.', component.id)
166,265 | import datetime
import json
import time
from typing import Dict, List
from absl import logging
from kubernetes import client
from tfx.dsl.components.base import base_node
from tfx.dsl.context_managers import dsl_context_registry
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.experimental.kubernetes import node_wrapper
from tfx.utils import json_utils
from tfx.utils import kube_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
# Entrypoint command of the orchestrator container launched in the cluster;
# the entrypoint reconstructs and runs the serialized pipeline.
_ORCHESTRATOR_COMMAND = [
    'python', '-m',
    'tfx.orchestration.experimental.kubernetes.orchestrator_container_entrypoint'
]

# Seconds to wait for the Kubernetes job to launch its orchestrator pod.
JOB_CREATION_TIMEOUT = 300
def _serialize_pipeline(pipeline: tfx_pipeline.Pipeline) -> str:
  """Serializes a TFX pipeline into a JSON string.

  To be replaced with the TFX Intermediate Representation
  (tensorflow/community#271). This serialization procedure extracts from the
  pipeline the properties necessary for reconstructing the pipeline instance
  from within the cluster. Values that cannot be dumped directly with JSON
  (components, metadata config) are pre-serialized with NodeWrapper and
  MessageToJson.

  Args:
    pipeline: Logical pipeline containing pipeline args and components.

  Returns:
    Pipeline serialized as JSON string.
  """
  serialized_components = [
      json_utils.dumps(node_wrapper.NodeWrapper(component))
      for component in pipeline.components
  ]
  # Pipeline graph edges are lost during component serialization; ship the
  # downstream ids separately so the orchestrator can rebuild the graph.
  downstream_ids = _extract_downstream_ids(pipeline.components)
  return json.dumps({
      'pipeline_name': pipeline.pipeline_info.pipeline_name,
      'pipeline_root': pipeline.pipeline_info.pipeline_root,
      'enable_cache': pipeline.enable_cache,
      'components': serialized_components,
      'downstream_ids': downstream_ids,
      'metadata_connection_config': json_format.MessageToJson(
          message=pipeline.metadata_connection_config,
          preserving_proto_field_name=True,
      ),
      'beam_pipeline_args': pipeline.beam_pipeline_args,
  })
The provided code snippet includes necessary dependencies for implementing the `run_as_kubernetes_job` function. Write a Python function `def run_as_kubernetes_job(pipeline: tfx_pipeline.Pipeline, tfx_image: str) -> None` to solve the following problem:
Submits and runs a TFX pipeline from outside the cluster. Args: pipeline: Logical pipeline containing pipeline args and components. tfx_image: Container image URI for the TFX container. Raises: RuntimeError: When an error is encountered running the Kubernetes Job.
Here is the function:
def run_as_kubernetes_job(pipeline: tfx_pipeline.Pipeline,
                          tfx_image: str) -> None:
  """Submits and runs a TFX pipeline from outside the cluster.

  Serializes the pipeline, submits a Kubernetes Job running the orchestrator
  container, waits for its pod to start, streams the pod's logs, and finally
  waits for the pod to reach a terminal state.

  Args:
    pipeline: Logical pipeline containing pipeline args and components.
    tfx_image: Container image URI for the TFX container.

  Raises:
    RuntimeError: When an error is encountered running the Kubernetes Job.
  """

  # TODO(ccy): Look for alternative serialization schemes once available.
  serialized_pipeline = _serialize_pipeline(pipeline)
  arguments = [
      '--serialized_pipeline',
      serialized_pipeline,
      '--tfx_image',
      tfx_image,
  ]
  batch_api = kube_utils.make_batch_v1_api()
  job_name = 'Job_' + pipeline.pipeline_info.run_id
  # Kubernetes label values have format restrictions; sanitize the job name
  # before using it as a label.
  pod_label = kube_utils.sanitize_pod_name(job_name)
  container_name = 'pipeline-orchestrator'
  job = kube_utils.make_job_object(
      name=job_name,
      container_image=tfx_image,
      command=_ORCHESTRATOR_COMMAND + arguments,
      container_name=container_name,
      pod_labels={
          'job-name': pod_label,
      },
      service_account_name=kube_utils.TFX_SERVICE_ACCOUNT,
  )
  try:
    batch_api.create_namespaced_job('default', job, pretty=True)
  except client.rest.ApiException as e:
    raise RuntimeError('Failed to submit job! \nReason: %s\nBody: %s' %
                       (e.reason, e.body))

  # Wait for pod to start.
  orchestrator_pods = []
  core_api = kube_utils.make_core_v1_api()
  start_time = datetime.datetime.utcnow()

  # Wait for the kubernetes job to launch a pod.
  while not orchestrator_pods and (datetime.datetime.utcnow() -
                                   start_time).seconds < JOB_CREATION_TIMEOUT:
    try:
      orchestrator_pods = core_api.list_namespaced_pod(
          namespace='default',
          label_selector='job-name={}'.format(pod_label)).items
    except client.rest.ApiException as e:
      # A 404 just means the pod does not exist yet; keep polling.
      if e.status != 404:
        raise RuntimeError('Unknown error! \nReason: %s\nBody: %s' %
                           (e.reason, e.body))
    time.sleep(1)

  # Transient orchestrator should only have 1 pod.
  if len(orchestrator_pods) != 1:
    raise RuntimeError('Expected 1 pod launched by Kubernetes job, found %d' %
                       len(orchestrator_pods))
  orchestrator_pod = orchestrator_pods.pop()
  pod_name = orchestrator_pod.metadata.name

  logging.info('Waiting for pod "default:%s" to start.', pod_name)
  kube_utils.wait_pod(
      core_api,
      pod_name,
      'default',
      exit_condition_lambda=kube_utils.pod_is_not_pending,
      condition_description='non-pending status')

  # Stream logs from orchestrator pod.
  logging.info('Start log streaming for pod "default:%s".', pod_name)
  try:
    logs = core_api.read_namespaced_pod_log(
        name=pod_name,
        namespace='default',
        container=container_name,
        follow=True,
        _preload_content=False).stream()
  except client.rest.ApiException as e:
    raise RuntimeError(
        'Failed to stream the logs from the pod!\nReason: %s\nBody: %s' %
        (e.reason, e.body))

  for log in logs:
    logging.info(log.decode().rstrip('\n'))

  # Block until the pod reaches a terminal state, then surface failures.
  resp = kube_utils.wait_pod(
      core_api,
      pod_name,
      'default',
      exit_condition_lambda=kube_utils.pod_is_done,
      condition_description='done state',
      exponential_backoff=True)

  if resp.status.phase == kube_utils.PodPhase.FAILED.value:
    raise RuntimeError('Pod "default:%s" failed with status "%s".' %
                       (pod_name, resp.status))
166,266 | import datetime
import json
import time
from typing import Dict, List
from absl import logging
from kubernetes import client
from tfx.dsl.components.base import base_node
from tfx.dsl.context_managers import dsl_context_registry
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.experimental.kubernetes import node_wrapper
from tfx.utils import json_utils
from tfx.utils import kube_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `deserialize_pipeline` function. Write a Python function `def deserialize_pipeline(serialized_pipeline: str) -> tfx_pipeline.Pipeline` to solve the following problem:
Deserializes a TFX pipeline. To be replaced with the TFX Intermediate Representation: tensorflow/community#271. This deserialization procedure reverses the serialization procedure and reconstructs the pipeline instance. Args: serialized_pipeline: Pipeline JSON string serialized with the procedure from _serialize_pipeline. Returns: Original pipeline containing pipeline args and components.
Here is the function:
def deserialize_pipeline(serialized_pipeline: str) -> tfx_pipeline.Pipeline:
  """Deserializes a TFX pipeline.

  To be replaced with the TFX Intermediate Representation:
  tensorflow/community#271. This deserialization procedure reverses the
  serialization procedure and reconstructs the pipeline instance.

  Args:
    serialized_pipeline: Pipeline JSON string serialized with the procedure from
      _serialize_pipeline.

  Returns:
    Original pipeline containing pipeline args and components.

  Raises:
    ValueError: If `downstream_ids` is not a dict or its size does not match
      the number of components.
  """
  pipeline = json.loads(serialized_pipeline)
  components = [
      json_utils.loads(component) for component in pipeline['components']
  ]
  for c in components:
    dsl_context_registry.get().put_node(c)
  metadata_connection_config = metadata_store_pb2.ConnectionConfig()
  json_format.Parse(pipeline['metadata_connection_config'],
                    metadata_connection_config)
  # Restore component dependencies.
  downstream_ids = pipeline['downstream_ids']
  if not isinstance(downstream_ids, dict):
    raise ValueError("downstream_ids needs to be a 'dict'.")
  if len(downstream_ids) != len(components):
    # Bug fix: the format arguments must be a single tuple bound to the `%`
    # operator. The original passed them as two ValueError arguments, which
    # raised a TypeError from %-formatting instead of this message.
    raise ValueError(
        'Wrong number of items in downstream_ids. Expected: %d. Actual: %d' %
        (len(components), len(downstream_ids)))
  id_to_component = {component.id: component for component in components}
  for component in components:
    # Since downstream and upstream node attributes are discarded during the
    # serialization process, we initialize them here.
    component._upstream_nodes = set()  # pylint: disable=protected-access
    component._downstream_nodes = set()  # pylint: disable=protected-access
  for upstream_id, downstream_id_list in downstream_ids.items():
    upstream_component = id_to_component[upstream_id]
    for downstream_id in downstream_id_list:
      upstream_component.add_downstream_node(id_to_component[downstream_id])
  return tfx_pipeline.Pipeline(
      pipeline_name=pipeline['pipeline_name'],
      pipeline_root=pipeline['pipeline_root'],
      components=components,
      enable_cache=pipeline['enable_cache'],
      metadata_connection_config=metadata_connection_config,
      beam_pipeline_args=pipeline['beam_pipeline_args'],
  )
166,267 | from __future__ import annotations
from typing import Optional
from absl import logging
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import component_generated_alert_pb2
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import garbage_collection
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler as ts
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from ml_metadata import proto
def _update_execution_state_in_mlmd(
    mlmd_handle: metadata.Metadata,
    node_uid: task_lib.NodeUid,
    execution_id: int,
    new_state: proto.Execution.State,
    error_code: int,
    error_msg: str,
    execution_result: Optional[execution_result_pb2.ExecutionResult] = None,
) -> None:
  """Updates the execution state and sets execution_result if provided.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: UID of the node whose state-change observers are notified when
      the mutation is committed.
    execution_id: Id of the MLMD execution to update.
    new_state: State the execution transitions to.
    error_code: Recorded on the execution as a custom property (always set,
      even for non-error codes).
    error_msg: Recorded on the execution as a custom property; skipped when
      empty.
    execution_result: If provided, stored on the execution.
  """
  # The atomic op yields the execution for mutation and commits it on exit,
  # invoking the on_commit notification callback for `node_uid`.
  with mlmd_state.mlmd_execution_atomic_op(
      mlmd_handle,
      execution_id,
      on_commit=event_observer.make_notify_execution_state_change_fn(
          node_uid)) as execution:
    execution.last_known_state = new_state
    data_types_utils.set_metadata_value(
        execution.custom_properties[constants.EXECUTION_ERROR_CODE_KEY],
        error_code,
    )
    if error_msg:
      data_types_utils.set_metadata_value(
          execution.custom_properties[constants.EXECUTION_ERROR_MSG_KEY],
          error_msg)
    if execution_result:
      execution_lib.set_execution_result(execution_result, execution)
def remove_temporary_task_dirs(
    stateful_working_dir: str = '', tmp_dir: str = '') -> None:
  """Removes temporary directories created for the task.

  Args:
    stateful_working_dir: Path of the stateful working directory to remove;
      skipped when empty.
    tmp_dir: Path of the temporary directory to remove; skipped when empty.
  """
  if stateful_working_dir:
    try:
      fileio.rmtree(stateful_working_dir)
    except fileio.NotFoundError:
      logging.warning('stateful_working_dir %s not found, ignoring.',
                      stateful_working_dir)
  if tmp_dir:
    try:
      fileio.rmtree(tmp_dir)
    except fileio.NotFoundError:
      # Bug fix: the original call omitted the `tmp_dir` argument, so the
      # message was logged with a literal unformatted '%s'.
      logging.warning(
          'tmp_dir %s not found while attempting to delete, ignoring.',
          tmp_dir)
The provided code snippet includes necessary dependencies for implementing the `publish_execution_results_for_task` function. Write a Python function `def publish_execution_results_for_task(mlmd_handle: metadata.Metadata, task: task_lib.ExecNodeTask, result: ts.TaskSchedulerResult) -> None` to solve the following problem:
Publishes execution results to MLMD for task.
Here is the function:
def publish_execution_results_for_task(mlmd_handle: metadata.Metadata,
                                       task: task_lib.ExecNodeTask,
                                       result: ts.TaskSchedulerResult) -> None:
  """Publishes execution results to MLMD for task.

  Args:
    mlmd_handle: A handle to the MLMD db.
    task: The exec node task whose execution results are published.
    result: The task scheduler result to publish.

  Raises:
    TypeError: If `result.output` is not one of the recognized task scheduler
      output types (executor, importer, resolver).
  """

  def _update_state(
      status: status_lib.Status,
      execution_result: Optional[execution_result_pb2.ExecutionResult] = None
  ) -> None:
    # Pre-condition: only invoked for non-OK statuses.
    assert status.code != status_lib.Code.OK
    # Only tmp_dir is cleaned up here; the stateful working dir is preserved.
    remove_temporary_task_dirs(tmp_dir=task.tmp_dir)
    if status.code == status_lib.Code.CANCELLED and execution_result is None:
      # Mark the execution as cancelled only if the task was cancelled by the
      # task scheduler, and not by the executor.
      logging.info('Cancelling execution (id: %s); task id: %s; status: %s',
                   task.execution_id, task.task_id, status)
      execution_state = proto.Execution.CANCELED
    else:
      logging.info(
          'Aborting execution (id: %s) due to error (code: %s); task id: %s',
          task.execution_id, status.code, task.task_id)
      execution_state = proto.Execution.FAILED
    _update_execution_state_in_mlmd(
        mlmd_handle=mlmd_handle,
        node_uid=task.node_uid,
        execution_id=task.execution_id,
        new_state=execution_state,
        error_code=status.code,
        error_msg=status.message,
        execution_result=execution_result)

  # Scheduler-level failure/cancellation takes precedence over any output.
  if result.status.code != status_lib.Code.OK:
    _update_state(result.status)
    return
  if isinstance(result.output, ts.ExecutorNodeOutput):
    executor_output = result.output.executor_output
    if executor_output is not None:
      # The executor itself may report a non-OK result even when the scheduler
      # reported OK.
      if executor_output.execution_result.code != status_lib.Code.OK:
        _update_state(
            status_lib.Status(
                code=executor_output.execution_result.code,
                message=executor_output.execution_result.result_message),
            executor_output.execution_result)
        return
    # Success: both the stateful working dir and tmp_dir are removed.
    remove_temporary_task_dirs(
        stateful_working_dir=task.stateful_working_dir, tmp_dir=task.tmp_dir)
    # TODO(b/262040844): Instead of directly using the context manager here, we
    # should consider creating and using wrapper functions.
    with mlmd_state.evict_from_cache(task.execution_id):
      _, execution = execution_publish_utils.publish_succeeded_execution(
          mlmd_handle,
          execution_id=task.execution_id,
          contexts=task.contexts,
          output_artifacts=task.output_artifacts,
          executor_output=executor_output)
    garbage_collection.run_garbage_collection_for_node(mlmd_handle,
                                                       task.node_uid,
                                                       task.get_node())
    if constants.COMPONENT_GENERATED_ALERTS_KEY in execution.custom_properties:
      alerts_proto = component_generated_alert_pb2.ComponentGeneratedAlertList()
      execution.custom_properties[
          constants.COMPONENT_GENERATED_ALERTS_KEY
      ].proto_value.Unpack(alerts_proto)
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline=task.pipeline)
      # Notify observers once per alert recorded on the execution.
      for alert in alerts_proto.component_generated_alert_list:
        alert_event = event_observer.ComponentGeneratedAlert(
            execution=execution,
            pipeline_uid=pipeline_uid,
            pipeline_run=pipeline_uid.pipeline_run_id,
            node_id=task.node_uid.node_id,
            alert_body=alert.alert_body,
            alert_name=alert.alert_name,
        )
        event_observer.notify(alert_event)
  elif isinstance(result.output, ts.ImporterNodeOutput):
    output_artifacts = result.output.output_artifacts
    remove_temporary_task_dirs(
        stateful_working_dir=task.stateful_working_dir, tmp_dir=task.tmp_dir)
    # TODO(b/262040844): Instead of directly using the context manager here, we
    # should consider creating and using wrapper functions.
    with mlmd_state.evict_from_cache(task.execution_id):
      execution_publish_utils.publish_succeeded_execution(
          mlmd_handle,
          execution_id=task.execution_id,
          contexts=task.contexts,
          output_artifacts=output_artifacts)
  elif isinstance(result.output, ts.ResolverNodeOutput):
    resolved_input_artifacts = result.output.resolved_input_artifacts
    # TODO(b/262040844): Instead of directly using the context manager here, we
    # should consider creating and using wrapper functions.
    with mlmd_state.evict_from_cache(task.execution_id):
      execution_publish_utils.publish_internal_execution(
          mlmd_handle,
          execution_id=task.execution_id,
          contexts=task.contexts,
          output_artifacts=resolved_input_artifacts)
  else:
    raise TypeError(f'Unable to process task scheduler result: {result}')
166,268 | from __future__ import annotations
from typing import Optional
from absl import logging
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import component_generated_alert_pb2
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import garbage_collection
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler as ts
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from ml_metadata import proto
def _update_execution_state_in_mlmd(
    mlmd_handle: metadata.Metadata,
    node_uid: task_lib.NodeUid,
    execution_id: int,
    new_state: proto.Execution.State,
    error_code: int,
    error_msg: str,
    execution_result: Optional[execution_result_pb2.ExecutionResult] = None,
) -> None:
  """Updates the execution state and sets execution_result if provided.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: UID of the node whose state-change observers are notified when
      the mutation is committed.
    execution_id: Id of the MLMD execution to update.
    new_state: State the execution transitions to.
    error_code: Recorded on the execution as a custom property (always set,
      even for non-error codes).
    error_msg: Recorded on the execution as a custom property; skipped when
      empty.
    execution_result: If provided, stored on the execution.
  """
  # The atomic op yields the execution for mutation and commits it on exit,
  # invoking the on_commit notification callback for `node_uid`.
  with mlmd_state.mlmd_execution_atomic_op(
      mlmd_handle,
      execution_id,
      on_commit=event_observer.make_notify_execution_state_change_fn(
          node_uid)) as execution:
    execution.last_known_state = new_state
    data_types_utils.set_metadata_value(
        execution.custom_properties[constants.EXECUTION_ERROR_CODE_KEY],
        error_code,
    )
    if error_msg:
      data_types_utils.set_metadata_value(
          execution.custom_properties[constants.EXECUTION_ERROR_MSG_KEY],
          error_msg)
    if execution_result:
      execution_lib.set_execution_result(execution_result, execution)
def remove_temporary_task_dirs(
    stateful_working_dir: str = '', tmp_dir: str = '') -> None:
  """Removes temporary directories created for the task.

  Args:
    stateful_working_dir: Path of the stateful working directory to remove;
      skipped when empty.
    tmp_dir: Path of the temporary directory to remove; skipped when empty.
  """
  if stateful_working_dir:
    try:
      fileio.rmtree(stateful_working_dir)
    except fileio.NotFoundError:
      logging.warning('stateful_working_dir %s not found, ignoring.',
                      stateful_working_dir)
  if tmp_dir:
    try:
      fileio.rmtree(tmp_dir)
    except fileio.NotFoundError:
      # Bug fix: the original call omitted the `tmp_dir` argument, so the
      # message was logged with a literal unformatted '%s'.
      logging.warning(
          'tmp_dir %s not found while attempting to delete, ignoring.',
          tmp_dir)
The provided code snippet includes necessary dependencies for implementing the `publish_execution_results` function. Write a Python function `def publish_execution_results( mlmd_handle: metadata.Metadata, executor_output: execution_result_pb2.ExecutorOutput, execution_info: data_types.ExecutionInfo, contexts: list[proto.Context]) -> Optional[typing_utils.ArtifactMultiMap]` to solve the following problem:
Publishes execution result to MLMD for single component run.
Here is the function:
def publish_execution_results(
    mlmd_handle: metadata.Metadata,
    executor_output: execution_result_pb2.ExecutorOutput,
    execution_info: data_types.ExecutionInfo,
    contexts: list[proto.Context]) -> Optional[typing_utils.ArtifactMultiMap]:
  """Publishes execution result to MLMD for single component run.

  Args:
    mlmd_handle: A handle to the MLMD db.
    executor_output: Output reported by the component executor.
    execution_info: Execution metadata (ids, directories, output artifacts).
    contexts: MLMD contexts the execution is associated with.

  Returns:
    The published output artifact map on success, or `None` when the executor
    reported a non-OK result.
  """
  if executor_output.execution_result.code != status_lib.Code.OK:
    if executor_output.execution_result.code == status_lib.Code.CANCELLED:
      execution_state = proto.Execution.CANCELED
    else:
      execution_state = proto.Execution.FAILED
    # On failure only tmp_dir is removed; the stateful working dir is kept.
    remove_temporary_task_dirs(tmp_dir=execution_info.tmp_dir)
    node_uid = task_lib.NodeUid(
        pipeline_uid=task_lib.PipelineUid.from_pipeline_id_and_run_id(
            pipeline_id=execution_info.pipeline_info.id,
            pipeline_run_id=execution_info.pipeline_run_id),
        node_id=execution_info.pipeline_node.node_info.id)
    _update_execution_state_in_mlmd(
        mlmd_handle=mlmd_handle,
        node_uid=node_uid,
        execution_id=execution_info.execution_id,
        new_state=execution_state,
        error_code=executor_output.execution_result.code,
        error_msg=executor_output.execution_result.result_message,
        execution_result=executor_output.execution_result)
    return
  # Success: both the stateful working dir and tmp_dir are removed.
  remove_temporary_task_dirs(
      stateful_working_dir=execution_info.stateful_working_dir,
      tmp_dir=execution_info.tmp_dir)
  # TODO(b/262040844): Instead of directly using the context manager here, we
  # should consider creating and using wrapper functions.
  with mlmd_state.evict_from_cache(execution_info.execution_id):
    output_dict, _ = execution_publish_utils.publish_succeeded_execution(
        mlmd_handle,
        execution_id=execution_info.execution_id,
        contexts=contexts,
        output_artifacts=execution_info.output_dict,
        executor_output=executor_output)
  return output_dict
166,269 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
class InputAndParam:
  """Holder for one resolved execution's input artifacts and exec properties.

  The class-definition decorator (attr/dataclass style) was lost, leaving a
  plain class with no keyword constructor even though call sites construct it
  as `InputAndParam(input_artifacts=..., exec_properties=...)`; an explicit
  `__init__` restores that interface.
  """

  def __init__(
      self,
      input_artifacts: Optional['typing_utils.ArtifactMultiMap'] = None,
      exec_properties: Optional[
          MutableMapping[str, 'types.ExecPropertyTypes']] = None,
  ) -> None:
    # input_artifacts: resolved input artifacts, keyed by input name.
    self.input_artifacts = input_artifacts
    # exec_properties: merged static + dynamic execution properties.
    self.exec_properties = exec_properties
class ResolvedInfo:
  """Result of input resolution for a node.

  The class-definition decorator (attr/dataclass style) was lost, leaving a
  plain class with no keyword constructor even though call sites construct it
  as `ResolvedInfo(contexts=..., input_and_params=...)`; an explicit
  `__init__` restores that interface.
  """

  def __init__(
      self,
      contexts: List['metadata_store_pb2.Context'],
      input_and_params: List['InputAndParam'],
  ) -> None:
    # contexts: MLMD contexts the node's executions are associated with.
    self.contexts = contexts
    # input_and_params: one entry per resolved execution; empty means skip.
    self.input_and_params = input_and_params
def resolve_exec_properties(
    node: node_proto_view.NodeProtoView) -> Dict[str, types.ExecPropertyTypes]:
  """Returns the node's declared parameters parsed into a plain value dict."""
  schema_resolved = inputs_utils.resolve_parameters_with_schema(
      node_parameters=node.parameters)
  return data_types_utils.build_parsed_value_dict(schema_resolved)
def _create_placeholder_context(
    pipeline: pipeline_pb2.Pipeline,
    node: node_proto_view.NodeProtoView,
    input_artifacts: typing_utils.ArtifactMultiMap,
) -> placeholder_utils.ResolutionContext:
  """Builds the ResolutionContext used for resolving `node`'s placeholders."""
  runtime_spec = pipeline.runtime_spec
  materialized_inputs = {}
  for input_key, artifacts in input_artifacts.items():
    materialized_inputs[input_key] = list(artifacts)
  execution_info = data_types.ExecutionInfo(
      input_dict=materialized_inputs,
      pipeline_node=node.raw_proto(),
      pipeline_info=pipeline.pipeline_info,
      pipeline_run_id=runtime_spec.pipeline_run_id.field_value.string_value,
      top_level_pipeline_run_id=runtime_spec.top_level_pipeline_run_id,
      frontend_url=runtime_spec.frontend_url,
  )
  has_intermediate_config = pipeline.deployment_config.Is(
      pipeline_pb2.IntermediateDeploymentConfig.DESCRIPTOR)
  if not has_intermediate_config:
    # No deployment config: no executor/platform specs to attach.
    return placeholder_utils.ResolutionContext(exec_info=execution_info)
  deployment_config = pipeline_pb2.IntermediateDeploymentConfig()
  pipeline.deployment_config.Unpack(deployment_config)
  node_id = node.node_info.id
  return placeholder_utils.ResolutionContext(
      exec_info=execution_info,
      executor_spec=deployment_config_utils.get_node_executor_spec(
          deployment_config, node_id),
      platform_config=deployment_config_utils.get_node_platform_config(
          deployment_config, node_id),
      pipeline_platform_config=(
          deployment_config_utils.get_pipeline_platform_config(
              deployment_config)),
  )
The provided code snippet includes necessary dependencies for implementing the `generate_resolved_info` function. Write a Python function `def generate_resolved_info( mlmd_handle_like: mlmd_cm.HandleLike, node: node_proto_view.NodeProtoView, pipeline: pipeline_pb2.Pipeline, skip_errors: Iterable[Type[exceptions.InputResolutionError]] = (), ) -> ResolvedInfo` to solve the following problem:
Returns a `ResolvedInfo` object for executing the node or `None` to skip. Args: mlmd_handle_like: An instance of mlmd handle which connect one MLMD DB, or a MLMDConnectionManager which manages connections to multiple MLMD DBs. node: The pipeline node for which to generate. pipeline: The pipeline proto from which the node was taken (for context). skip_errors: A list of errors to skip on the given error types. Returns: A `ResolvedInfo` with input resolutions. If execution should be skipped, ResolvedInfo has empty input_and_params. Raises: InputResolutionError: If there are some errors when we try to resolve input.
Here is the function:
def generate_resolved_info(
    mlmd_handle_like: mlmd_cm.HandleLike,
    node: node_proto_view.NodeProtoView,
    pipeline: pipeline_pb2.Pipeline,
    skip_errors: Iterable[Type[exceptions.InputResolutionError]] = (),
) -> ResolvedInfo:
  """Returns a `ResolvedInfo` object for executing the node or `None` to skip.

  Args:
    mlmd_handle_like: An instance of mlmd handle which connect one MLMD DB, or a
      MLMDConnectionManager which manages connections to multiple MLMD DBs.
    node: The pipeline node for which to generate.
    pipeline: The pipeline proto from which the node was taken (for context).
    skip_errors: A list of errors to skip on the given error types.

  Returns:
    A `ResolvedInfo` with input resolutions. If execution should be skipped,
    ResolvedInfo has empty input_and_params.

  Raises:
    InputResolutionError: If there are some errors when we try to resolve input.
  """
  # Register node contexts.
  contexts = context_lib.prepare_contexts(
      metadata_handle=mlmd_cm.get_handle(mlmd_handle_like),
      node_contexts=node.contexts,
  )
  # `result` starts empty; early returns below signal "skip execution".
  result = ResolvedInfo(
      contexts=contexts,
      input_and_params=[],
  )
  # Resolve execution properties.
  exec_properties = resolve_exec_properties(node)
  # Resolve inputs.
  try:
    resolved_input_artifacts: Sequence[typing_utils.ArtifactMultiMap] = (
        inputs_utils.resolve_input_artifacts(
            metadata_handle=mlmd_handle_like, pipeline_node=node
        )
    )
  except exceptions.InputResolutionError as e:
    # Error types listed in `skip_errors` are logged and turned into an empty
    # (skip) result instead of propagating.
    for skip_error in skip_errors:
      if isinstance(e, skip_error):
        logging.info('[%s] Input resolution skipped: %s', node.node_info.id, e)
        return result
    raise
  if not resolved_input_artifacts:
    return result
  # One InputAndParam per resolved input map; dynamic parameters are resolved
  # against each input map separately.
  for input_artifacts in resolved_input_artifacts:
    try:
      dynamic_exec_properties = inputs_utils.resolve_dynamic_parameters(
          node_parameters=node.parameters,
          context=_create_placeholder_context(pipeline, node, input_artifacts),
      )
    except exceptions.InputResolutionError as e:
      # Dynamic-parameter failures always propagate (not subject to
      # `skip_errors`).
      logging.exception(
          '[%s] Parameter resolution error: %s', node.node_info.id, e
      )
      raise
    # On key collision, dynamic properties override static exec properties.
    result.input_and_params.append(
        InputAndParam(
            input_artifacts=input_artifacts,
            exec_properties={**exec_properties, **dynamic_exec_properties},
        )
    )
  return result
166,270 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
_EXTERNAL_EXECUTION_INDEX = '__external_execution_index__'
The provided code snippet includes necessary dependencies for implementing the `get_num_of_failures_from_failed_execution` function. Write a Python function `def get_num_of_failures_from_failed_execution( executions: Iterable[metadata_store_pb2.Execution], failed_execution: metadata_store_pb2.Execution) -> int` to solve the following problem:
Returns the num of failed executions. Only the executions that have the same external execution index as the failed execution will be counted. Args: executions: An iterable of executions. failed_execution: A failed execution whose execution index will be tested against to count the total number of failed execution.
Here is the function:
def get_num_of_failures_from_failed_execution(
    executions: Iterable[metadata_store_pb2.Execution],
    failed_execution: metadata_store_pb2.Execution) -> int:
  """Counts FAILED executions sharing `failed_execution`'s external index.

  Args:
    executions: An iterable of executions to scan.
    failed_execution: The failed execution whose external execution index
      selects which executions are counted.

  Returns:
    The number of FAILED executions with the same external execution index.
  """
  index_key = _EXTERNAL_EXECUTION_INDEX
  target_index = failed_execution.custom_properties[index_key].int_value

  def _is_matching_failure(execution) -> bool:
    if execution.last_known_state != metadata_store_pb2.Execution.FAILED:
      return False
    return execution.custom_properties[index_key].int_value == target_index

  return sum(1 for execution in executions if _is_matching_failure(execution))
166,271 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
_EXTERNAL_EXECUTION_INDEX = '__external_execution_index__'
The provided code snippet includes necessary dependencies for implementing the `get_next_active_execution_to_run` function. Write a Python function `def get_next_active_execution_to_run( executions: Sequence[metadata_store_pb2.Execution], ) -> Optional[metadata_store_pb2.Execution]` to solve the following problem:
Returns next active execution to run or `None` if no active executions exist. The active execution with lowest index will be returned. Args: executions: A list of executions Returns: An active execution or `None` if there is no active execution.
Here is the function:
def get_next_active_execution_to_run(
    executions: Sequence[metadata_store_pb2.Execution],
) -> Optional[metadata_store_pb2.Execution]:
  """Picks the active execution with the smallest external execution index.

  Args:
    executions: Candidate executions.

  Returns:
    The active execution whose external execution index is lowest, or `None`
    when no execution in `executions` is active.
  """

  def _external_index(execution) -> int:
    return execution.custom_properties[_EXTERNAL_EXECUTION_INDEX].int_value

  candidates = [e for e in executions if execution_lib.is_execution_active(e)]
  if not candidates:
    return None
  # min() returns the first execution with the lowest index, matching the
  # behavior of a stable sort followed by taking element 0.
  return min(candidates, key=_external_index)
166,272 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
_EXTERNAL_EXECUTION_INDEX = '__external_execution_index__'
def resolve_exec_properties(
    node: node_proto_view.NodeProtoView) -> Dict[str, types.ExecPropertyTypes]:
  """Returns the node's declared parameters parsed into a plain value dict."""
  schema_resolved = inputs_utils.resolve_parameters_with_schema(
      node_parameters=node.parameters)
  return data_types_utils.build_parsed_value_dict(schema_resolved)
def _create_placeholder_context(
    pipeline: pipeline_pb2.Pipeline,
    node: node_proto_view.NodeProtoView,
    input_artifacts: typing_utils.ArtifactMultiMap,
) -> placeholder_utils.ResolutionContext:
  """Builds the ResolutionContext used for resolving `node`'s placeholders."""
  runtime_spec = pipeline.runtime_spec
  materialized_inputs = {}
  for input_key, artifacts in input_artifacts.items():
    materialized_inputs[input_key] = list(artifacts)
  execution_info = data_types.ExecutionInfo(
      input_dict=materialized_inputs,
      pipeline_node=node.raw_proto(),
      pipeline_info=pipeline.pipeline_info,
      pipeline_run_id=runtime_spec.pipeline_run_id.field_value.string_value,
      top_level_pipeline_run_id=runtime_spec.top_level_pipeline_run_id,
      frontend_url=runtime_spec.frontend_url,
  )
  has_intermediate_config = pipeline.deployment_config.Is(
      pipeline_pb2.IntermediateDeploymentConfig.DESCRIPTOR)
  if not has_intermediate_config:
    # No deployment config: no executor/platform specs to attach.
    return placeholder_utils.ResolutionContext(exec_info=execution_info)
  deployment_config = pipeline_pb2.IntermediateDeploymentConfig()
  pipeline.deployment_config.Unpack(deployment_config)
  node_id = node.node_info.id
  return placeholder_utils.ResolutionContext(
      exec_info=execution_info,
      executor_spec=deployment_config_utils.get_node_executor_spec(
          deployment_config, node_id),
      platform_config=deployment_config_utils.get_node_platform_config(
          deployment_config, node_id),
      pipeline_platform_config=(
          deployment_config_utils.get_pipeline_platform_config(
              deployment_config)),
  )
The provided code snippet includes necessary dependencies for implementing the `register_executions_from_existing_executions` function. Write a Python function `def register_executions_from_existing_executions( metadata_handle: metadata.Metadata, pipeline: pipeline_pb2.Pipeline, node: node_proto_view.NodeProtoView, existing_executions: List[metadata_store_pb2.Execution], ) -> Sequence[metadata_store_pb2.Execution]` to solve the following problem:
Registers a list of new executions from a list of failed/canceled executions.
Here is the function:
def register_executions_from_existing_executions(
    metadata_handle: metadata.Metadata,
    pipeline: pipeline_pb2.Pipeline,
    node: node_proto_view.NodeProtoView,
    existing_executions: List[metadata_store_pb2.Execution],
) -> Sequence[metadata_store_pb2.Execution]:
    """Registers a list of new executions from a list of failed/canceled executions.

    For every failed/canceled execution, a fresh NEW-state execution is created
    that reuses the original execution's input artifacts, with dynamic exec
    properties re-resolved against those inputs. The new executions are linked
    to the contexts of the first existing execution.

    Args:
      metadata_handle: A handler to access MLMD db.
      pipeline: The pipeline proto the node belongs to.
      node: The pipeline node whose executions are being re-registered.
      existing_executions: The failed/canceled executions to retry.

    Returns:
      The newly registered executions (state NEW), one per existing execution;
      empty if `existing_executions` is empty.

    Raises:
      exceptions.InputResolutionError: If dynamic parameter resolution fails
        for any of the existing executions' inputs.
    """
    if not existing_executions:
        return []
    exec_properties = resolve_exec_properties(node)
    exec_type = common_utils.register_type_if_not_exist(
        metadata_handle, node.node_info.type
    )
    new_executions = []
    input_artifacts = []
    for existing_execution in existing_executions:
        input_artifacts_for_existing_execution = execution_lib.get_input_artifacts(
            metadata_handle, existing_execution.id
        )
        try:
            # Dynamic exec properties depend on the resolved input artifacts,
            # so they must be re-resolved per existing execution.
            dynamic_exec_properties = inputs_utils.resolve_dynamic_parameters(
                node.parameters,
                _create_placeholder_context(
                    pipeline, node, input_artifacts_for_existing_execution
                ),
            )
        except exceptions.InputResolutionError as e:
            logging.exception(
                '[%s] Parameter resolution error: %s', node.node_info.id, e
            )
            raise
        # Dynamic properties take precedence over static ones on key collision.
        combined_exec_properties = {**exec_properties, **dynamic_exec_properties}
        logging.info(
            'exec properties for execution id: %s: %s',
            existing_execution.id,
            exec_properties,
        )
        logging.info(
            'dynamic exec properties for execution id: %s: %s',
            existing_execution.id,
            dynamic_exec_properties,
        )
        logging.info(
            'combined exec properties for execution id: %s: %s',
            existing_execution.id,
            combined_exec_properties,
        )
        new_execution = execution_lib.prepare_execution(
            metadata_handle=metadata_handle,
            execution_type=exec_type,
            state=metadata_store_pb2.Execution.NEW,
            exec_properties=combined_exec_properties,
            execution_name=str(uuid.uuid4()),
        )
        if node.execution_options.reset_stateful_working_dir:
            # TODO(b/258539860): We may consider removing stateful working dir
            # when users choose to NOT reuse it upon execution retries.
            stateful_working_dir_index = (
                outputs_utils.get_stateful_working_dir_index())
        else:
            # Potentially old executions may have been run under a different
            # state of stateful_working_dir but we only respect the current one
            # in this check. For SYNC pipelines this should only change after
            # an update, but for ASYNC it may happen after a stop/start.
            stateful_working_dir_index = outputs_utils.get_stateful_working_dir_index(
                existing_execution
            )
        # Only copy necessary custom_properties from the failed/canceled
        # execution.
        # LINT.IfChange(new_execution_custom_properties)
        data_types_utils.set_metadata_value(
            new_execution.custom_properties[constants.STATEFUL_WORKING_DIR_INDEX],
            stateful_working_dir_index,
        )
        new_execution.custom_properties[_EXTERNAL_EXECUTION_INDEX].CopyFrom(
            existing_execution.custom_properties[_EXTERNAL_EXECUTION_INDEX]
        )
        # LINT.ThenChange(:execution_custom_properties)
        new_executions.append(new_execution)
        input_artifacts.append(input_artifacts_for_existing_execution)
    # The contexts of the first existing execution are reused for every new
    # execution.
    contexts = metadata_handle.store.get_contexts_by_execution(
        existing_executions[0].id
    )
    return execution_lib.put_executions(
        metadata_handle,
        new_executions,
        contexts,
        input_artifacts_maps=input_artifacts,
    )
166,273 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
_EXTERNAL_EXECUTION_INDEX = '__external_execution_index__'
class InputAndParam:
    """A resolved set of input artifacts plus exec properties for one execution."""

    # Resolved input artifacts keyed by input name; None if not yet resolved.
    input_artifacts: Optional[typing_utils.ArtifactMultiMap] = None
    # Execution properties for the execution; None if not yet resolved.
    exec_properties: Optional[MutableMapping[str, types.ExecPropertyTypes]] = None
The provided code snippet includes necessary dependencies for implementing the `register_executions` function. Write a Python function `def register_executions( metadata_handle: metadata.Metadata, execution_type: metadata_store_pb2.ExecutionType, contexts: Sequence[metadata_store_pb2.Context], input_and_params: Sequence[InputAndParam], ) -> Sequence[metadata_store_pb2.Execution]` to solve the following problem:
Registers multiple executions in MLMD. Along with the execution: - the input artifacts will be linked to the executions. - the contexts will be linked to both the executions and its input artifacts. Args: metadata_handle: A handler to access MLMD. execution_type: The type of the execution. contexts: MLMD contexts to associate with the executions. input_and_params: A list of InputAndParams, which includes input_dicts (dictionaries of artifacts. One execution will be registered for each of the input_dict) and corresponding exec_properties. Returns: A list of MLMD executions that are registered in MLMD, with id populated. All registered executions have a state of NEW.
Here is the function:
def register_executions(
    metadata_handle: metadata.Metadata,
    execution_type: metadata_store_pb2.ExecutionType,
    contexts: Sequence[metadata_store_pb2.Context],
    input_and_params: Sequence[InputAndParam],
) -> Sequence[metadata_store_pb2.Execution]:
    """Registers multiple executions in MLMD.

    One NEW-state execution is registered per entry of `input_and_params`. The
    input artifacts of each entry are linked to its execution, and `contexts`
    are linked to both the executions and their input artifacts.

    Args:
      metadata_handle: A handler to access MLMD.
      execution_type: The type of the execution.
      contexts: MLMD contexts to associate with the executions.
      input_and_params: A list of InputAndParams, which includes input_dicts
        (dictionaries of artifacts. One execution will be registered for each
        of the input_dict) and corresponding exec_properties.

    Returns:
      A list of MLMD executions that are registered in MLMD, with id populated.
      All registered executions have a state of NEW.
    """
    registered_type = common_utils.register_type_if_not_exist(
        metadata_handle, execution_type
    )
    prepared_executions = []
    for idx, item in enumerate(input_and_params):
        new_execution = execution_lib.prepare_execution(
            metadata_handle,
            registered_type,
            metadata_store_pb2.Execution.NEW,
            item.exec_properties,
            execution_name=str(uuid.uuid4()),
        )
        # LINT.IfChange(execution_custom_properties)
        data_types_utils.set_metadata_value(
            new_execution.custom_properties[constants.STATEFUL_WORKING_DIR_INDEX],
            outputs_utils.get_stateful_working_dir_index(new_execution),
        )
        new_execution.custom_properties[_EXTERNAL_EXECUTION_INDEX].int_value = idx
        # LINT.ThenChange(:new_execution_custom_properties)
        prepared_executions.append(new_execution)
    if len(prepared_executions) != 1:
        return execution_lib.put_executions(
            metadata_handle,
            prepared_executions,
            contexts,
            [item.input_artifacts for item in input_and_params],
        )
    # Single-execution fast path.
    only_execution = execution_lib.put_execution(
        metadata_handle,
        prepared_executions[0],
        contexts,
        input_artifacts=input_and_params[0].input_artifacts,
    )
    return [only_execution]
166,274 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `update_external_artifact_type` function. Write a Python function `def update_external_artifact_type( local_mlmd_handle: metadata.Metadata, artifacts: Sequence[types.artifact.Artifact], ) -> Sequence[types.artifact.Artifact]` to solve the following problem:
Copies artifact types of external artifacts to local db. Args: local_mlmd_handle: A handle to access local MLMD db. artifacts: A list of artifacts. Returns: A list of updated artifacts
Here is the function:
def update_external_artifact_type(
    local_mlmd_handle: metadata.Metadata,
    artifacts: Sequence[types.artifact.Artifact],
) -> Sequence[types.artifact.Artifact]:
    """Copies artifact types of external artifacts to local db.

    Artifacts whose type already has an id are passed through untouched. For
    the rest, the type id is looked up in the local db by name and, when not
    found, the external type is written to the local db first.

    Args:
      local_mlmd_handle: A handle to access local MLMD db.
      artifacts: A list of artifacts.

    Returns:
      A list of updated artifacts
    """
    type_id_cache: Dict[str, int] = {}
    result = []
    for art in artifacts:
        if not art.artifact_type.HasField('id'):
            name = art.type_name
            if name not in type_id_cache:
                try:
                    type_id_cache[name] = local_mlmd_handle.store.get_artifact_type(
                        type_name=name
                    ).id
                except errors.NotFoundError:
                    # Type unknown locally: copy the external type definition in.
                    type_id_cache[name] = local_mlmd_handle.store.put_artifact_type(
                        art.artifact_type
                    )
            local_type_id = type_id_cache[name]
            art.type_id = local_type_id
            art.artifact_type.id = local_type_id
        result.append(art)
    return result
166,275 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
class InputAndParam:
    """A resolved set of input artifacts plus exec properties for one execution."""

    # Resolved input artifacts keyed by input name; None if not yet resolved.
    input_artifacts: Optional[typing_utils.ArtifactMultiMap] = None
    # Execution properties for the execution; None if not yet resolved.
    exec_properties: Optional[MutableMapping[str, types.ExecPropertyTypes]] = None
class ResolvedInfo:
    """Contexts and resolved input/param sets for a pipeline node."""

    # MLMD contexts associated with the node.
    contexts: List[metadata_store_pb2.Context]
    # One InputAndParam per prospective execution of the node.
    input_and_params: List[InputAndParam]
def get_executions(
    metadata_handle: metadata.Metadata,
    node: node_proto_view.NodeProtoView,
    limit: Optional[int] = None,
    backfill_token: str = '',
    additional_filters: Optional[List[str]] = None,
) -> List[metadata_store_pb2.Execution]:
    """Returns all executions for the given pipeline node.

    This finds all executions having the same set of contexts as the pipeline
    node.

    Args:
      metadata_handle: A handler to access MLMD db.
      node: The pipeline node for which to obtain executions.
      limit: limit the number of executions return by the function. Executions
        are ordered descendingly by CREATE_TIME, so the newest executions will
        return.
      backfill_token: If non-empty, only executions with custom property
        `__backfill_token__` set to the value are returned. Should only be set
        when backfilling in ASYNC mode.
      additional_filters: Additional filters to select executions.

    Returns:
      List of executions ordered descendingly by CREATE_TIME for the given
      node.
    """
    if not node.contexts.contexts:
        return []
    # Get all the contexts associated with the node.
    filter_query = q.And([])
    # "node" context or "pipeline_run" context is a strict sub-context of a
    # "pipeline" context thus we can remove "pipeline" context from the filter
    # query to improve performance.
    filter_contexts = node.contexts.contexts
    context_types = {context.type.name for context in filter_contexts}
    if (
        context_constants.PIPELINE_RUN_CONTEXT_TYPE_NAME in context_types
        or context_constants.NODE_CONTEXT_TYPE_NAME in context_types
    ):
        context_types.discard(context_constants.PIPELINE_CONTEXT_TYPE_NAME)
        # NOTE: the loop variable is deliberately named `ctx`, not `q` — the
        # latter would shadow the `filter_query_builder as q` module alias
        # inside the comprehension.
        filter_contexts = [
            ctx for ctx in filter_contexts if ctx.type.name in context_types
        ]
    for i, context_spec in enumerate(filter_contexts):
        context_type = context_spec.type.name
        context_name = data_types_utils.get_value(context_spec.name)
        filter_query.append(
            q.And([
                f"contexts_{i}.type = '{context_type}'",
                f"contexts_{i}.name = '{context_name}'",
            ])
        )
    if backfill_token:
        filter_query.append(
            (
                'custom_properties.__backfill_token__.string_value ='
                f" '{backfill_token}'"
            ),
        )
    if additional_filters:
        filter_query.extend(additional_filters)
    return metadata_handle.store.get_executions(
        list_options=mlmd.ListOptions(
            order_by=mlmd.OrderByField.CREATE_TIME,
            is_asc=False,
            filter_query=str(filter_query),
            limit=limit,
        )
    )
The provided code snippet includes necessary dependencies for implementing the `get_unprocessed_inputs` function. Write a Python function `def get_unprocessed_inputs( metadata_handle: metadata.Metadata, resolved_info: ResolvedInfo, node: node_proto_view.NodeProtoView, ) -> List[InputAndParam]` to solve the following problem:
Get a list of unprocessed input from resolved_info. Args: metadata_handle: A handle to access local MLMD db. resolved_info: Resolved input of a node. It may contain processed and unprocessed input. node: The pipeline node of the input. Returns: A list of InputAndParam that have not been processed.
Here is the function:
def get_unprocessed_inputs(
    metadata_handle: metadata.Metadata,
    resolved_info: ResolvedInfo,
    node: node_proto_view.NodeProtoView,
) -> List[InputAndParam]:
    """Get a list of unprocessed input from resolved_info.

    An input is "processed" when a successful (COMPLETE/CACHED) execution
    exists whose input artifact ids match it exactly (ignoring no-trigger
    keys). Inputs whose failed executions already reached the node's retry
    limit are dropped as well.

    Args:
      metadata_handle: A handle to access local MLMD db.
      resolved_info: Resolved input of a node. It may contain processed and
        unprocessed input.
      node: The pipeline node of the input.

    Returns:
      A list of InputAndParam that have not been processed.
    """
    if not resolved_info.input_and_params:
        return []
    # Finds out the keys that should be ignored.
    input_triggers = node.execution_options.async_trigger.input_triggers
    ignore_keys = {
        k for k, t in input_triggers.items() if k.startswith('_') or t.no_trigger
    }
    max_timestamp_in_each_input: List[int] = []
    for input_and_param in resolved_info.input_and_params:
        max_timestamp_in_one_input = 0
        for key, artifacts in input_and_param.input_artifacts.items():
            if key in ignore_keys or not artifacts:
                continue
            max_timestamp_in_one_input = max(
                max_timestamp_in_one_input,
                max(a.mlmd_artifact.create_time_since_epoch for a in artifacts),
            )
        max_timestamp_in_each_input.append(max_timestamp_in_one_input)
    # A resolved input whose artifacts with max timestamp T is not an input
    # to a execution having creation timestamp < T. So, we only need to
    # get executions with timestamp larger than the minimum timestamp of all
    # the inputs in resolved_info.
    executions = get_executions(
        metadata_handle,
        node,
        additional_filters=[
            (
                'create_time_since_epoch >='
                f' {min(max_timestamp_in_each_input, default=0)}'
            ),
            q.Or([
                'last_known_state = COMPLETE',
                'last_known_state = CACHED',
                'last_known_state = FAILED',
                'last_known_state = CANCELED',
            ]),
        ],
    )
    # Get the successful, failed and canceled executions, and group them by
    # input. The grouping key is a canonical JSON encoding of the sorted
    # input-artifact-id multimap.
    successful_executions_by_input = collections.defaultdict(list)
    failed_executions_by_input = collections.defaultdict(list)
    cancelled_executions_by_input = collections.defaultdict(list)
    events = metadata_handle.store.get_events_by_execution_ids(
        [e.id for e in executions]
    )
    for execution in executions:
        input_events = [
            e
            for e in events
            if e.type == metadata_store_pb2.Event.INPUT
            and event_lib.is_valid_input_event(e)
            and e.execution_id == execution.id
        ]
        input_ids_by_key = event_lib.reconstruct_artifact_id_multimap(input_events)
        # Filters out the keys starting with '_' and the keys should be
        # ignored.
        input_ids_by_key = {
            k: tuple(sorted(v))
            for k, v in input_ids_by_key.items()
            if k not in ignore_keys
        }
        encoded_input = json.dumps(input_ids_by_key, sort_keys=True)
        if execution_lib.is_execution_successful(execution):
            successful_executions_by_input[encoded_input].append(execution)
        elif execution_lib.is_execution_failed(execution):
            failed_executions_by_input[encoded_input].append(execution)
        elif execution_lib.is_execution_canceled(execution):
            cancelled_executions_by_input[encoded_input].append(execution)
    # Some input artifacts are from external pipelines, so we need to find out
    # the external_id to id mapping in the local db.
    local_id_by_external_id: Dict[str, int] = {}
    for input_and_param in resolved_info.input_and_params:
        for artifact in itertools.chain(*input_and_param.input_artifacts.values()):
            if artifact.mlmd_artifact.external_id:
                # -1 is a placeholder until the local id is looked up below.
                local_id_by_external_id[artifact.mlmd_artifact.external_id] = -1
    if local_id_by_external_id:
        try:
            for artifact in metadata_handle.store.get_artifacts_by_external_ids(
                external_ids=local_id_by_external_id
            ):
                local_id_by_external_id[artifact.external_id] = artifact.id
        except errors.NotFoundError:
            # If all the external ids do not exist in local db, we get
            # NotFoundError. It is safe to pass, and we will handle them in the
            # following code.
            pass
        except Exception as e:  # pylint:disable=broad-except
            logging.exception('Error when getting artifacts by external ids: %s', e)
            return []
    # Finds out the unprocessed inputs.
    # By default, the retry limit in async pipeline is infinite.
    retry_limit = sys.maxsize
    if node.execution_options.HasField('max_execution_retries'):
        retry_limit = node.execution_options.max_execution_retries
    unprocessed_inputs = []
    for input_and_param in resolved_info.input_and_params:
        resolved_input_ids_by_key = collections.defaultdict(list)
        for key, artifacts in input_and_param.input_artifacts.items():
            for a in artifacts:
                if a.id:
                    resolved_input_ids_by_key[key].append(a.id)
                elif a.mlmd_artifact.external_id:
                    resolved_input_ids_by_key[key].append(
                        local_id_by_external_id[a.mlmd_artifact.external_id]
                    )
            resolved_input_ids_by_key[key] = tuple(resolved_input_ids_by_key[key])
        # Filters out the keys starting with '_' and the keys should be
        # ignored.
        resolved_input_ids_by_key = {
            k: tuple(sorted(v))
            for k, v in resolved_input_ids_by_key.items()
            if k not in ignore_keys
        }
        encoded_input = json.dumps(resolved_input_ids_by_key, sort_keys=True)
        if len(failed_executions_by_input[encoded_input]) >= retry_limit + 1:
            # This input has failed and has also reached its retry limit.
            logging.info(
                'Node %s has reach retry limit of %d.',
                node.node_info.id,
                retry_limit,
            )
        elif encoded_input not in successful_executions_by_input:
            # This input should be processed.
            failed_or_cancelled_executions = (
                failed_executions_by_input[encoded_input]
                + cancelled_executions_by_input[encoded_input]
            )
            # If the previous stateful_working_dir_index should be reused, save
            # the index into input_and_param.exec_properties
            if (
                not node.execution_options.reset_stateful_working_dir
                and failed_or_cancelled_executions
            ):
                execution_for_retry = execution_lib.sort_executions_newest_to_oldest(
                    failed_or_cancelled_executions
                )[0]
                if input_and_param.exec_properties is None:
                    input_and_param.exec_properties = {}
                input_and_param.exec_properties[
                    constants.STATEFUL_WORKING_DIR_INDEX
                ] = outputs_utils.get_stateful_working_dir_index(execution_for_retry)
            unprocessed_inputs.append(input_and_param)
    return unprocessed_inputs
166,276 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `interpret_status_from_failed_execution` function. Write a Python function `def interpret_status_from_failed_execution( execution: metadata_store_pb2.Execution, ) -> status_lib.Status` to solve the following problem:
Interprets `Status` from given failed execution. Args: execution: An execution with last_known_state=FAILED. Returns: A `Status` object interpreted from the execution state. Raises: ValueError: If the given execution has `last_known_state` other than `FAILED`.
Here is the function:
def interpret_status_from_failed_execution(
    execution: metadata_store_pb2.Execution,
) -> status_lib.Status:
    """Interprets `Status` from given failed execution.

    The stored execution result, when parseable, carries the most proximate
    failure cause; otherwise the error code / message custom properties are
    consulted.

    Args:
      execution: An execution with last_known_state=FAILED.

    Returns:
      A `Status` object interpreted from the execution state.

    Raises:
      ValueError: If the given execution has `last_known_state` other than
        `FAILED`.
    """
    if not execution_lib.is_execution_failed(execution):
        raise ValueError(
            'Must be called with an execution with last_known_state = FAILED.'
        )
    execution_result = execution_lib.get_execution_result(
        execution, ignore_parse_errors=True
    )
    if execution_result is not None:
        # A non-OK code is expected; an (unexpected) OK code becomes UNKNOWN.
        error_code = execution_result.code or status_lib.Code.UNKNOWN
        error_msg = execution_result.result_message or None
    else:
        code_property = execution.custom_properties.get(
            constants.EXECUTION_ERROR_CODE_KEY
        )
        if code_property is None:
            error_code = status_lib.Code.UNKNOWN
        else:
            # Here too, an (unexpected) OK code is accounted as UNKNOWN.
            error_code = (
                data_types_utils.get_metadata_value(code_property)
                or status_lib.Code.UNKNOWN
            )
        msg_property = execution.custom_properties.get(
            constants.EXECUTION_ERROR_MSG_KEY
        )
        error_msg = (
            None
            if msg_property is None
            else data_types_utils.get_metadata_value(msg_property)
        )
    error_msg = textwrap.shorten(error_msg, width=512) if error_msg else None
    return status_lib.Status(code=error_code, message=error_msg)
166,277 | import collections
import itertools
import json
import sys
import textwrap
from typing import Callable, Dict, Iterable, List, MutableMapping, Optional, Sequence, Type
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.compiler import constants as context_constants
from tfx.dsl.compiler import placeholder_utils
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.orchestration.portable.mlmd import filter_query_builder as q
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import proto_utils
from tfx.utils import status as status_lib
from tfx.utils import typing_utils
from tfx.orchestration.experimental.core import deployment_config_utils
import ml_metadata as mlmd
from ml_metadata import errors
from ml_metadata.proto import metadata_store_pb2
class InputAndParam:
    """A resolved set of input artifacts plus exec properties for one execution."""

    # Resolved input artifacts keyed by input name; None if not yet resolved.
    input_artifacts: Optional[typing_utils.ArtifactMultiMap] = None
    # Execution properties for the execution; None if not yet resolved.
    exec_properties: Optional[MutableMapping[str, types.ExecPropertyTypes]] = None
The provided code snippet includes necessary dependencies for implementing the `generate_tasks_from_one_input` function. Write a Python function `def generate_tasks_from_one_input( metadata_handle: metadata.Metadata, node: node_proto_view.NodeProtoView, execution: metadata_store_pb2.Execution, input_and_param: InputAndParam, contexts: Sequence[metadata_store_pb2.Context], pipeline: pipeline_pb2.Pipeline, execution_node_state: str, backfill_token: str = '', execution_commit_fn: Optional[ Callable[ [ Optional[metadata_store_pb2.Execution], metadata_store_pb2.Execution, ], None, ] ] = None, ) -> Sequence[task_lib.Task]` to solve the following problem:
Generates tasks for node an execution. Args: metadata_handle: Handle to interact with MLMD. node: Node to tasks for. execution: Metadata execution to generate tasks for. input_and_param: Inputs and param for node execution. contexts: Contexts for node execution. pipeline: Pipeline for this execution. execution_node_state: What state the execution should be set to. Should always be pstate.NodeState.RUNNING but we can't import pstate here due to circular dependencies. backfill_token: The backfill token for the execution, if applicable. execution_commit_fn: Optional function to be provided when the new execution is updated. Returns: A list of tasks for the node. Guaranteed to be in the form of: [UpdateNodeStateTask, ExecNodeTask].
Here is the function:
def generate_tasks_from_one_input(
    metadata_handle: metadata.Metadata,
    node: node_proto_view.NodeProtoView,
    execution: metadata_store_pb2.Execution,
    input_and_param: InputAndParam,
    contexts: Sequence[metadata_store_pb2.Context],
    pipeline: pipeline_pb2.Pipeline,
    execution_node_state: str,
    backfill_token: str = '',
    execution_commit_fn: Optional[
        Callable[
            [
                Optional[metadata_store_pb2.Execution],
                metadata_store_pb2.Execution,
            ],
            None,
        ]
    ] = None,
) -> Sequence[task_lib.Task]:
    """Generates tasks for a node execution.

    Args:
      metadata_handle: Handle to interact with MLMD.
      node: Node to generate tasks for.
      execution: Metadata execution to generate tasks for.
      input_and_param: Inputs and param for node execution.
      contexts: Contexts for node execution.
      pipeline: Pipeline for this execution.
      execution_node_state: What state the execution should be set to. Should
        always be pstate.NodeState.RUNNING but we can't import pstate here due
        to circular dependencies.
      backfill_token: The backfill token for the execution, if applicable.
      execution_commit_fn: Optional function to be provided when the new
        execution is updated.

    Returns:
      A list of tasks for the node. Guaranteed to be in the form of:
      [UpdateNodeStateTask, ExecNodeTask].
    """
    # Mark the MLMD execution RUNNING inside an atomic update before any tasks
    # are emitted.
    with mlmd_state.mlmd_execution_atomic_op(
        metadata_handle, execution.id, on_commit=execution_commit_fn
    ) as execution:
        execution.last_known_state = metadata_store_pb2.Execution.RUNNING
    outputs_resolver = outputs_utils.OutputsResolver(
        node,
        pipeline.pipeline_info,
        pipeline.runtime_spec,
        pipeline.execution_mode,
    )
    output_artifacts = outputs_resolver.generate_output_artifacts(execution.id)
    outputs_utils.make_output_dirs(output_artifacts)
    node_uid = task_lib.NodeUid.from_node(pipeline, node)
    tasks = []
    tasks.append(
        task_lib.UpdateNodeStateTask(
            node_uid=node_uid,
            state=execution_node_state,
            backfill_token=backfill_token,
        )
    )
    tasks.append(
        task_lib.ExecNodeTask(
            node_uid=node_uid,
            execution_id=execution.id,
            contexts=contexts,
            input_artifacts=input_and_param.input_artifacts,
            exec_properties=input_and_param.exec_properties,
            output_artifacts=output_artifacts,
            executor_output_uri=outputs_resolver.get_executor_output_uri(
                execution.id
            ),
            stateful_working_dir=outputs_resolver.get_stateful_working_directory(
                execution
            ),
            tmp_dir=outputs_resolver.make_tmp_dir(execution.id),
            pipeline=pipeline,
        )
    )
    return tasks
166,278 | import os
import tempfile
from typing import Optional, Callable
from absl import app
from absl import flags
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.experimental.core.testing import test_sync_pipeline
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `_get_mlmd_connection` function. Write a Python function `def _get_mlmd_connection(path: str) -> metadata.Metadata` to solve the following problem:
Returns a MetadataStore for performing MLMD API calls.
Here is the function:
def _get_mlmd_connection(path: str) -> metadata.Metadata:
  """Builds a `Metadata` handle backed by a new SQLite database at `path`.

  Args:
    path: Filesystem path for the SQLite database file; must not exist yet.

  Returns:
    A `Metadata` handle for performing MLMD API calls.

  Raises:
    IOError: If a file already exists at `path`.
  """
  if os.path.isfile(path):
    raise IOError('File already exists: %s' % path)
  config = metadata.sqlite_metadata_connection_config(path)
  # Force the `sqlite` oneof field to be set even with default sub-fields.
  config.sqlite.SetInParent()
  return metadata.Metadata(connection_config=config)
import os
import tempfile
from typing import Optional, Callable
from absl import app
from absl import flags
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.experimental.core.testing import test_sync_pipeline
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
FLAGS = flags.FLAGS
def _execute_nodes(handle: metadata.Metadata, pipeline: pipeline_pb2.Pipeline,
                   version: int):
  """Creates fake execution of nodes."""
  for node in pstate.get_all_nodes(pipeline):
    # The example-gen node gets a dedicated fake run; every other node gets a
    # generic fake (inactive) component output.
    if node.node_info.id == 'my_example_gen':
      test_utils.fake_example_gen_run_with_handle(handle, node, 1, version)
    else:
      test_utils.fake_component_output_with_handle(handle, node, active=False)
    node_uid = task_lib.NodeUid.from_node(pipeline, node)
    pipeline_state = test_utils.get_or_create_pipeline_state(handle, pipeline)
    # Mark the node COMPLETE so the fake execution looks finished.
    with pipeline_state, pipeline_state.node_state_update_context(
        node_uid) as node_state:
      node_state.update(
          pstate.NodeState.COMPLETE,
          status_lib.Status(code=status_lib.Code.OK, message='all ok'),
      )
def create_sample_pipeline(m: metadata.Metadata,
                           pipeline_id: str,
                           run_num: int,
                           export_ir_path: str = '',
                           external_ir_file: str = '',
                           deployment_config: Optional[message.Message] = None,
                           execute_nodes_func: Callable[
                               [metadata.Metadata, pipeline_pb2.Pipeline, int],
                               None] = _execute_nodes):
  """Creates a list of pipeline and node execution.

  Args:
    m: MLMD handle used to record the sample runs.
    pipeline_id: Id of the pipeline to create runs for.
    run_num: Number of pipeline runs to create.
    export_ir_path: If non-empty, directory where each run's IR is exported.
    external_ir_file: If non-empty, path of a pre-built IR file to use; when
      set, node executions are not faked.
    deployment_config: Optional deployment config embedded into the pipeline.
    execute_nodes_func: Callback that fakes node executions for a run.
  """
  ir_path = _get_ir_path(external_ir_file)
  for run_index in range(run_num):
    run_id = 'run%02d' % run_index
    pipeline = _test_pipeline(ir_path, pipeline_id, run_id, deployment_config)
    if export_ir_path:
      io_utils.write_pbtxt_file(
          os.path.join(export_ir_path, '%s_%s.pbtxt' % (pipeline_id, run_id)),
          pipeline)
    pipeline_state = pipeline_ops.initiate_pipeline_start(m, pipeline)
    if not external_ir_file:
      execute_nodes_func(m, pipeline, run_index)
    # Every run except the final one is marked COMPLETE so that only the last
    # pipeline run remains active.
    if run_index < run_num - 1:
      with pipeline_state:
        pipeline_state.set_pipeline_execution_state(
            metadata_store_pb2.Execution.COMPLETE)
def main_factory(mlmd_connection_func: Callable[[str], metadata.Metadata],
                 execute_nodes_func: Callable[
                     [metadata.Metadata, pipeline_pb2.Pipeline, int],
                     None] = _execute_nodes):
  """Builds a `main` function wired to the given MLMD connection factory.

  Args:
    mlmd_connection_func: Factory producing a `Metadata` handle from a path.
    execute_nodes_func: Callback used to fake node executions per run.

  Returns:
    An absl-style `main(argv)` function that creates the sample pipelines.
  """

  def main(argv):
    del argv  # Unused.
    with mlmd_connection_func(FLAGS.path) as m:
      depl_config = pipeline_pb2.IntermediateDeploymentConfig()
      fake_spec = pipeline_pb2.ExecutorSpec.PythonClassExecutorSpec(
          class_path='fake.ClassPath')
      # Both executor spec slots point at the same fake spec.
      for key in ('arg1', 'arg2'):
        depl_config.executor_specs[key].Pack(fake_spec)
      create_sample_pipeline(m, FLAGS.pipeline_id, FLAGS.pipeline_run_num,
                             FLAGS.export_ir_dir, FLAGS.ir_file, depl_config,
                             execute_nodes_func)

  return main
import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
_PIPELINE_OPS_LOCK = threading.RLock()
The provided code snippet includes necessary dependencies for implementing the `_pipeline_op` function. Write a Python function `def _pipeline_op(lock: bool = True)` to solve the following problem:
Decorator factory for pipeline ops.
Here is the function:
def _pipeline_op(lock: bool = True):
  """Decorator factory for pipeline ops.

  Args:
    lock: Whether the wrapped op must hold the global pipeline-ops lock.

  Returns:
    A decorator adding health checking, optional locking, and uniform error
    wrapping to a pipeline-op function.
  """

  def _decorator(fn):
    """Decorator for pipeline ops."""

    def _checked_call(*args, **kwargs):
      # Refuse to run any op while the orchestrator reports itself unhealthy.
      health_status = env.get_env().health_status()
      if health_status.code != status_lib.Code.OK:
        raise status_lib.StatusNotOkError(
            code=health_status.code,
            message=(
                'Operation cannot be completed because the Orchestrator is'
                f' unhealthy. Error: {health_status.message}'
            ),
        )
      try:
        return fn(*args, **kwargs)
      except Exception as e:  # pylint: disable=broad-except
        logging.exception('Error raised by `%s`:', fn.__name__)
        # StatusNotOkError propagates unchanged; anything else is normalized
        # into a StatusNotOkError with code UNKNOWN.
        if isinstance(e, status_lib.StatusNotOkError):
          raise
        raise status_lib.StatusNotOkError(
            code=status_lib.Code.UNKNOWN,
            message=f'`{fn.__name__}` error: {str(e)}',
        ) from e

    @functools.wraps(fn)
    def _wrapper(*args, **kwargs):
      if lock:
        with _PIPELINE_OPS_LOCK:
          return _checked_call(*args, **kwargs)
      return _checked_call(*args, **kwargs)

    return _wrapper

  return _decorator
import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
def stop_pipelines(
    mlmd_handle: metadata.Metadata,
    pipeline_uids: List[task_lib.PipelineUid],
    timeout_secs: Optional[float] = None,
    ignore_non_existent_or_inactive: Optional[bool] = False,
) -> None:
  """Stops multiple pipelines.

  Initiates pipeline stop operations and waits for the pipeline executions to
  be gracefully stopped in the orchestration loop.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline_uids: UIDs of the pipeline to be stopped.
    timeout_secs: Amount of time in seconds total to wait for all pipelines to
      stop. If `None`, waits indefinitely.
    ignore_non_existent_or_inactive: If a pipeline is not found or inactive,
      skips it. This is useful if pipeline uids contain nested pipelines.
      Stopping outer pipeline automatically stops inner pipelines, hence we may
      need to skip inner pipelines here.

  Raises:
    status_lib.StatusNotOkError: Failure to initiate pipeline stop.
  """
  pipeline_ids_str = ', '.join([x.pipeline_id for x in pipeline_uids])
  pipeline_states = []
  logging.info(
      'Received request to stop pipelines; pipeline ids: %s', pipeline_ids_str
  )
  # Phase 1: initiate the stop for every pipeline while holding the global
  # pipeline-ops lock, so no other op can race the state transitions.
  with _PIPELINE_OPS_LOCK:
    for pipeline_uid in pipeline_uids:
      try:
        with pstate.PipelineState.load(
            mlmd_handle, pipeline_uid
        ) as pipeline_state:
          pipeline_state.initiate_stop(
              status_lib.Status(
                  code=status_lib.Code.CANCELLED,
                  message='Cancellation requested by client.',
              )
          )
          pipeline_states.append(pipeline_state)
      except status_lib.StatusNotOkError as e:
        # NOT_FOUND covers both non-existent and inactive pipelines; only
        # tolerated when the caller explicitly opted in (e.g. nested
        # pipelines already stopped via their parent).
        if (
            e.code == status_lib.Code.NOT_FOUND
            and ignore_non_existent_or_inactive
        ):
          logging.info(
              'Ignored non-existent or inactive pipeline %s.', pipeline_uid
          )
          continue
        raise e
  logging.info(
      'Waiting for pipelines to be stopped; pipeline ids: %s', pipeline_ids_str
  )

  def _are_pipelines_inactivated() -> bool:
    # All successfully-initiated pipelines must be inactive for the wait to
    # finish; re-enters each pipeline state to read fresh MLMD data.
    for pipeline_state in pipeline_states:
      with pipeline_state:
        if pipeline_state.is_active():
          return False
    return True

  # Phase 2: poll outside the lock so the orchestration loop can make
  # progress on actually stopping the pipelines.
  _wait_for_predicate(
      _are_pipelines_inactivated,
      'inactivation of pipelines',
      _IN_MEMORY_PREDICATE_FN_DEFAULT_POLLING_INTERVAL_SECS,
      timeout_secs,
  )
  logging.info(
      'Done waiting for pipelines to be stopped; pipeline ids: %s',
      pipeline_ids_str,
  )
The provided code snippet includes necessary dependencies for implementing the `stop_pipeline` function. Write a Python function `def stop_pipeline( mlmd_handle: metadata.Metadata, pipeline_uid: task_lib.PipelineUid, timeout_secs: Optional[float] = None, ) -> None` to solve the following problem:
Stops a single pipeline. Convenience wrapper around stop_pipelines.
Here is the function:
def stop_pipeline(
    mlmd_handle: metadata.Metadata,
    pipeline_uid: task_lib.PipelineUid,
    timeout_secs: Optional[float] = None,
) -> None:
  """Stops a single pipeline.

  Convenience wrapper that delegates to `stop_pipelines` with a single-element
  uid list.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline_uid: UID of the pipeline to be stopped.
    timeout_secs: Amount of time in seconds to wait for the pipeline to stop.
      If `None`, waits indefinitely.

  Raises:
    status_lib.StatusNotOkError: Failure to initiate pipeline stop.
  """
  return stop_pipelines(
      mlmd_handle=mlmd_handle,
      pipeline_uids=[pipeline_uid],
      timeout_secs=timeout_secs)
import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
)
The provided code snippet includes necessary dependencies for implementing the `initiate_node_start` function. Write a Python function `def initiate_node_start( mlmd_handle: metadata.Metadata, node_uid: task_lib.NodeUid ) -> pstate.PipelineState` to solve the following problem:
Initiates a node start operation for a pipeline node. Args: mlmd_handle: A handle to the MLMD db. node_uid: Uid of the node to be started. Returns: The `PipelineState` object upon success. Raises: status_lib.StatusNotOkError: Failure to initiate node start operation.
Here is the function:
def initiate_node_start(
    mlmd_handle: metadata.Metadata, node_uid: task_lib.NodeUid
) -> pstate.PipelineState:
  """Initiates a node start operation for a pipeline node.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: Uid of the node to be started.

  Returns:
    The `PipelineState` object upon success.

  Raises:
    status_lib.StatusNotOkError: Failure to initiate node start operation.
  """
  logging.info('Received request to start node; node uid: %s', node_uid)
  # Only transition to STARTED when the node's current state allows it; a
  # non-startable node is left untouched.
  with pstate.PipelineState.load(
      mlmd_handle, node_uid.pipeline_uid
  ) as pipeline_state, pipeline_state.node_state_update_context(
      node_uid
  ) as node_state:
    if node_state.is_startable():
      node_state.update(pstate.NodeState.STARTED)
  return pipeline_state
import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
)
The provided code snippet includes necessary dependencies for implementing the `initiate_node_backfill` function. Write a Python function `def initiate_node_backfill( mlmd_handle: metadata.Metadata, node_uid: task_lib.NodeUid ) -> None` to solve the following problem:
Initiates a node backfill operation for a pipeline node. Only works on ASYNC pipelines. Doesn't work on nodes within subpipelines. Args: mlmd_handle: A handle to the MLMD db. node_uid: Uid of the node to be backfilled. Returns: The `PipelineState` object upon success. Raises: status_lib.StatusNotOkError: Failure to initiate node backfill operation.
Here is the function:
def initiate_node_backfill(
    mlmd_handle: metadata.Metadata, node_uid: task_lib.NodeUid
) -> None:
  """Initiates a node backfill operation for a pipeline node.

  Only works on ASYNC pipelines. Doesn't work on nodes within subpipelines.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: Uid of the node to be backfilled.

  Raises:
    status_lib.StatusNotOkError: Failure to initiate node backfill operation.
  """
  logging.info('Received request to backfill node; node uid: %s', node_uid)
  with pstate.PipelineState.load(
      mlmd_handle, node_uid.pipeline_uid
  ) as pipeline_state:
    if pipeline_state.pipeline.execution_mode != pipeline_pb2.Pipeline.ASYNC:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INVALID_ARGUMENT,
          message=(
              'Can only backfill nodes in an ASYNC pipeline, but pipeline '
              f'{node_uid.pipeline_uid.pipeline_id} is not ASYNC'
          ),
      )
    with pipeline_state.node_state_update_context(node_uid) as node_state:
      # A node can only run one backfill at a time; the existing token must
      # be cleared (by stopping the node) before a new backfill starts.
      if node_state.backfill_token:
        raise status_lib.StatusNotOkError(
            code=status_lib.Code.INVALID_ARGUMENT,
            message=(
                f'Node {node_uid} is already in backfill mode with token '
                f'{node_state.backfill_token}. If you want to abort the '
                'backfill and start a new one, stop the node first.'
            ),
        )
      if node_state.is_backfillable():
        # Generate a unique backfill token for this request.
        # BUGFIX: was '%06s', which space-pads the random suffix and produced
        # tokens with embedded spaces (e.g. 'backfill-...-   123'); '%06d'
        # zero-pads the integer as intended.
        backfill_token = 'backfill-%s-%06d' % (
            datetime.datetime.now().strftime('%Y%m%d-%H%M%S'),
            random.randint(0, 999999),
        )
        node_state.update(
            pstate.NodeState.STARTED, backfill_token=backfill_token
        )
      else:
        raise status_lib.StatusNotOkError(
            code=status_lib.Code.INVALID_ARGUMENT,
            message=(
                'Can only backfill nodes in a stopped or failed state, '
                f'but node {node_uid} was in state {node_state.state}. '
                'Try stopping the node first.'
            ),
        )
import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
_PIPELINE_OPS_LOCK = threading.RLock()
def _check_nodes_exist(
    node_uids: Sequence[task_lib.NodeUid],
    pipeline: pipeline_pb2.Pipeline,
    op_name: str,
) -> None:
  """Raises an error if any of `node_uids` does not exist in the pipeline.

  Args:
    node_uids: Node UIDs expected to exist in the pipeline.
    pipeline: The pipeline IR to check against.
    op_name: Name of the calling operation, used in the error message.

  Raises:
    status_lib.StatusNotOkError: With code INVALID_ARGUMENT if any node id
      cannot be found in the pipeline IR.
  """
  node_id_set = set(n.node_id for n in node_uids)
  nodes = pstate.get_all_nodes(pipeline)
  filtered_nodes = [n for n in nodes if n.node_info.id in node_id_set]
  if len(filtered_nodes) != len(node_id_set):
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.INVALID_ARGUMENT,
        # BUGFIX: removed stray literal 'f' that preceded the interpolated op
        # name (was f'`f{op_name}`...', rendering e.g. '`fstop_node`').
        message=(
            f'`{op_name}` operation failed, cannot find node(s) '
            f'{", ".join(node_id_set)} in the pipeline IR.'
        ),
    )
def _wait_for_node_inactivation(
    pipeline_state: pstate.PipelineState,
    node_uid: task_lib.NodeUid,
    timeout_secs: Optional[float],
) -> None:
  """Waits until the given node reaches a terminal (inactive) state.

  Args:
    pipeline_state: Pipeline state.
    node_uid: Uid of the node whose inactivation is awaited.
    timeout_secs: Amount of time in seconds to wait. If `None`, waits
      indefinitely.

  Raises:
    StatusNotOkError: With error code `DEADLINE_EXCEEDED` if node is not
      inactive after waiting approx. `timeout_secs`.
  """
  # States in which a node is considered no longer active.
  terminal_states = frozenset([
      pstate.NodeState.COMPLETE,
      pstate.NodeState.FAILED,
      pstate.NodeState.SKIPPED,
      pstate.NodeState.STOPPED,
  ])

  def _node_is_inactive() -> bool:
    # Re-enter the pipeline state context to read a fresh node state.
    with pipeline_state:
      return pipeline_state.get_node_state(node_uid).state in terminal_states

  _wait_for_predicate(
      _node_is_inactive,
      'node inactivation',
      _IN_MEMORY_PREDICATE_FN_DEFAULT_POLLING_INTERVAL_SECS,
      timeout_secs,
  )
)
The provided code snippet includes necessary dependencies for implementing the `stop_node` function. Write a Python function `def stop_node( mlmd_handle: metadata.Metadata, node_uid: task_lib.NodeUid, timeout_secs: Optional[float] = None, ) -> None` to solve the following problem:
Stops a node. Initiates a node stop operation and waits for the node execution to become inactive. Args: mlmd_handle: A handle to the MLMD db. node_uid: Uid of the node to be stopped. timeout_secs: Amount of time in seconds to wait for node to stop. If `None`, waits indefinitely. Raises: status_lib.StatusNotOkError: Failure to stop the node.
Here is the function:
def stop_node(
    mlmd_handle: metadata.Metadata,
    node_uid: task_lib.NodeUid,
    timeout_secs: Optional[float] = None,
) -> None:
  """Stops a node.

  Initiates a node stop operation and waits for the node execution to become
  inactive.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: Uid of the node to be stopped.
    timeout_secs: Amount of time in seconds to wait for node to stop. If
      `None`, waits indefinitely.

  Raises:
    status_lib.StatusNotOkError: Failure to stop the node.
  """
  logging.info('Received request to stop node; node uid: %s', node_uid)
  # Initiate the stop under the global ops lock so no other op races the
  # node-state transition.
  with _PIPELINE_OPS_LOCK, pstate.PipelineState.load(
      mlmd_handle, node_uid.pipeline_uid
  ) as pipeline_state:
    _check_nodes_exist([node_uid], pipeline_state.pipeline, 'stop_node')
    cancel_status = status_lib.Status(
        code=status_lib.Code.CANCELLED,
        message='Cancellation requested by client.',
    )
    with pipeline_state.node_state_update_context(node_uid) as node_state:
      if node_state.is_stoppable():
        node_state.update(pstate.NodeState.STOPPING, cancel_status)
  # Wait until the node is stopped or time out.
  _wait_for_node_inactivation(
      pipeline_state, node_uid, timeout_secs=timeout_secs)
import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
)
The provided code snippet includes necessary dependencies for implementing the `resume_manual_node` function. Write a Python function `def resume_manual_node( mlmd_handle: metadata.Metadata, node_uid: task_lib.NodeUid ) -> None` to solve the following problem:
Resumes a manual node. Args: mlmd_handle: A handle to the MLMD db. node_uid: Uid of the manual node to be resumed. Raises: status_lib.StatusNotOkError: Failure to resume a manual node.
Here is the function:
def resume_manual_node(
    mlmd_handle: metadata.Metadata, node_uid: task_lib.NodeUid
) -> None:
  """Resumes a manual node.

  Marks the node's single active execution as COMPLETED so the manual task
  scheduler lets the node finish.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: Uid of the manual node to be resumed.

  Raises:
    status_lib.StatusNotOkError: Failure to resume a manual node.
  """
  logging.info('Received request to resume manual node; node uid: %s', node_uid)
  with pstate.PipelineState.load(
      mlmd_handle, node_uid.pipeline_uid
  ) as pipeline_state:
    matching_nodes = [
        n
        for n in pstate.get_all_nodes(pipeline_state.pipeline)
        if n.node_info.id == node_uid.node_id
    ]
    if len(matching_nodes) != 1:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.NOT_FOUND,
          message=f'Unable to find manual node to resume: {node_uid}',
      )
    node = matching_nodes[0]
    if node.node_info.type.name != constants.MANUAL_NODE_TYPE:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INVALID_ARGUMENT,
          message=(
              'Unable to resume a non-manual node. '
              f'Got non-manual node id: {node_uid}'
          ),
      )

  # A manual node is expected to have exactly one active execution.
  active_executions = [
      e
      for e in task_gen_utils.get_executions(mlmd_handle, node)
      if execution_lib.is_execution_active(e)
  ]
  if not active_executions:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.NOT_FOUND,
        message=f'Unable to find active manual node to resume: {node_uid}',
    )
  if len(active_executions) > 1:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.INTERNAL,
        message=(
            f'Unexpected multiple active executions for manual node: {node_uid}'
        ),
    )
  # Record COMPLETED in the execution's custom properties atomically; the
  # manual task scheduler reads this key to complete the node.
  with mlmd_state.mlmd_execution_atomic_op(
      mlmd_handle=mlmd_handle, execution_id=active_executions[0].id
  ) as execution:
    completed_state = manual_task_scheduler.ManualNodeState(
        state=manual_task_scheduler.ManualNodeState.COMPLETED
    )
    completed_state.set_mlmd_value(
        execution.custom_properties.get_or_create(
            manual_task_scheduler.NODE_STATE_PROPERTY_KEY
        )
    )
import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `delete_pipeline_run` function. Write a Python function `def delete_pipeline_run( mlmd_handle: metadata.Metadata, pipeline_id: str, pipeline_run_id: str ) -> None` to solve the following problem:
Deletes a pipeline run. Mark the pipeline run execution custom_priority['deleted'] to true and pipeline run output artifacts as DELETED. Args: mlmd_handle: A handle to the MLMD db. pipeline_id: id of the pipeline which has the pipeline run. pipeline_run_id: id of the pipeline run will be deleted. Raises: status_lib.StatusNotOkError: Failure to delete a pipeline run.
Here is the function:
def delete_pipeline_run(
    mlmd_handle: metadata.Metadata, pipeline_id: str, pipeline_run_id: str
) -> None:
  """Deletes a pipeline run.

  Marks the pipeline run execution's custom_properties['deleted'] as true and
  the pipeline run's output artifacts as DELETED in MLMD. Artifact payloads on
  disk are removed on a best-effort basis (only when the uri is a directory).

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline_id: id of the pipeline which has the pipeline run.
    pipeline_run_id: id of the pipeline run that will be deleted.

  Raises:
    status_lib.StatusNotOkError: Failure to delete a pipeline run. With code
      FAILED_PRECONDITION for ASYNC pipelines or still-running runs; with code
      NOT_FOUND if the run or its execution cannot be found.
  """
  try:
    pipeline_view = pstate.PipelineView.load(
        mlmd_handle, pipeline_id, pipeline_run_id
    )
    # Deletion is only defined for SYNC runs: an ASYNC pipeline has no
    # discrete "run" boundary to delete.
    if (
        pipeline_view.pipeline_execution_mode
        == pipeline_pb2.Pipeline.ExecutionMode.ASYNC
    ):
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.FAILED_PRECONDITION,
          message='delete pipeline run does not support ASYNC pipeline',
      )
    # Refuse to delete a run that is still RUNNING; the caller must stop it
    # first so orchestration does not race with the deletion.
    if (
        pipeline_view.execution.last_known_state
        == mlmd_state.metadata_store_pb2.Execution.State.RUNNING
    ):
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.FAILED_PRECONDITION,
          message=(
              "Tflex doesn't allow deleting the active running pipeline run,"
              ' please stop the pipeline run first.'
          ),
      )
    # mark executions as deleted using atomic op to avoid race condition.
    with mlmd_state.mlmd_execution_atomic_op(
        mlmd_handle=mlmd_handle,
        execution_id=pipeline_view.execution.id,
    ) as execution:
      if not execution:
        raise status_lib.StatusNotOkError(
            code=status_lib.Code.NOT_FOUND,
            message=(
                'Execution with given execution_id not found: '
                f'{pipeline_view.execution.id}'
            ),
        )
      # Soft delete: flag the run execution rather than removing the record.
      execution.custom_properties['deleted'].CopyFrom(
          mlmd_state.metadata_store_pb2.Value(bool_value=True)
      )
    # TODO(fangyuancai):consider using atomic operation when modify artifacts.
    # Flatten all output artifacts of every node execution in this run.
    artifacts = []
    artifacts_dict = pstate.get_all_node_artifacts(
        pipeline_view.pipeline, mlmd_handle
    )
    for _, node_artifacts in artifacts_dict.items():
      for _, execution_artifacts in node_artifacts.items():
        for _, artifact_list in execution_artifacts.items():
          artifacts.extend(artifact_list)
    for artifact in artifacts:
      artifact.state = mlmd_state.metadata_store_pb2.Artifact.State.DELETED
      # Best-effort payload cleanup: non-directory uris are left in place but
      # still marked DELETED in MLMD below.
      try:
        io_utils.delete_dir(artifact.uri)
      except Exception:  # pylint: disable=broad-exception-caught
        logging.warning(
            "The artifact's uri is not a directory. We will mark it as"
            ' DELETED in MLMD but keep the path'
        )
    mlmd_handle.store.put_artifacts(artifacts)
  except LookupError as e:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.NOT_FOUND, message=str(e)
    )
166,287 | import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
# Default number of seconds to sleep between successive checks of an
# in-memory predicate (see `_wait_for_predicate`).
_IN_MEMORY_PREDICATE_FN_DEFAULT_POLLING_INTERVAL_SECS = 1.0
def _initiate_pipeline_update(
    mlmd_handle: metadata.Metadata,
    pipeline: pipeline_pb2.Pipeline,
    update_options: pipeline_pb2.UpdateOptions,
) -> pstate.PipelineState:
  """Records an update request for the active run of the given pipeline.

  Loads the live `PipelineState` for the pipeline's UID and, while holding it,
  registers the new IR and update options so the orchestrator can apply them.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline: New pipeline IR to be applied.
    update_options: Selection of active nodes to be reloaded upon update.

  Returns:
    The `PipelineState` on which the update was initiated.
  """
  uid = task_lib.PipelineUid.from_pipeline(pipeline)
  with pstate.PipelineState.load(mlmd_handle, uid) as state:
    state.initiate_update(pipeline, update_options)
  return state
def _wait_for_predicate(
predicate_fn: Callable[[], bool],
waiting_for_desc: str,
polling_interval_secs: float,
timeout_secs: Optional[float],
) -> None:
"""Waits for `predicate_fn` to return `True` or until timeout seconds elapse."""
if timeout_secs is None:
while not predicate_fn():
logging.info(
'Sleeping %f sec(s) waiting for predicate: %s',
polling_interval_secs,
waiting_for_desc,
)
time.sleep(polling_interval_secs)
return
polling_interval_secs = min(polling_interval_secs, timeout_secs / 4)
end_time = time.time() + timeout_secs
while end_time - time.time() > 0:
if predicate_fn():
return
sleep_secs = max(0, min(polling_interval_secs, end_time - time.time()))
logging.info(
'Sleeping %f sec(s) waiting for predicate: %s',
sleep_secs,
waiting_for_desc,
)
time.sleep(sleep_secs)
raise status_lib.StatusNotOkError(
code=status_lib.Code.DEADLINE_EXCEEDED,
message=(
f'Timed out ({timeout_secs} secs) waiting for {waiting_for_desc}.'
),
)
)
The provided code snippet includes necessary dependencies for implementing the `update_pipeline` function. Write a Python function `def update_pipeline( mlmd_handle: metadata.Metadata, pipeline: pipeline_pb2.Pipeline, update_options: pipeline_pb2.UpdateOptions, timeout_secs: Optional[float] = None, ) -> None` to solve the following problem:
Updates an active pipeline with a new pipeline IR. Initiates a pipeline update operation and waits for it to finish. Args: mlmd_handle: A handle to the MLMD db. pipeline: New pipeline IR to be applied. update_options: Selection of active nodes to be reloaded upon update. timeout_secs: Timeout in seconds to wait for the update to finish. If `None`, waits indefinitely. Raises: status_lib.StatusNotOkError: Failure to update the pipeline.
Here is the function:
def update_pipeline(
    mlmd_handle: metadata.Metadata,
    pipeline: pipeline_pb2.Pipeline,
    update_options: pipeline_pb2.UpdateOptions,
    timeout_secs: Optional[float] = None,
) -> None:
  """Updates an active pipeline with a new pipeline IR.

  Records the update intent in MLMD and then blocks until the orchestrator has
  consumed it (or the run becomes inactive, or the timeout elapses).

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline: New pipeline IR to be applied.
    update_options: Selection of active nodes to be reloaded upon update.
    timeout_secs: Timeout in seconds to wait for the update to finish. If
      `None`, waits indefinitely.

  Raises:
    status_lib.StatusNotOkError: Failure to update the pipeline.
  """
  pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
  logging.info(
      'Received request to update pipeline; pipeline uid: %s', pipeline_uid
  )
  pipeline_state = _initiate_pipeline_update(
      mlmd_handle, pipeline, update_options
  )

  def _update_consumed_or_run_inactive() -> bool:
    with pipeline_state:
      if not pipeline_state.is_active():
        # Once the run is no longer active, whether or not the update was
        # applied is irrelevant; stop waiting.
        return True
      return not pipeline_state.is_update_initiated()

  logging.info('Waiting for pipeline update; pipeline uid: %s', pipeline_uid)
  _wait_for_predicate(
      _update_consumed_or_run_inactive,
      'pipeline update',
      _IN_MEMORY_PREDICATE_FN_DEFAULT_POLLING_INTERVAL_SECS,
      timeout_secs,
  )
  logging.info(
      'Done waiting for pipeline update; pipeline uid: %s', pipeline_uid
  )
166,288 | import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
def skip_nodes(
    mlmd_handle: metadata.Metadata, node_uids: Sequence[task_lib.NodeUid]
) -> None:
  """Marks node executions to be skipped.

  All requested nodes must belong to the same pipeline; nodes already in the
  SKIPPED state are left untouched.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uids: UIDs of the nodes to skip.

  Raises:
    status_lib.StatusNotOkError: If the nodes span more than one pipeline, or
      a node's current state does not allow programmatic skipping.
  """
  # All node_uids must have the same pipeline_uid.
  unique_pipeline_uids = {n.pipeline_uid for n in node_uids}
  if len(unique_pipeline_uids) != 1:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.INVALID_ARGUMENT,
        message='Can skip nodes of a single pipeline at once.',
    )
  pipeline_uid = unique_pipeline_uids.pop()
  with pstate.PipelineState.load(mlmd_handle, pipeline_uid) as pipeline_state:
    _check_nodes_exist(node_uids, pipeline_state.pipeline, 'skip_nodes')
    for node_uid in node_uids:
      with pipeline_state.node_state_update_context(node_uid) as node_state:
        if node_state.state == pstate.NodeState.SKIPPED:
          # Already skipped; nothing to do for this node.
          continue
        if not node_state.is_programmatically_skippable():
          raise status_lib.StatusNotOkError(
              code=status_lib.Code.FAILED_PRECONDITION,
              message=(
                  f'Node in state {node_state.state} is not programmatically'
                  ' skippable.'
              ),
          )
        node_state.update(
            pstate.NodeState.SKIPPED,
            status_lib.Status(
                code=status_lib.Code.OK,
                message='Node skipped by client request.',
            ),
        )
def _get_previously_skipped_nodes(
    reused_pipeline_view: Optional[pstate.PipelineView],
) -> List[str]:
  """Returns id of nodes skipped in previous pipeline run due to conditional."""
  if reused_pipeline_view:
    node_states = reused_pipeline_view.get_node_states_dict()
    previous_node_states = reused_pipeline_view.get_previous_node_states_dict()
  else:
    node_states = {}
    previous_node_states = {}
  # A node counts as skipped if it was SKIPPED in either the current or the
  # previous node-state map of the reused run.
  return [
      node_id
      for node_id, node_state in itertools.chain(
          node_states.items(), previous_node_states.items()
      )
      if node_state.state == pstate.NodeState.SKIPPED
  ]
def _load_reused_pipeline_view(
    mlmd_handle: metadata.Metadata,
    pipeline: pipeline_pb2.Pipeline,
    snapshot_settings: pipeline_pb2.SnapshotSettings,
) -> Optional[pstate.PipelineView]:
  """Loads pipeline view of the pipeline reused for partial pipeline run.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline: IR of the pipeline whose previous run should be reused.
    snapshot_settings: If a `base_pipeline_run_strategy` is set, its
      `base_run_id` selects the run to reuse; otherwise the most recent run is
      loaded.

  Returns:
    The `PipelineView` of the base run, or `None` when no previous run exists
    (a previous run is not strictly required for a partial run).

  Raises:
    status_lib.StatusNotOkError: With code FAILED_PRECONDITION if the base run
      is not a SYNC run or is still active; other load failures are
      re-raised unchanged.
  """
  base_run_id = None
  pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
  if snapshot_settings.HasField('base_pipeline_run_strategy'):
    base_run_id = snapshot_settings.base_pipeline_run_strategy.base_run_id
  try:
    reused_pipeline_view = pstate.PipelineView.load(
        mlmd_handle=mlmd_handle,
        pipeline_id=pipeline_uid.pipeline_id,
        pipeline_run_id=base_run_id,
        # If current pipeline run is allowed and base_run_id is not specified,
        # reuse the most recent completed run.
        non_active_only=env.get_env().concurrent_pipeline_runs_enabled(),
    )
  except status_lib.StatusNotOkError as e:
    if e.code == status_lib.Code.NOT_FOUND:
      # A previous pipeline run is not strictly required, since users are
      # allowed to start a partial run without reusing any nodes. Returns None
      # to delay the error handling to caller function.
      logging.info(e.message)
      return None
    else:
      raise
  # Partial-run reuse is only defined for SYNC runs.
  if reused_pipeline_view.pipeline.execution_mode != pipeline_pb2.Pipeline.SYNC:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.FAILED_PRECONDITION,
        message=(
            'Only SYNC pipeline execution modes supported; previous pipeline '
            'run has execution mode: '
            f'{reused_pipeline_view.pipeline.execution_mode}'
        ),
    )
  # An active base run cannot be snapshotted consistently.
  if execution_lib.is_execution_active(reused_pipeline_view.execution):
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.FAILED_PRECONDITION,
        message=(
            'The previous pipeline run'
            f' {reused_pipeline_view.pipeline_run_id} is still active.'
        ),
    )
  return reused_pipeline_view
The provided code snippet includes necessary dependencies for implementing the `resume_pipeline` function. Write a Python function `def resume_pipeline( mlmd_handle: metadata.Metadata, pipeline: pipeline_pb2.Pipeline, run_id: Optional[str] = None, ) -> pstate.PipelineState` to solve the following problem:
Resumes a pipeline run from previously failed nodes. Upon success, MLMD is updated to signal that the pipeline must be started. Args: mlmd_handle: A handle to the MLMD db. pipeline: IR of the pipeline to resume. run_id: the run_id of the pipeline run to resume. Returns: The `PipelineState` object upon success. Raises: status_lib.StatusNotOkError: Failure to resume pipeline. With code `ALREADY_EXISTS` if a pipeline is already running. With code `status_lib.Code.NOT_FOUND` if a previous pipeline run is not found for resuming. With code 'INVALID_ARGUMENT' if concurrent pipeline runs are enabled but pipeline run id is missing.
Here is the function:
def resume_pipeline(
    mlmd_handle: metadata.Metadata,
    pipeline: pipeline_pb2.Pipeline,
    run_id: Optional[str] = None,
) -> pstate.PipelineState:
  """Resumes a pipeline run from previously failed nodes.

  Upon success, MLMD is updated to signal that the pipeline must be started.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline: IR of the pipeline to resume.
    run_id: the run_id of the pipeline run to resume.

  Returns:
    The `PipelineState` object upon success.

  Raises:
    status_lib.StatusNotOkError: Failure to resume pipeline. With code
      `ALREADY_EXISTS` if a pipeline is already running. With code
      `NOT_FOUND` if a previous pipeline run is not found for resuming. With
      code `INVALID_ARGUMENT` if concurrent pipeline runs are enabled but
      pipeline run id is missing, or if partial-run marking/snapshot arguments
      are invalid. With code `FAILED_PRECONDITION` if the pipeline is not a
      SYNC pipeline or snapshotting preconditions fail.
  """
  logging.info(
      'Received request to resume pipeline; pipeline uid: %s',
      task_lib.PipelineUid.from_pipeline(pipeline),
  )
  # Resuming is only defined for SYNC runs.
  if pipeline.execution_mode != pipeline_pb2.Pipeline.SYNC:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.FAILED_PRECONDITION,
        message=(
            'Only SYNC pipeline execution modes supported; '
            f'found pipeline with execution mode: {pipeline.execution_mode}'
        ),
    )
  # With concurrent runs enabled there may be several candidate base runs, so
  # the caller must disambiguate by providing run_id explicitly.
  if (
      env.get_env().concurrent_pipeline_runs_enabled()
      and not run_id
  ):
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.INVALID_ARGUMENT,
        message=(
            'Pipeline Run ID of the old pipeline to resume must be '
            'provided when concurrent pipeline runs are enabled.'
        ),
    )
  # Choose the base run to reuse: the explicitly named run if given, otherwise
  # the latest snapshot strategy.
  if run_id:
    snapshot_settings = pipeline_pb2.SnapshotSettings()
    partial_run_utils.set_base_pipeline_run_strategy(
        snapshot_settings, run_id
    )
  else:
    snapshot_settings = partial_run_utils.latest_pipeline_snapshot_settings()
  latest_pipeline_view = _load_reused_pipeline_view(
      mlmd_handle, pipeline, snapshot_settings
  )
  if not latest_pipeline_view:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.NOT_FOUND,
        message='Pipeline failed to resume. No previous pipeline run found.',
    )
  # TODO(b/200206549): Remove once testing is complete
  # Get succeeded nodes in latest pipeline run.
  previously_succeeded_nodes = []
  for node, node_state in latest_pipeline_view.get_node_states_dict().items():
    if node_state.is_success():
      previously_succeeded_nodes.append(node)
  pipeline_nodes = [
      node.node_info.id for node in pstate.get_all_nodes(pipeline)
  ]
  # Mark nodes using partial pipeline run lib.
  # Nodes marked as SKIPPED (due to conditional) do not have an execution
  # registered in MLMD, so we skip their snapshotting step.
  try:
    pipeline = partial_run_utils.mark_pipeline(
        pipeline,
        from_nodes=pipeline_nodes,
        to_nodes=pipeline_nodes,
        skip_nodes=previously_succeeded_nodes,
        skip_snapshot_nodes=_get_previously_skipped_nodes(
            latest_pipeline_view
        ),
        snapshot_settings=snapshot_settings,
    )
  except ValueError as e:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.INVALID_ARGUMENT, message=str(e)
    )
  # Snapshot outputs of the reused nodes from the base run into this run.
  if pipeline.runtime_spec.HasField('snapshot_settings'):
    try:
      partial_run_utils.snapshot(
          mlmd_handle, pipeline, latest_pipeline_view.pipeline_run_id
      )
    except ValueError as e:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INVALID_ARGUMENT, message=str(e)
      )
    except LookupError as e:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.FAILED_PRECONDITION, message=str(e)
      )
  return pstate.PipelineState.new(
      mlmd_handle, pipeline, reused_pipeline_view=latest_pipeline_view
  )
166,289 | import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
def _recursively_revive_pipelines(
    mlmd_handle: metadata.Metadata,
    pipeline_state: pstate.PipelineState,
) -> pstate.PipelineState:
  """Recursively revives all pipelines, reusing executions if present."""
  with pipeline_state:
    nodes = pstate.get_all_nodes(pipeline_state.pipeline)
    node_by_name = {node.node_info.id: node for node in nodes}
    # TODO(b/272015049): Add support for manager start nodes.
    nodes_to_start = [
        node_uid
        for node_uid, state in pipeline_state.get_node_states_dict().items()
        if state.is_startable()
    ]
    logging.info(
        'The following nodes will be attempted to be started: %s',
        [node.node_id for node in nodes_to_start],
    )
    for node_uid in nodes_to_start:
      new_node_state = pstate.NodeState.STARTED
      node = node_by_name[node_uid.node_id]
      # Subpipelines are represented in their parent pipeline as node,
      # so to revive the full pipeline in place we need to peer into the
      # subpipeline.
      if isinstance(node, node_proto_view.ComposablePipelineProtoView):
        subpipeline_base_run_id = (
            node.raw_proto().runtime_spec.pipeline_run_id.field_value.string_value
        )
        logging.info(
            '%s is a subpipeline, run_id: %s',
            node.node_info.id,
            subpipeline_base_run_id,
        )
        # Subpipeline run id's are structured like:
        # ${SUBPIPELINE_ID}_${PARENT_PIPELINE_ID}_${SUBPIPELINE_EXECUTION_ID}
        # So we need to determine the execution id for the pipeline so it can
        # be revived. If there's no execution found then assume it hasn't been
        # run so it can be marked as STARTED.
        executions = task_gen_utils.get_executions(mlmd_handle, node)
        latest_execution_set = task_gen_utils.get_latest_executions_set(
            executions
        )
        logging.info(
            'Executions for subpipeline %s: %s',
            node.node_info.id,
            [
                f'{e.id}: state:'
                f' {metadata_store_pb2.Execution.State.Name(e.last_known_state)}'
                for e in latest_execution_set
            ],
        )
        if not latest_execution_set:
          logging.info(
              'No executions found for subpipeline %s, marking as STARTED.',
              node.node_info.id,
          )
          new_node_state = pstate.NodeState.STARTED
        elif all(
            execution_lib.is_execution_successful(execution)
            for execution in latest_execution_set
        ):
          logging.info(
              'All executions in subpipeline %s were SUCCESSFUL, will mark as'
              ' COMPLETE.',
              node.node_info.id,
          )
          new_node_state = pstate.NodeState.COMPLETE
        else:
          # Mark all subpipeline executions as NEW, and the node state as
          # RUNNING.
          new_node_state = pstate.NodeState.RUNNING
          non_successful_executions = [
              e
              for e in latest_execution_set
              if not execution_lib.is_execution_successful(e)
          ]
          for execution in non_successful_executions:
            # TODO: b/324962451 - Consolidate all subpipeline run naming into a
            # utility function.
            new_run_id = f'{subpipeline_base_run_id}_{execution.id}'
            # Potentially, a subpipeline execution can be CANCELLED but have
            # never started, for instance if it's in the second iteration of
            # ForEach. In this case we *do not* want to revive recursively, as
            # there is no pipeline run started.
            try:
              subpipeline_state = pstate.PipelineState.load_run(
                  mlmd_handle, pipeline_id=node.node_info.id, run_id=new_run_id
              )
            except status_lib.StatusNotOkError:
              logging.info(
                  'Failed to load run %s of pipeline %s. Assuming there is no'
                  ' existing run.',
                  new_run_id,
                  node.node_info.id,
              )
            else:
              _recursively_revive_pipelines(
                  mlmd_handle,
                  subpipeline_state,
              )
            # Mark the execution as NEW and the node state as RUNNING so we can
            # re-use the existing execution during task generation.
            with mlmd_state.mlmd_execution_atomic_op(
                mlmd_handle, execution.id
            ) as execution:
              logging.info(
                  'Execution for subpipeline %s: %s. Changing from state %s'
                  ' to %s.',
                  node.node_info.id,
                  execution.id,
                  metadata_store_pb2.Execution.State.Name(
                      execution.last_known_state
                  ),
                  metadata_store_pb2.Execution.State.Name(
                      metadata_store_pb2.Execution.State.NEW
                  ),
              )
              execution.last_known_state = (
                  metadata_store_pb2.Execution.State.NEW
              )
              # Clear stale error details from the failed attempt so they do
              # not leak into the revived run.
              if execution.custom_properties.get(
                  constants.EXECUTION_ERROR_CODE_KEY
              ):
                del execution.custom_properties[
                    constants.EXECUTION_ERROR_CODE_KEY
                ]
              if execution.custom_properties.get(
                  constants.EXECUTION_ERROR_MSG_KEY
              ):
                del execution.custom_properties[
                    constants.EXECUTION_ERROR_MSG_KEY
                ]
      with pipeline_state.node_state_update_context(node_uid) as node_state:
        node_state.update(new_node_state)
    pipeline_state.initiate_resume()
    new_pipeline_state = metadata_store_pb2.Execution.State.NEW
    pipeline_state.set_pipeline_execution_state(new_pipeline_state)
    return pipeline_state
The provided code snippet includes necessary dependencies for implementing the `revive_pipeline_run` function. Write a Python function `def revive_pipeline_run( mlmd_handle: metadata.Metadata, pipeline_id: str, pipeline_run_id: str, pipeline_to_update_with: Optional[pipeline_pb2.Pipeline] = None, ) -> pstate.PipelineState` to solve the following problem:
Revives a pipeline run from previously failed nodes. Args: mlmd_handle: A handle to the MLMD db. pipeline_id: The id (name) of the pipeline to resume. pipeline_run_id: the run_id of the pipeline run to resume. pipeline_to_update_with: Optionally an IR to update to for the revived run. Returns: The `PipelineState` object upon success. Raises: status_lib.StatusNotOkError: Failure to resume pipeline. With code `ALREADY_EXISTS` if a pipeline is already running. With code `status_lib.Code.FAILED_PRECONDITION` if a previous pipeline run is not found for resuming. With code 'INVALID_ARGUMENT' if trying to revive a pipeline run while there's another active run and concurrent runs are not enabled.
Here is the function:
def revive_pipeline_run(
    mlmd_handle: metadata.Metadata,
    pipeline_id: str,
    pipeline_run_id: str,
    pipeline_to_update_with: Optional[pipeline_pb2.Pipeline] = None,
) -> pstate.PipelineState:
  """Revives a pipeline run from previously failed nodes.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline_id: The id (name) of the pipeline to resume.
    pipeline_run_id: the run_id of the pipeline run to resume.
    pipeline_to_update_with: Optionally an IR to update to for the revived run.

  Returns:
    The `PipelineState` object upon success.

  Raises:
    status_lib.StatusNotOkError: Failure to resume pipeline. With code
      `ALREADY_EXISTS` if a pipeline is already running. With code
      `status_lib.Code.FAILED_PRECONDITION` if the run is not a SYNC run.
      With code 'INVALID_ARGUMENT' if trying to revive a pipeline run while
      there's another active run and concurrent runs are not enabled.
  """
  logging.info(
      'Received request to revive run %s of pipeline %s',
      pipeline_run_id,
      pipeline_id,
  )
  with pstate.PipelineState.load_run(
      mlmd_handle, pipeline_id=pipeline_id, run_id=pipeline_run_id
  ) as pipeline_state:
    pipeline = pipeline_state.pipeline
    # Reviving is only defined for SYNC runs.
    if pipeline.execution_mode != pipeline_pb2.Pipeline.SYNC:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.FAILED_PRECONDITION,
          message=(
              'Only SYNC pipeline execution modes supported; '
              f'but pipeline had execution mode: {pipeline.execution_mode}'
          ),
      )
    # A run that is still active cannot be revived; it must finish or be
    # stopped first.
    if pipeline_state.is_active():
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.ALREADY_EXISTS,
          message='Cannot revive a live pipeline run.',
      )
    # Without concurrent-runs support, a revive would conflict with any other
    # active run of this or another pipeline.
    if not env.get_env().concurrent_pipeline_runs_enabled() and (
        all_active := pstate.PipelineState.load_all_active(mlmd_handle)
    ):
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INVALID_ARGUMENT,
          message=(
              'Concurrent runs must be enabled to revive a pipeline run while'
              ' another run is active. Active runs: '
              f'{[p.pipeline_run_id for p in all_active]}'
          ),
      )
    # Since the pipeline is not active we can apply the update right away.
    if pipeline_to_update_with is not None:
      logging.info('Trying to update during revive')
      pipeline_state.initiate_update(
          pipeline_to_update_with, pipeline_pb2.UpdateOptions()
      )
      logging.info('Initiated update')
      pipeline_state.apply_pipeline_update()
      logging.info('Applied update')
    # Recursively restart failed nodes (including nested subpipelines),
    # reusing their existing executions where possible.
    revived_pipeline_state = _recursively_revive_pipelines(
        mlmd_handle, pipeline_state
    )
    return revived_pipeline_state
166,290 | import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `filter_by_pipeline_uid` function. Write a Python function `def filter_by_pipeline_uid( pipeline_uid: task_lib.PipelineUid, ) -> Callable[[pstate.PipelineState], bool]` to solve the following problem:
Returns filter_fn for orchestrate for the given pipeline_uid.
Here is the function:
def filter_by_pipeline_uid(
    pipeline_uid: task_lib.PipelineUid,
) -> Callable[[pstate.PipelineState], bool]:
  """Builds an orchestrate filter_fn matching only the given pipeline_uid."""

  def _matches(pipeline_state: pstate.PipelineState) -> bool:
    return pipeline_state.pipeline_uid == pipeline_uid

  return _matches
166,291 | import collections
import contextlib
import copy
import dataclasses
import datetime
import functools
import itertools
import os
import random
import threading
import time
from typing import Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.dsl.io import filesystem
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import artifact_lib
from tfx.orchestration.portable.mlmd import event_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import io_utils
from tfx.utils import status as status_lib
from ml_metadata import errors as mlmd_errors
from ml_metadata.proto import metadata_store_pb2
def _get_mlmd_protos_for_execution(
    mlmd_handle: metadata.Metadata,
    execution_id: int,
    output_key: str,
) -> _MLMDProtos:
  """Gets MLMD protos associated with the execution ID and output key.

  Args:
    mlmd_handle: A handle to the MLMD database.
    execution_id: The execution ID.
    output_key: The output key.

  Returns:
    A _MLMDProtos struct holding the single REFERENCE artifact and any LIVE
    intermediate artifacts produced under `output_key` by the execution.

  Raises:
    status_lib.StatusNotOkError: If the MLMD query fails, if more than one
      REFERENCE artifact exists for the output key (ALREADY_EXISTS), or if no
      REFERENCE artifact is found (NOT_FOUND).
  """
  # Get the LineageGraph associated with the execution. max_num_hops=1 in the
  # DOWNSTREAM direction fetches only the execution's direct outputs.
  try:
    lineage_graph = mlmd_handle.store.get_lineage_subgraph(
        query_options=metadata_store_pb2.LineageSubgraphQueryOptions(
            starting_executions=(
                metadata_store_pb2.LineageSubgraphQueryOptions.StartingNodes(
                    filter_query=f'id = {execution_id}',
                )
            ),
            max_num_hops=1,
            direction=metadata_store_pb2.LineageSubgraphQueryOptions.DOWNSTREAM,
        ),
        field_mask_paths=[
            'artifacts',
            'events',
        ],
    )
  except mlmd_errors.StatusError as e:
    # Surface MLMD failures as status errors, preserving code and message.
    raise status_lib.StatusNotOkError(code=e.error_code, message=str(e))
  output_artifact_ids = set()
  for event in lineage_graph.events:
    # We check both OUTPUT and PENDING_OUTPUT state because the REFERENCE
    # artifact will have event type PENDING_OUTPUT, but LIVE intermediate
    # artifacts will have event type OUTPUT.
    if event_lib.contains_key(event, output_key) and event.type in [
        metadata_store_pb2.Event.PENDING_OUTPUT,
        metadata_store_pb2.Event.OUTPUT,
    ]:
      output_artifact_ids.add(event.artifact_id)
  output_artifacts = [
      a for a in lineage_graph.artifacts if a.id in output_artifact_ids
  ]
  # Find the REFERENCE and LIVE artifacts in the subgraph.
  reference_artifact = None
  intermediate_artifacts = []
  for artifact in output_artifacts:
    if artifact.state == metadata_store_pb2.Artifact.State.REFERENCE:
      # There must be exactly one REFERENCE artifact per output key.
      if reference_artifact is not None:
        raise status_lib.StatusNotOkError(
            code=status_lib.Code.ALREADY_EXISTS,
            message=(
                'Found multiple REFERENCE Artifacts with output_key '
                f'{output_key} for execution_id {execution_id}.'
            ),
        )
      reference_artifact = artifact
    elif artifact.state == metadata_store_pb2.Artifact.State.LIVE:
      intermediate_artifacts.append(artifact)
  if reference_artifact is None:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.NOT_FOUND,
        message=(
            f'REFERENCE Artifact with output_key {output_key} for '
            f'execution_id {execution_id} not found.'
        ),
    )
  return _MLMDProtos(
      reference_artifact=reference_artifact,
      intermediate_artifacts=intermediate_artifacts,
  )
def _generate_reference_uri_subdir(
    reference_artifact_uri: str,
) -> str:
  """Returns a fresh timestamped URI under the reference artifact's URI."""
  # TODO(b/285399450): Properly handle ValueArtifacts, which have a uri of
  # a file, e.g. some/uri/value instead of a directory.
  utc_now = datetime.datetime.now(datetime.timezone.utc)
  # Subdirectory name has the form <PREFIX>_YYYYMMDD_HHMMSS_FFFFFF.
  subdir_name = utc_now.strftime(constants.PREFIX + '_%Y%m%d_%H%M%S_%f')
  return os.path.join(reference_artifact_uri, subdir_name)
The provided code snippet includes necessary dependencies for implementing the `publish_intermediate_artifact` function. Write a Python function `def publish_intermediate_artifact( mlmd_handle: metadata.Metadata, execution_id: int, output_key: str, properties: Optional[Dict[str, metadata_store_pb2.Value]], custom_properties: Optional[Dict[str, metadata_store_pb2.Value]], external_uri: Optional[str] = None, temp_uri: Optional[str] = None, ) -> metadata_store_pb2.Artifact` to solve the following problem:
Publishes an intermediate artifact. Args: mlmd_handle: A handle to the MLMD database. execution_id: The ID of the execution which generates the artifact. output_key: The output key of the artifact. properties: Properties of the artifact. custom_properties: Custom properties of the artifact. external_uri: The external URI provided by the user. Exactly one of external_uri and temp_uri must be set. temp_uri: Temp URI generated internally by Tflex. Exactly one of external_uri and temp_uri must be set. Returns: The published intermediate Artifact proto.
Here is the function:
def publish_intermediate_artifact(
    mlmd_handle: metadata.Metadata,
    execution_id: int,
    output_key: str,
    properties: Optional[Dict[str, metadata_store_pb2.Value]],
    custom_properties: Optional[Dict[str, metadata_store_pb2.Value]],
    external_uri: Optional[str] = None,
    temp_uri: Optional[str] = None,
) -> metadata_store_pb2.Artifact:
  """Publishes an intermediate artifact.

  Args:
    mlmd_handle: A handle to the MLMD database.
    execution_id: The ID of the execution which generates the artifact.
    output_key: The output key of the artifact.
    properties: Properties of the artifact.
    custom_properties: Custom properties of the artifact.
    external_uri: The external URI provided by the user. Exactly one of
      external_uri and temp_uri must be set.
    temp_uri: Temp URI generated internally by Tflex. Exactly one of
      external_uri and temp_uri must be set.

  Returns:
    The published intermediate Artifact proto.

  Raises:
    status_lib.StatusNotOkError: If neither URI argument is provided
      (INVALID_ARGUMENT), if an artifact with the external URI was already
      published (ALREADY_EXISTS), if moving the temp URI fails (ABORTED), or
      if the underlying MLMD operations fail.
  """
  # Check that a REFERENCE artifact corresponding to the output key and
  # execution ID exists.
  mlmd_protos = _get_mlmd_protos_for_execution(
      mlmd_handle, execution_id, output_key
  )
  if external_uri:
    # The final URI for the intermediate artifact is an external URI.
    final_uri = external_uri
    # Verify that an external artifact with the same URI has not already been
    # published.
    for artifact in mlmd_protos.intermediate_artifacts:
      if artifact.uri == final_uri:
        raise status_lib.StatusNotOkError(
            code=status_lib.Code.ALREADY_EXISTS,
            message=(
                f'Artifact with URI {final_uri} has already been published: '
                f'{artifact}'
            ),
        )
  elif temp_uri:
    # The final URI for the intermediate artifact is a subdirectory of the
    # REFERENCE artifact's URI.
    final_uri = _generate_reference_uri_subdir(
        mlmd_protos.reference_artifact.uri,
    )
    try:
      fileio.rename(temp_uri, final_uri)
    except filesystem.NotFoundError as e:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.ABORTED, message=str(e)
      )
    logging.info(
        'Moved temporary URI %s contents to final URI %s',
        temp_uri,
        final_uri,
    )
  else:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.INVALID_ARGUMENT,
        message='Neither external_uri nor temp_uri was provided.',
    )
  # Build the intermediate artifact object. We set its state to LIVE, so that
  # it can be immediately consumed.
  intermediate_artifact = metadata_store_pb2.Artifact()
  intermediate_artifact.CopyFrom(mlmd_protos.reference_artifact)
  intermediate_artifact.uri = final_uri
  intermediate_artifact.state = metadata_store_pb2.Artifact.State.LIVE
  # Clear DB-assigned fields so MLMD registers this as a brand-new artifact.
  intermediate_artifact.ClearField('id')
  intermediate_artifact.ClearField('create_time_since_epoch')
  intermediate_artifact.ClearField('last_update_time_since_epoch')
  # Copy any new properties/custom properties for the artifact.
  if properties:
    for key, value in properties.items():
      intermediate_artifact.properties[key].CopyFrom(value)
  if custom_properties:
    for key, value in custom_properties.items():
      intermediate_artifact.custom_properties[key].CopyFrom(value)
  try:
    contexts = mlmd_handle.store.get_contexts_by_execution(execution_id)
    event = event_lib.generate_event(
        event_type=metadata_store_pb2.Event.OUTPUT,
        key=output_key,
        # We intentionally start the OUTPUT Event at index 0, even though
        # there is a PENDING_OUTPUT Event with index 0 associated with the
        # REFERENCE artifact.
        index=len(mlmd_protos.intermediate_artifacts),
    )
    # TODO(b/262040844): Instead of directly using the context manager here, we
    # should consider creating and using wrapper functions.
    with mlmd_state.evict_from_cache(execution_id):
      [execution] = mlmd_handle.store.get_executions_by_id([execution_id])
      # Link the Execution to the Artifact with an OUTPUT Event edge.
      mlmd_handle.store.put_execution(
          execution=execution,
          artifact_and_events=[(intermediate_artifact, event)],
          contexts=contexts,
          reuse_context_if_already_exist=True,
          reuse_artifact_if_already_exist_by_external_id=True,
          # Intermediate artifacts are published after the execution is
          # created. We need to set force_update_time to True, to ensure
          # last_update_time_since_epoch is updated whenever we publish new
          # intermediate artifacts.
          force_update_time=True,
      )
  except mlmd_errors.StatusError as e:
    raise status_lib.StatusNotOkError(code=e.error_code, message=str(e))
  logging.info('Published intermediate artifact: %s', intermediate_artifact)
  return intermediate_artifact
166,292 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
class NodeState(json_utils.Jsonable):
  """Records node state.

  Attributes:
    state: Current state of the node.
    status: Status of the node in state STOPPING or STOPPED.
  """

  # NOTE(review): the `attr.ib` fields and `on_setattr` below require an attrs
  # class decorator (e.g. `@attr.s`), and `status` / `from_json_dict` read
  # like a `@property` / `@classmethod` respectively; the decorators are not
  # visible in this excerpt — confirm against the original source.

  STARTED = 'started'  # Node is ready for execution.
  STOPPING = 'stopping'  # Pending work before state can change to STOPPED.
  STOPPED = 'stopped'  # Node execution is stopped.
  RUNNING = 'running'  # Node is under active execution (i.e. triggered).
  COMPLETE = 'complete'  # Node execution completed successfully.
  # Node execution skipped due to condition not satisfied when pipeline has
  # conditionals.
  SKIPPED = 'skipped'
  # Node execution skipped due to partial run.
  SKIPPED_PARTIAL_RUN = 'skipped_partial_run'
  FAILED = 'failed'  # Node execution failed due to errors.

  # Current state; restricted to the constants above, re-validated on every
  # assignment via `on_setattr`.
  state: str = attr.ib(
      default=STARTED,
      validator=attr.validators.in_([
          STARTED,
          STOPPING,
          STOPPED,
          RUNNING,
          COMPLETE,
          SKIPPED,
          SKIPPED_PARTIAL_RUN,
          FAILED,
      ]),
      on_setattr=attr.setters.validate,
  )
  backfill_token: str = ''
  status_code: Optional[int] = None
  status_msg: str = ''
  # Seconds since epoch of the most recent state update (set via time.time()).
  last_updated_time: float = attr.ib(factory=lambda: time.time())  # pylint:disable=unnecessary-lambda
  # Bounded history of past states; trimmed to _MAX_STATE_HISTORY_LEN entries.
  state_history: List[StateRecord] = attr.ib(default=attr.Factory(list))

  def status(self) -> Optional[status_lib.Status]:
    """Returns the recorded status, or None if no status code is set."""
    if self.status_code is not None:
      return status_lib.Status(code=self.status_code, message=self.status_msg)
    return None

  def update(
      self,
      state: str,
      status: Optional[status_lib.Status] = None,
      backfill_token: str = '',
  ) -> None:
    """Transitions to `state`, archiving the previous state in the history."""
    if self.state != state:
      # Archive the outgoing state before overwriting it.
      self.state_history.append(
          StateRecord(
              state=self.state,
              backfill_token=self.backfill_token,
              status_code=self.status_code,
              update_time=self.last_updated_time,
          )
      )
      # Keep only the most recent _MAX_STATE_HISTORY_LEN records.
      if len(self.state_history) > _MAX_STATE_HISTORY_LEN:
        self.state_history = self.state_history[-_MAX_STATE_HISTORY_LEN:]
      self.last_updated_time = time.time()
    self.state = state
    self.backfill_token = backfill_token
    self.status_code = status.code if status is not None else None
    self.status_msg = (status.message or '') if status is not None else ''

  def is_startable(self) -> bool:
    """Returns True if the node can be started."""
    return self.state in set([self.STOPPING, self.STOPPED, self.FAILED])

  def is_stoppable(self) -> bool:
    """Returns True if the node can be stopped."""
    return self.state in set([self.STARTED, self.RUNNING])

  def is_backfillable(self) -> bool:
    """Returns True if the node can be backfilled."""
    return self.state in set([self.STOPPED, self.FAILED])

  def is_programmatically_skippable(self) -> bool:
    """Returns True if the node can be skipped via programmatic operation."""
    return self.state in set([self.STARTED, self.STOPPED])

  def is_success(self) -> bool:
    """Returns True if the node state counts as a success."""
    return is_node_state_success(self.state)

  def is_failure(self) -> bool:
    """Returns True if the node state counts as a failure."""
    return is_node_state_failure(self.state)

  def to_run_state(self) -> run_state_pb2.RunState:
    """Returns this NodeState converted to a RunState."""
    status_code_value = None
    if self.status_code is not None:
      status_code_value = run_state_pb2.RunState.StatusCodeValue(
          value=self.status_code)
    return run_state_pb2.RunState(
        state=_NODE_STATE_TO_RUN_STATE_MAP.get(
            self.state, run_state_pb2.RunState.UNKNOWN
        ),
        status_code=status_code_value,
        status_msg=self.status_msg,
        # RunState.update_time is in milliseconds; last_updated_time is in
        # seconds.
        update_time=int(self.last_updated_time * 1000),
    )

  def to_run_state_history(self) -> List[run_state_pb2.RunState]:
    """Converts the state history to a list of RunStates."""
    run_state_history = []
    for state in self.state_history:
      # STARTING, PAUSING and PAUSED have been deprecated but may still be
      # present in state_history.
      if (
          state.state == 'starting'
          or state.state == 'pausing'
          or state.state == 'paused'
      ):
        continue
      run_state_history.append(
          NodeState(
              state=state.state,
              status_code=state.status_code,
              last_updated_time=state.update_time).to_run_state())
    return run_state_history

  # By default, json_utils.Jsonable serializes and deserializes objects using
  # obj.__dict__, which prevents attr.ib from populating default fields.
  # Overriding this function to ensure default fields are populated.
  def from_json_dict(cls, dict_data: Dict[str, Any]) -> Any:
    """Convert from dictionary data to an object."""
    return cls(**dict_data)

  def latest_predicate_time_s(self, predicate: Callable[[StateRecord], bool],
                              include_current_state: bool) -> Optional[int]:
    """Returns the latest time the StateRecord satisfies the given predicate.

    Args:
      predicate: Predicate that takes the state string.
      include_current_state: Whether to include the current node state when
        checking the node state history (the node state history doesn't
        include the current node state).

    Returns:
      The latest time (in the state history) the StateRecord satisfies the
      given predicate, or None if the predicate is never satisfied.
    """
    if include_current_state:
      current_record = StateRecord(
          state=self.state,
          backfill_token=self.backfill_token,
          status_code=self.status_code,
          update_time=self.last_updated_time,
      )
      if predicate(current_record):
        return int(current_record.update_time)
    # History is appended chronologically, so scan from the newest record.
    for s in reversed(self.state_history):
      if predicate(s):
        return int(s.update_time)
    return None

  def latest_running_time_s(self) -> Optional[int]:
    """Returns the latest time the node entered a RUNNING state.

    Returns:
      The latest time (in the state history) the node entered a RUNNING
      state, or None if the node never entered a RUNNING state.
    """
    return self.latest_predicate_time_s(
        lambda s: is_node_state_running(s.state), include_current_state=True)
def is_node_state_running(state: str) -> bool:
  """Returns True iff `state` is the RUNNING node state."""
  running = NodeState.RUNNING
  return state == running
166,293 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# Seconds-since-epoch of the most recently recorded state change; -1.0 means
# no state change has been recorded yet. Guarded by the lock below.
_last_state_change_time_secs = -1.0
_state_change_time_lock = threading.Lock()
The provided code snippet includes necessary dependencies for implementing the `record_state_change_time` function. Write a Python function `def record_state_change_time() -> None` to solve the following problem:
Records current time at the point of function call as state change time. This function may be called after any operation that changes pipeline state or node execution state that requires further processing in the next iteration of the orchestration loop. As an optimization, the orchestration loop can elide wait period in between iterations when such state change is detected.
Here is the function:
def record_state_change_time() -> None:
  """Records the current time as the most recent state-change time.

  Call this after any operation that changes pipeline state or node execution
  state and requires further processing in the next orchestration-loop
  iteration. The orchestration loop may use the recorded time to skip its
  inter-iteration wait when a recent state change is detected.
  """
  global _last_state_change_time_secs
  with _state_change_time_lock:
    _last_state_change_time_secs = time.time()
166,294 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# Serializes access for functions wrapped with the `_synchronized` decorator.
_active_pipelines_lock = threading.Lock()
def _synchronized(f):
  """Decorator that runs `f` while holding `_active_pipelines_lock`."""

  @functools.wraps(f)
  def _locked_call(*args, **kwargs):
    with _active_pipelines_lock:
      return f(*args, **kwargs)

  return _locked_call
166,295 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# Reserved MLMD context type name for orchestrator contexts.
_ORCHESTRATOR_RESERVED_ID = '__ORCHESTRATOR__'
The provided code snippet includes necessary dependencies for implementing the `get_orchestrator_contexts` function. Write a Python function `def get_orchestrator_contexts(mlmd_handle: metadata.Metadata, **kwargs) -> List[metadata_store_pb2.Context]` to solve the following problem:
Returns all of the orchestrator contexts.
Here is the function:
def get_orchestrator_contexts(mlmd_handle: metadata.Metadata,
                              **kwargs) -> List[metadata_store_pb2.Context]:
  """Fetches every orchestrator-reserved context from MLMD.

  Args:
    mlmd_handle: A handle to the MLMD database.
    **kwargs: Forwarded to the underlying store query.

  Returns:
    All contexts of the orchestrator-reserved type.
  """
  store = mlmd_handle.store
  return store.get_contexts_by_type(_ORCHESTRATOR_RESERVED_ID, **kwargs)
166,296 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `pipeline_id_from_orchestrator_context` function. Write a Python function `def pipeline_id_from_orchestrator_context( context: metadata_store_pb2.Context) -> str` to solve the following problem:
Returns pipeline id from orchestrator reserved context.
Here is the function:
def pipeline_id_from_orchestrator_context(
    context: metadata_store_pb2.Context) -> str:
  """Extracts the pipeline id from an orchestrator reserved context.

  The orchestrator context's name is the pipeline id itself.
  """
  pipeline_id = context.name
  return pipeline_id
166,297 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def get_all_nodes(
    pipeline: pipeline_pb2.Pipeline) -> List[node_proto_view.NodeProtoView]:
  """Returns views of every node (or inner pipeline) of the given pipeline."""
  # TODO(goutham): Handle system nodes.
  views = []
  for pipeline_or_node in pipeline.nodes:
    views.append(node_proto_view.get_view(pipeline_or_node))
  return views
The provided code snippet includes necessary dependencies for implementing the `_is_node_uid_in_pipeline` function. Write a Python function `def _is_node_uid_in_pipeline(node_uid: task_lib.NodeUid, pipeline: pipeline_pb2.Pipeline) -> bool` to solve the following problem:
Returns `True` if the `node_uid` belongs to the given pipeline.
Here is the function:
def _is_node_uid_in_pipeline(node_uid: task_lib.NodeUid,
                             pipeline: pipeline_pb2.Pipeline) -> bool:
  """Checks whether `node_uid` identifies a node of the given pipeline."""
  return any(
      task_lib.NodeUid.from_node(pipeline, node) == node_uid
      for node in get_all_nodes(pipeline)
  )
166,298 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# Execution property key under which the encoded pipeline IR is stored.
_PIPELINE_IR = 'pipeline_ir'
class _PipelineIRCodec:
  """A class for encoding / decoding pipeline IR.

  Large IRs that exceed MLMD's string value limit are written to a file under
  the configured base directory, and only a JSON payload with the file URL is
  stored in MLMD.
  """

  _ORCHESTRATOR_METADATA_DIR = '.orchestrator'
  _PIPELINE_IRS_DIR = 'pipeline_irs'
  _PIPELINE_IR_URL_KEY = 'pipeline_ir_url'
  # Singleton instance and the lock guarding its creation/reset.
  _obj = None
  _lock = threading.Lock()

  # NOTE(review): `get` and `testonly_reset` take `cls` and read like
  # @classmethods; the decorators are not visible in this excerpt — confirm
  # against the original source.
  def get(cls) -> '_PipelineIRCodec':
    """Returns the singleton instance, creating it on first use."""
    with cls._lock:
      if not cls._obj:
        cls._obj = cls()
      return cls._obj

  def testonly_reset(cls) -> None:
    """Reset global state, for tests only."""
    with cls._lock:
      cls._obj = None

  def __init__(self):
    # When no base dir is configured, IRs can only be stored inline in MLMD.
    self.base_dir = env.get_env().get_base_dir()
    if self.base_dir:
      self.pipeline_irs_dir = os.path.join(self.base_dir,
                                           self._ORCHESTRATOR_METADATA_DIR,
                                           self._PIPELINE_IRS_DIR)
      fileio.makedirs(self.pipeline_irs_dir)
    else:
      self.pipeline_irs_dir = None

  def encode(self, pipeline: pipeline_pb2.Pipeline) -> str:
    """Encodes pipeline IR."""
    # Attempt to store as a base64 encoded string. If base_dir is provided
    # and the length is too large, store the IR on disk and retain the URL.
    # TODO(b/248786921): Always store pipeline IR to base_dir once the
    # accessibility issue is resolved.
    pipeline_encoded = _base64_encode(pipeline)
    max_mlmd_str_value_len = env.get_env().max_mlmd_str_value_length()
    if self.base_dir and max_mlmd_str_value_len is not None and len(
        pipeline_encoded) > max_mlmd_str_value_len:
      pipeline_id = task_lib.PipelineUid.from_pipeline(pipeline).pipeline_id
      # The uuid suffix keeps file names unique across writes for the same
      # pipeline id.
      pipeline_url = os.path.join(self.pipeline_irs_dir,
                                  f'{pipeline_id}_{uuid.uuid4()}.pb')
      with fileio.open(pipeline_url, 'wb') as file:
        file.write(pipeline.SerializeToString())
      pipeline_encoded = json.dumps({self._PIPELINE_IR_URL_KEY: pipeline_url})
    return pipeline_encoded

  def decode(self, value: str) -> pipeline_pb2.Pipeline:
    """Decodes pipeline IR."""
    # Attempt to load as JSON. If it fails, fallback to decoding it as a
    # base64 encoded string for backward compatibility.
    try:
      pipeline_encoded = json.loads(value)
      with fileio.open(pipeline_encoded[self._PIPELINE_IR_URL_KEY],
                       'rb') as file:
        return pipeline_pb2.Pipeline.FromString(file.read())
    except json.JSONDecodeError:
      return _base64_decode_pipeline(value)
def _get_pipeline_from_orchestrator_execution(
    execution: metadata_store_pb2.Execution) -> pipeline_pb2.Pipeline:
  """Decodes the pipeline IR stored on an orchestrator execution."""
  encoded_ir = data_types_utils.get_metadata_value(
      execution.properties[_PIPELINE_IR])
  codec = _PipelineIRCodec.get()
  return codec.decode(encoded_ir)
166,299 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# MLMD context type name reserved for orchestrator-level contexts; one such
# context exists per pipeline and groups its orchestrator executions.
_ORCHESTRATOR_RESERVED_ID = '__ORCHESTRATOR__'
The provided code snippet includes necessary dependencies for implementing the `_get_orchestrator_context` function. Write a Python function `def _get_orchestrator_context(mlmd_handle: metadata.Metadata, pipeline_id: str, **kwargs) -> metadata_store_pb2.Context` to solve the following problem:
Returns the orchestrator context of a particular pipeline.
Here is the function:
def _get_orchestrator_context(mlmd_handle: metadata.Metadata, pipeline_id: str,
                              **kwargs) -> metadata_store_pb2.Context:
  """Returns the orchestrator context of a particular pipeline.

  Raises:
    status_lib.StatusNotOkError: With code NOT_FOUND if the pipeline has no
      orchestrator context registered in MLMD.
  """
  orchestrator_context = mlmd_handle.store.get_context_by_type_and_name(
      type_name=_ORCHESTRATOR_RESERVED_ID, context_name=pipeline_id, **kwargs)
  if not orchestrator_context:
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.NOT_FOUND,
        message=f'No pipeline with id {pipeline_id} found.')
  return orchestrator_context
166,300 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def _base64_encode(msg: message.Message) -> str:
  """Serializes `msg` and returns its base64 encoding as a text string."""
  raw = msg.SerializeToString()
  return base64.b64encode(raw).decode('utf-8')
166,301 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def _base64_decode_pipeline(pipeline_encoded: str) -> pipeline_pb2.Pipeline:
  """Parses a Pipeline proto from its base64-encoded serialized form."""
  pipeline = pipeline_pb2.Pipeline()
  pipeline.ParseFromString(base64.b64decode(pipeline_encoded))
  return pipeline
166,302 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def _base64_decode_update_options(
    update_options_encoded: str) -> pipeline_pb2.UpdateOptions:
  """Parses an UpdateOptions proto from its base64-encoded serialized form."""
  options = pipeline_pb2.UpdateOptions()
  options.ParseFromString(base64.b64decode(update_options_encoded))
  return options
166,303 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# Keys of the pipeline execution's custom properties under which the
# JSON-serialized node-state dicts are stored: current-run states and the
# states carried over from the previous (base) run of a partial run.
_NODE_STATES = 'node_states'
_PREVIOUS_NODE_STATES = 'previous_node_states'
class NodeState(json_utils.Jsonable):
  """Records node state.

  Attributes:
    state: Current state of the node.
    status: Status of the node in state STOPPING or STOPPED.
  """

  # NOTE(review): the attrs-style fields (`attr.ib`) below imply an attrs class
  # decorator (e.g. `@attr.define`) that is not visible in this chunk; likewise
  # `status` and `from_json_dict` read like a stripped `@property` /
  # `@classmethod`. `StateRecord`, `_MAX_STATE_HISTORY_LEN`,
  # `_NODE_STATE_TO_RUN_STATE_MAP` and the `is_node_state_*` helpers are
  # defined elsewhere in the module — confirm against the original source.

  STARTED = 'started'  # Node is ready for execution.
  STOPPING = 'stopping'  # Pending work before state can change to STOPPED.
  STOPPED = 'stopped'  # Node execution is stopped.
  RUNNING = 'running'  # Node is under active execution (i.e. triggered).
  COMPLETE = 'complete'  # Node execution completed successfully.
  # Node execution skipped due to condition not satisfied when pipeline has
  # conditionals.
  SKIPPED = 'skipped'
  # Node execution skipped due to partial run.
  SKIPPED_PARTIAL_RUN = 'skipped_partial_run'
  FAILED = 'failed'  # Node execution failed due to errors.

  # Validator restricts `state` to the closed set above; re-validated on every
  # assignment via `on_setattr`.
  state: str = attr.ib(
      default=STARTED,
      validator=attr.validators.in_([
          STARTED,
          STOPPING,
          STOPPED,
          RUNNING,
          COMPLETE,
          SKIPPED,
          SKIPPED_PARTIAL_RUN,
          FAILED,
      ]),
      on_setattr=attr.setters.validate,
  )
  backfill_token: str = ''
  status_code: Optional[int] = None
  status_msg: str = ''
  last_updated_time: float = attr.ib(factory=lambda: time.time())  # pylint:disable=unnecessary-lambda
  state_history: List[StateRecord] = attr.ib(default=attr.Factory(list))

  # NOTE(review): likely a stripped `@property` — callers elsewhere read
  # `.status` without calling it; confirm.
  def status(self) -> Optional[status_lib.Status]:
    if self.status_code is not None:
      return status_lib.Status(code=self.status_code, message=self.status_msg)
    return None

  def update(
      self,
      state: str,
      status: Optional[status_lib.Status] = None,
      backfill_token: str = '',
  ) -> None:
    # On an actual state transition, archive the outgoing state into the
    # bounded history before overwriting the current fields.
    if self.state != state:
      self.state_history.append(
          StateRecord(
              state=self.state,
              backfill_token=self.backfill_token,
              status_code=self.status_code,
              update_time=self.last_updated_time,
          )
      )
      if len(self.state_history) > _MAX_STATE_HISTORY_LEN:
        self.state_history = self.state_history[-_MAX_STATE_HISTORY_LEN:]
      self.last_updated_time = time.time()

    self.state = state
    self.backfill_token = backfill_token
    self.status_code = status.code if status is not None else None
    self.status_msg = (status.message or '') if status is not None else ''

  def is_startable(self) -> bool:
    """Returns True if the node can be started."""
    return self.state in set([self.STOPPING, self.STOPPED, self.FAILED])

  def is_stoppable(self) -> bool:
    """Returns True if the node can be stopped."""
    return self.state in set([self.STARTED, self.RUNNING])

  def is_backfillable(self) -> bool:
    """Returns True if the node can be backfilled."""
    return self.state in set([self.STOPPED, self.FAILED])

  def is_programmatically_skippable(self) -> bool:
    """Returns True if the node can be skipped via programmatic operation."""
    return self.state in set([self.STARTED, self.STOPPED])

  def is_success(self) -> bool:
    return is_node_state_success(self.state)

  def is_failure(self) -> bool:
    return is_node_state_failure(self.state)

  def to_run_state(self) -> run_state_pb2.RunState:
    """Returns this NodeState converted to a RunState."""
    status_code_value = None
    if self.status_code is not None:
      status_code_value = run_state_pb2.RunState.StatusCodeValue(
          value=self.status_code)
    return run_state_pb2.RunState(
        state=_NODE_STATE_TO_RUN_STATE_MAP.get(
            self.state, run_state_pb2.RunState.UNKNOWN
        ),
        status_code=status_code_value,
        status_msg=self.status_msg,
        # RunState timestamps are in milliseconds; last_updated_time is
        # seconds since epoch.
        update_time=int(self.last_updated_time * 1000),
    )

  def to_run_state_history(self) -> List[run_state_pb2.RunState]:
    run_state_history = []
    for state in self.state_history:
      # STARTING, PAUSING and PAUSED has been deprecated but may still be
      # present in state_history.
      if (
          state.state == 'starting'
          or state.state == 'pausing'
          or state.state == 'paused'
      ):
        continue
      run_state_history.append(
          NodeState(
              state=state.state,
              status_code=state.status_code,
              last_updated_time=state.update_time).to_run_state())
    return run_state_history

  # By default, json_utils.Jsonable serializes and deserializes objects using
  # obj.__dict__, which prevents attr.ib from populating default fields.
  # Overriding this function to ensure default fields are populated.
  # NOTE(review): takes `cls` — likely a stripped `@classmethod`; confirm.
  def from_json_dict(cls, dict_data: Dict[str, Any]) -> Any:
    """Convert from dictionary data to an object."""
    return cls(**dict_data)

  def latest_predicate_time_s(self, predicate: Callable[[StateRecord], bool],
                              include_current_state: bool) -> Optional[int]:
    """Returns the latest time the StateRecord satisfies the given predicate.

    Args:
      predicate: Predicate that takes the state string.
      include_current_state: Whether to include the current node state when
        checking the node state history (the node state history doesn't
        include the current node state).

    Returns:
      The latest time (in the state history) the StateRecord satisfies the
      given predicate, or None if the predicate is never satisfied.
    """
    if include_current_state:
      # The current state is not part of state_history, so synthesize a record
      # for it and test it first (it is the most recent by construction).
      current_record = StateRecord(
          state=self.state,
          backfill_token=self.backfill_token,
          status_code=self.status_code,
          update_time=self.last_updated_time,
      )
      if predicate(current_record):
        return int(current_record.update_time)
    # History is stored oldest-first; scan newest-first.
    for s in reversed(self.state_history):
      if predicate(s):
        return int(s.update_time)
    return None

  def latest_running_time_s(self) -> Optional[int]:
    """Returns the latest time the node entered a RUNNING state.

    Returns:
      The latest time (in the state history) the node entered a RUNNING
      state, or None if the node never entered a RUNNING state.
    """
    return self.latest_predicate_time_s(
        lambda s: is_node_state_running(s.state), include_current_state=True)
class _NodeStatesProxy:
  """Proxy for reading and updating deserialized NodeState dicts from Execution.

  This proxy contains an internal write-back cache. Changes are not saved back
  to the `Execution` until `save()` is called; cache would not be updated if
  changes were made outside of the proxy, either. This is primarily used to
  reduce JSON serialization/deserialization overhead for getting node state
  execution property from pipeline execution.
  """

  def __init__(self, execution: metadata_store_pb2.Execution):
    # Holds a reference to (not a copy of) the execution's custom_properties
    # map, so save() mutates the caller's Execution proto in place.
    self._custom_properties = execution.custom_properties
    # state_type -> {node_id -> NodeState}; populated lazily on first get().
    self._deserialized_cache: Dict[str, Dict[str, NodeState]] = {}
    # state_types whose cached dicts were replaced via set() and still need
    # to be written back by save().
    self._changed_state_types: Set[str] = set()

  def get(self, state_type: str = _NODE_STATES) -> Dict[str, NodeState]:
    """Gets node states dict from pipeline execution with the specified type."""
    if state_type not in [_NODE_STATES, _PREVIOUS_NODE_STATES]:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INVALID_ARGUMENT,
          message=(
              f'Expected state_type is {_NODE_STATES} or'
              f' {_PREVIOUS_NODE_STATES}, got {state_type}.'
          ),
      )
    if state_type not in self._deserialized_cache:
      # Missing or empty property deserializes to an empty dict.
      node_states_json = _get_metadata_value(
          self._custom_properties.get(state_type)
      )
      self._deserialized_cache[state_type] = (
          json_utils.loads(node_states_json) if node_states_json else {}
      )
    return self._deserialized_cache[state_type]

  def set(
      self, node_states: Dict[str, NodeState], state_type: str = _NODE_STATES
  ) -> None:
    """Sets node states dict with the specified type."""
    self._deserialized_cache[state_type] = node_states
    self._changed_state_types.add(state_type)

  def save(self) -> None:
    """Saves all changed node states dicts to pipeline execution."""
    max_mlmd_str_value_len = env.get_env().max_mlmd_str_value_length()

    for state_type in self._changed_state_types:
      node_states = self._deserialized_cache[state_type]
      node_states_json = json_utils.dumps(node_states)

      # Removes state history from node states if it's too large to avoid
      # hitting MLMD limit.
      if (
          max_mlmd_str_value_len
          and len(node_states_json) > max_mlmd_str_value_len
      ):
        logging.info(
            'Node states length %d is too large (> %d); Removing state history'
            ' from it.',
            len(node_states_json),
            max_mlmd_str_value_len,
        )
        # Deep-copy before clearing history so the cached NodeState objects
        # (which callers may still hold) are not mutated.
        node_states_no_history = {}
        for node, old_state in node_states.items():
          new_state = copy.deepcopy(old_state)
          new_state.state_history.clear()
          node_states_no_history[node] = new_state
        node_states_json = json_utils.dumps(node_states_no_history)
        logging.info(
            'Node states length after removing state history: %d',
            len(node_states_json),
        )

      data_types_utils.set_metadata_value(
          self._custom_properties[state_type], node_states_json
      )
class PipelineView:
  """Class for reading active or inactive pipeline view.

  A read-only view over one orchestrator execution (i.e. one pipeline run)
  plus its orchestrator context.
  """

  # NOTE(review): `load_all` / `load` take `cls` and several no-arg accessors
  # below (`pipeline`, `pipeline_run_id`, `pipeline_status_code`, ...) are
  # read as attributes elsewhere in this class (see get_pipeline_run_state) —
  # the original likely had `@classmethod` / `@property` decorators that were
  # stripped from this chunk; confirm against the original source.
  # `_PIPELINE_RUN_ID`, `_PIPELINE_STATUS_CODE`, `_PIPELINE_STATUS_MSG`,
  # `_PIPELINE_RUN_METADATA` and `_EXECUTION_STATE_TO_RUN_STATE_MAP` are
  # defined elsewhere in the module.

  def __init__(self, pipeline_id: str, context: metadata_store_pb2.Context,
               execution: metadata_store_pb2.Execution):
    self.pipeline_id = pipeline_id
    self.context = context
    self.execution = execution
    self._node_states_proxy = _NodeStatesProxy(execution)
    self._pipeline = None  # lazily set

  def load_all(
      cls,
      mlmd_handle: metadata.Metadata,
      pipeline_id: str,
      list_options: Optional[mlmd.ListOptions] = None,
      **kwargs,
  ) -> List['PipelineView']:
    """Loads all pipeline views from MLMD.

    Args:
      mlmd_handle: A handle to the MLMD db.
      pipeline_id: Id of the pipeline state to load.
      list_options: List options to customize the query for getting executions.
      **kwargs: Extra option to pass into mlmd store functions.

    Returns:
      A list of `PipelineView` objects, sorted by creation time ascending.

    Raises:
      status_lib.StatusNotOkError: With code=NOT_FOUND if no pipeline
      with the given pipeline uid exists in MLMD.
    """
    context = _get_orchestrator_context(mlmd_handle, pipeline_id, **kwargs)
    # TODO(b/279798582):
    # Uncomment the following when the slow sorting MLMD query is fixed.
    # list_options = mlmd.ListOptions(
    #     order_by=mlmd.OrderByField.CREATE_TIME, is_asc=True)
    executions = mlmd_handle.store.get_executions_by_context(
        context.id, list_options=list_options, **kwargs
    )
    # Sort client-side instead of in the (currently slow) MLMD query above.
    executions = sorted(executions, key=lambda x: x.create_time_since_epoch)
    return [cls(pipeline_id, context, execution) for execution in executions]

  def load(cls,
           mlmd_handle: metadata.Metadata,
           pipeline_id: str,
           pipeline_run_id: Optional[str] = None,
           non_active_only: Optional[bool] = False,
           **kwargs) -> 'PipelineView':
    """Loads pipeline view from MLMD.

    Args:
      mlmd_handle: A handle to the MLMD db.
      pipeline_id: Id of the pipeline state to load.
      pipeline_run_id: Run id of the pipeline for the synchronous pipeline.
      non_active_only: Whether to only load from a non-active pipeline.
      **kwargs: Extra option to pass into mlmd store functions.

    Returns:
      A `PipelineView` object.

    Raises:
      status_lib.StatusNotOkError: With code=NOT_FOUND if no pipeline
      with the given pipeline uid exists in MLMD.
    """
    context = _get_orchestrator_context(mlmd_handle, pipeline_id, **kwargs)
    filter_query = ''
    if non_active_only:
      filter_query = 'last_known_state != RUNNING AND last_known_state != NEW'
    # Default query: the single most recent execution (optionally restricted
    # to non-active ones).
    list_options = mlmd.ListOptions(
        order_by=mlmd.OrderByField.CREATE_TIME,
        is_asc=False,
        filter_query=filter_query,
        limit=1,
    )
    if pipeline_run_id:
      # Note(b/281478984):
      # This optimization is done for requests with pipeline run id
      # by specifying which pipeline run is queried.
      # Order by with this filter query is slow with large # of runs.
      # NOTE(review): this branch drops the non_active_only filter — looks
      # intentional per the bug reference, but confirm.
      list_options = mlmd.ListOptions(
          filter_query=(
              'custom_properties.pipeline_run_id.string_value ='
              f' "{pipeline_run_id}"'
          )
      )
    executions = mlmd_handle.store.get_executions_by_context(
        context.id, list_options=list_options, **kwargs
    )

    if pipeline_run_id is None and executions:
      return cls(pipeline_id, context, executions[0])

    for execution in executions:
      if execution.custom_properties[
          _PIPELINE_RUN_ID].string_value == pipeline_run_id:
        return cls(pipeline_id, context, execution)
    non_active_msg = 'non active ' if non_active_only else ''
    raise status_lib.StatusNotOkError(
        code=status_lib.Code.NOT_FOUND,
        message=(
            f'No {non_active_msg} pipeline with run_id {pipeline_run_id} found.'
        ),
    )

  def pipeline(self) -> pipeline_pb2.Pipeline:
    # Lazily decodes the IR; on failure, caches an empty Pipeline so repeated
    # accesses do not retry (and log) forever.
    if self._pipeline is None:
      try:
        self._pipeline = _get_pipeline_from_orchestrator_execution(
            self.execution
        )
      except Exception:  # pylint: disable=broad-except
        logging.exception('Failed to load pipeline IR for %s', self.pipeline_id)
        self._pipeline = pipeline_pb2.Pipeline()
    return self._pipeline

  def pipeline_execution_mode(self) -> pipeline_pb2.Pipeline.ExecutionMode:
    return _retrieve_pipeline_exec_mode(self.execution)

  def pipeline_run_id(self) -> str:
    # Prefer the custom property; fall back to the value baked into the IR.
    if _PIPELINE_RUN_ID in self.execution.custom_properties:
      return self.execution.custom_properties[_PIPELINE_RUN_ID].string_value
    return self.pipeline.runtime_spec.pipeline_run_id.field_value.string_value

  def pipeline_status_code(
      self) -> Optional[run_state_pb2.RunState.StatusCodeValue]:
    if _PIPELINE_STATUS_CODE in self.execution.custom_properties:
      return run_state_pb2.RunState.StatusCodeValue(
          value=self.execution.custom_properties[_PIPELINE_STATUS_CODE]
          .int_value)
    return None

  def pipeline_status_message(self) -> str:
    if _PIPELINE_STATUS_MSG in self.execution.custom_properties:
      return self.execution.custom_properties[_PIPELINE_STATUS_MSG].string_value
    return ''

  def pipeline_run_metadata(self) -> Dict[str, types.Property]:
    pipeline_run_metadata = _get_metadata_value(
        self.execution.custom_properties.get(_PIPELINE_RUN_METADATA))
    return json_utils.loads(
        pipeline_run_metadata) if pipeline_run_metadata else {}

  def get_pipeline_run_state(self) -> run_state_pb2.RunState:
    """Returns current pipeline run state."""
    state = run_state_pb2.RunState.UNKNOWN
    if self.execution.last_known_state in _EXECUTION_STATE_TO_RUN_STATE_MAP:
      state = _EXECUTION_STATE_TO_RUN_STATE_MAP[self.execution.last_known_state]
    # NOTE(review): the two accessors below are referenced without calling —
    # consistent with them originally being `@property`; confirm.
    return run_state_pb2.RunState(
        state=state,
        status_code=self.pipeline_status_code,
        status_msg=self.pipeline_status_message,
        update_time=self.execution.last_update_time_since_epoch)

  def get_node_run_states(self) -> Dict[str, run_state_pb2.RunState]:
    """Returns a dict mapping node id to current run state."""
    result = {}
    node_states_dict = self._node_states_proxy.get()
    for node in get_all_nodes(self.pipeline):
      node_state = node_states_dict.get(node.node_info.id, NodeState())
      result[node.node_info.id] = node_state.to_run_state()
    return result

  def get_node_run_states_history(
      self) -> Dict[str, List[run_state_pb2.RunState]]:
    """Returns the history of node run states and timestamps."""
    node_states_dict = self._node_states_proxy.get()
    result = {}
    for node in get_all_nodes(self.pipeline):
      node_state = node_states_dict.get(node.node_info.id, NodeState())
      result[node.node_info.id] = node_state.to_run_state_history()
    return result

  def get_previous_node_run_states(self) -> Dict[str, run_state_pb2.RunState]:
    """Returns a dict mapping node id to previous run state."""
    result = {}
    node_states_dict = self._node_states_proxy.get(_PREVIOUS_NODE_STATES)
    for node in get_all_nodes(self.pipeline):
      # Unlike current-run states, nodes without a previous state are omitted
      # rather than defaulted.
      if node.node_info.id not in node_states_dict:
        continue
      node_state = node_states_dict[node.node_info.id]
      result[node.node_info.id] = node_state.to_run_state()
    return result

  def get_previous_node_run_states_history(
      self) -> Dict[str, List[run_state_pb2.RunState]]:
    """Returns a dict mapping node id to previous run state and timestamps."""
    prev_node_states_dict = self._node_states_proxy.get(_PREVIOUS_NODE_STATES)
    result = {}
    for node in get_all_nodes(self.pipeline):
      if node.node_info.id not in prev_node_states_dict:
        continue
      node_state = prev_node_states_dict[node.node_info.id]
      result[node.node_info.id] = node_state.to_run_state_history()
    return result

  def get_property(self, property_key: str) -> Optional[types.Property]:
    """Returns custom property value from the pipeline execution."""
    return _get_metadata_value(
        self.execution.custom_properties.get(property_key))

  def get_node_states_dict(self) -> Dict[str, NodeState]:
    """Returns a dict mapping node id to node state."""
    result = {}
    node_states_dict = self._node_states_proxy.get()
    for node in get_all_nodes(self.pipeline):
      result[node.node_info.id] = node_states_dict.get(node.node_info.id,
                                                       NodeState())
    return result

  def get_previous_node_states_dict(self) -> Dict[str, NodeState]:
    """Returns a dict mapping node id to node state in previous run."""
    result = {}
    node_states_dict = self._node_states_proxy.get(_PREVIOUS_NODE_STATES)
    for node in get_all_nodes(self.pipeline):
      if node.node_info.id not in node_states_dict:
        continue
      result[node.node_info.id] = node_states_dict[node.node_info.id]
    return result
def get_all_nodes(
    pipeline: pipeline_pb2.Pipeline) -> List[node_proto_view.NodeProtoView]:
  """Returns the views of nodes or inner pipelines in the given pipeline."""
  # TODO(goutham): Handle system nodes.
  views = []
  for pipeline_or_node in pipeline.nodes:
    views.append(node_proto_view.get_view(pipeline_or_node))
  return views
The provided code snippet includes necessary dependencies for implementing the `_save_skipped_node_states` function. Write a Python function `def _save_skipped_node_states(pipeline: pipeline_pb2.Pipeline, reused_pipeline_view: PipelineView, execution: metadata_store_pb2.Execution) -> None` to solve the following problem:
Records (previous) node states for nodes that are skipped in partial run.
Here is the function:
def _save_skipped_node_states(pipeline: pipeline_pb2.Pipeline,
                              reused_pipeline_view: PipelineView,
                              execution: metadata_store_pb2.Execution) -> None:
  """Records (previous) node states for nodes that are skipped in partial run.

  Nodes marked to be skipped get state SKIPPED_PARTIAL_RUN on `execution`;
  each skipped node's most recent "real" state from the base run (if any) is
  preserved under the previous-node-states property.
  """
  if reused_pipeline_view:
    reused_states = reused_pipeline_view.get_node_states_dict()
    reused_previous_states = (
        reused_pipeline_view.get_previous_node_states_dict())
  else:
    reused_states = {}
    reused_previous_states = {}

  skipped_states: Dict[str, NodeState] = {}
  previous_states: Dict[str, NodeState] = {}
  for node in get_all_nodes(pipeline):
    node_id = node.node_info.id
    if not node.execution_options.HasField('skip'):
      continue
    logging.info('Node %s is skipped in this partial run.', node_id)
    skipped_states[node_id] = NodeState(state=NodeState.SKIPPED_PARTIAL_RUN)
    if node_id not in reused_states:
      continue
    # If the node was already skipped in the base run, carry forward the
    # previous state recorded there (so a chain of partial runs keeps the
    # latest real state); otherwise the base run's own state is the node's
    # last real state.
    reused_node_state = reused_states[node_id]
    if reused_node_state.state == NodeState.SKIPPED_PARTIAL_RUN:
      previous_states[node_id] = reused_previous_states.get(
          node_id, NodeState())
    else:
      previous_states[node_id] = reused_node_state

  proxy = _NodeStatesProxy(execution)
  if skipped_states:
    proxy.set(skipped_states, _NODE_STATES)
  if previous_states:
    proxy.set(previous_states, _PREVIOUS_NODE_STATES)
  proxy.save()
166,304 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# Custom property key on the orchestrator execution recording the pipeline's
# execution mode, and its two recognized string values.
_PIPELINE_EXEC_MODE = 'pipeline_exec_mode'
_PIPELINE_EXEC_MODE_SYNC = 'sync'
_PIPELINE_EXEC_MODE_ASYNC = 'async'
def _get_metadata_value(
    value: Optional[metadata_store_pb2.Value]) -> Optional[types.Property]:
  """Unwraps an MLMD Value proto, passing `None` through unchanged."""
  return None if value is None else data_types_utils.get_metadata_value(value)
The provided code snippet includes necessary dependencies for implementing the `_retrieve_pipeline_exec_mode` function. Write a Python function `def _retrieve_pipeline_exec_mode( execution: metadata_store_pb2.Execution ) -> pipeline_pb2.Pipeline.ExecutionMode` to solve the following problem:
Returns pipeline execution mode given pipeline-level execution.
Here is the function:
def _retrieve_pipeline_exec_mode(
    execution: metadata_store_pb2.Execution
) -> pipeline_pb2.Pipeline.ExecutionMode:
  """Returns pipeline execution mode given pipeline-level execution.

  Unrecognized or absent values map to EXECUTION_MODE_UNSPECIFIED.
  """
  mode_value = _get_metadata_value(
      execution.custom_properties.get(_PIPELINE_EXEC_MODE))
  mode_map = {
      _PIPELINE_EXEC_MODE_SYNC: pipeline_pb2.Pipeline.SYNC,
      _PIPELINE_EXEC_MODE_ASYNC: pipeline_pb2.Pipeline.ASYNC,
  }
  return mode_map.get(
      mode_value, pipeline_pb2.Pipeline.EXECUTION_MODE_UNSPECIFIED)
166,305 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def _log_pipeline_execution_state_change(
    old_state: metadata_store_pb2.Execution.State,
    new_state: metadata_store_pb2.Execution.State,
    pipeline_uid: task_lib.PipelineUid) -> None:
  """Logs a pipeline-level execution state transition by enum name."""
  old_name = metadata_store_pb2.Execution.State.Name(old_state)
  new_name = metadata_store_pb2.Execution.State.Name(new_state)
  logging.info('Changed pipeline execution state: %s -> %s; pipeline uid: %s',
               old_name, new_name, pipeline_uid)
166,306 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
def _log_node_state_change(old_state: str, new_state: str,
                           node_uid: task_lib.NodeUid) -> None:
  """Logs a node state transition for the given node uid."""
  # Lazy %-style args keep formatting cost out of the logging call site.
  logging.info(
      'Changed node state: %s -> %s; node uid: %s',
      old_state,
      new_state,
      node_uid,
  )
166,307 | import base64
import contextlib
import copy
import dataclasses
import functools
import json
import os
import threading
import time
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Set, Tuple
import uuid
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import env
from tfx.orchestration.experimental.core import event_observer
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import orchestration_options
from tfx.utils import metrics_utils
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import metadata_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.proto.orchestration import run_state_pb2
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from tfx.utils import telemetry_utils
from google.protobuf import message
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
class NodeState(json_utils.Jsonable):
  """Records node state.

  The `state` field is validated against the closed set of life-cycle
  constants below, and state transitions performed via `update()` are
  archived in `state_history` (capped at _MAX_STATE_HISTORY_LEN entries).

  NOTE(review): the fields below use `attr.ib`, so the class is presumably
  decorated with `@attr.s`-style machinery in the original source; the
  decorator (and `@property` / `@classmethod` on some methods) appears to
  have been stripped in this extract — confirm against the original file.

  Attributes:
    state: Current state of the node.
    status: Status of the node in state STOPPING or STOPPED.
  """
  # Life-cycle state constants. These string values are serialized (JSON /
  # persisted state), so they must remain stable.
  STARTED = 'started'  # Node is ready for execution.
  STOPPING = 'stopping'  # Pending work before state can change to STOPPED.
  STOPPED = 'stopped'  # Node execution is stopped.
  RUNNING = 'running'  # Node is under active execution (i.e. triggered).
  COMPLETE = 'complete'  # Node execution completed successfully.
  # Node execution skipped due to condition not satisfied when pipeline has
  # conditionals.
  SKIPPED = 'skipped'
  # Node execution skipped due to partial run.
  SKIPPED_PARTIAL_RUN = 'skipped_partial_run'
  FAILED = 'failed'  # Node execution failed due to errors.

  # Current state; re-validated on every assignment via on_setattr.
  state: str = attr.ib(
      default=STARTED,
      validator=attr.validators.in_([
          STARTED,
          STOPPING,
          STOPPED,
          RUNNING,
          COMPLETE,
          SKIPPED,
          SKIPPED_PARTIAL_RUN,
          FAILED,
      ]),
      on_setattr=attr.setters.validate,
  )
  # Token of the backfill this node state belongs to; '' when not backfilling.
  backfill_token: str = ''
  # Status code and message recorded together by update(); status_code is None
  # when no status was supplied.
  status_code: Optional[int] = None
  status_msg: str = ''
  # Seconds since epoch of the last state change.
  last_updated_time: float = attr.ib(factory=lambda: time.time())  # pylint:disable=unnecessary-lambda
  # Past state transitions, oldest first; maintained by update().
  state_history: List[StateRecord] = attr.ib(default=attr.Factory(list))

  # NOTE(review): presumably decorated with @property in the original source.
  def status(self) -> Optional[status_lib.Status]:
    """Returns the recorded Status, or None if no status code is set."""
    if self.status_code is not None:
      return status_lib.Status(code=self.status_code, message=self.status_msg)
    return None

  def update(
      self,
      state: str,
      status: Optional[status_lib.Status] = None,
      backfill_token: str = '',
  ) -> None:
    """Transitions the node to `state`, archiving the outgoing state.

    Args:
      state: New state for the node.
      status: Optional status to record alongside the new state.
      backfill_token: Backfill token associated with the new state, if any.
    """
    if self.state != state:
      # Archive the outgoing state before overwriting it.
      self.state_history.append(
          StateRecord(
              state=self.state,
              backfill_token=self.backfill_token,
              status_code=self.status_code,
              update_time=self.last_updated_time,
          )
      )
      # Bound the history to the most recent _MAX_STATE_HISTORY_LEN records.
      if len(self.state_history) > _MAX_STATE_HISTORY_LEN:
        self.state_history = self.state_history[-_MAX_STATE_HISTORY_LEN:]
      self.last_updated_time = time.time()

    self.state = state
    self.backfill_token = backfill_token
    # Status fields are always overwritten; cleared when no status is given.
    self.status_code = status.code if status is not None else None
    self.status_msg = (status.message or '') if status is not None else ''

  def is_startable(self) -> bool:
    """Returns True if the node can be started."""
    return self.state in set([self.STOPPING, self.STOPPED, self.FAILED])

  def is_stoppable(self) -> bool:
    """Returns True if the node can be stopped."""
    return self.state in set([self.STARTED, self.RUNNING])

  def is_backfillable(self) -> bool:
    """Returns True if the node can be backfilled."""
    return self.state in set([self.STOPPED, self.FAILED])

  def is_programmatically_skippable(self) -> bool:
    """Returns True if the node can be skipped via programmatic operation."""
    return self.state in set([self.STARTED, self.STOPPED])

  def is_success(self) -> bool:
    """Returns True if the current state counts as a success."""
    return is_node_state_success(self.state)

  def is_failure(self) -> bool:
    """Returns True if the current state counts as a failure."""
    return is_node_state_failure(self.state)

  def to_run_state(self) -> run_state_pb2.RunState:
    """Returns this NodeState converted to a RunState."""
    status_code_value = None
    if self.status_code is not None:
      status_code_value = run_state_pb2.RunState.StatusCodeValue(
          value=self.status_code)
    return run_state_pb2.RunState(
        # Unmapped states fall back to UNKNOWN.
        state=_NODE_STATE_TO_RUN_STATE_MAP.get(
            self.state, run_state_pb2.RunState.UNKNOWN
        ),
        status_code=status_code_value,
        status_msg=self.status_msg,
        # RunState.update_time is in milliseconds; last_updated_time is
        # seconds since epoch.
        update_time=int(self.last_updated_time * 1000),
    )

  def to_run_state_history(self) -> List[run_state_pb2.RunState]:
    """Converts the archived state_history into a list of RunState protos."""
    run_state_history = []
    for state in self.state_history:
      # STARTING, PAUSING and PAUSED has been deprecated but may still be
      # present in state_history.
      if (
          state.state == 'starting'
          or state.state == 'pausing'
          or state.state == 'paused'
      ):
        continue
      run_state_history.append(
          NodeState(
              state=state.state,
              status_code=state.status_code,
              last_updated_time=state.update_time).to_run_state())
    return run_state_history

  # By default, json_utils.Jsonable serializes and deserializes objects using
  # obj.__dict__, which prevents attr.ib from populating default fields.
  # Overriding this function to ensure default fields are populated.
  # NOTE(review): takes `cls`, so presumably a @classmethod in the original
  # source; the decorator appears stripped in this extract.
  def from_json_dict(cls, dict_data: Dict[str, Any]) -> Any:
    """Convert from dictionary data to an object."""
    return cls(**dict_data)

  def latest_predicate_time_s(self, predicate: Callable[[StateRecord], bool],
                              include_current_state: bool) -> Optional[int]:
    """Returns the latest time the StateRecord satisfies the given predicate.

    Args:
      predicate: Predicate that takes the state string.
      include_current_state: Whether to include the current node state when
        checking the node state history (the node state history doesn't include
        the current node state).

    Returns:
      The latest time (in the state history) the StateRecord satisfies the given
      predicate, or None if the predicate is never satisfied.
    """
    if include_current_state:
      # Synthesize a record for the current state, which is not in history.
      current_record = StateRecord(
          state=self.state,
          backfill_token=self.backfill_token,
          status_code=self.status_code,
          update_time=self.last_updated_time,
      )
      if predicate(current_record):
        return int(current_record.update_time)
    # Scan newest-first so the first match is the latest matching record.
    for s in reversed(self.state_history):
      if predicate(s):
        return int(s.update_time)
    return None

  def latest_running_time_s(self) -> Optional[int]:
    """Returns the latest time the node entered a RUNNING state.

    Returns:
      The latest time (in the state history) the node entered a RUNNING
      state, or None if the node never entered a RUNNING state.
    """
    return self.latest_predicate_time_s(
        lambda s: is_node_state_running(s.state), include_current_state=True)
def _notify_node_state_change(execution: metadata_store_pb2.Execution,
                              node_uid: task_lib.NodeUid, pipeline_run_id: str,
                              old_state: NodeState,
                              new_state: NodeState) -> None:
  """Publishes a NodeStateChange event to registered observers."""
  change_event = event_observer.NodeStateChange(
      execution=execution,
      pipeline_uid=node_uid.pipeline_uid,
      pipeline_run=pipeline_run_id,
      node_id=node_uid.node_id,
      old_state=old_state,
      new_state=new_state)
  event_observer.notify(change_event)
166,308 | import copy
import threading
from typing import Callable, Optional
from absl import flags
from absl import logging
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
def _update_pipeline_run_id(pipeline: pipeline_pb2.Pipeline, execution_id: int):
  """Rewrites pipeline run id in a given pipeline IR.

  The new run id is the old run id suffixed with `_<execution_id>`; every
  'pipeline_run' context reference in the IR is updated to match.
  """
  run_id_before = pipeline.runtime_spec.pipeline_run_id.field_value.string_value
  run_id_after = f'{run_id_before}_{execution_id}'

  def _rewrite_node(node: pipeline_pb2.PipelineNode):
    # Update the node's own 'pipeline_run' context.
    for context_spec in node.contexts.contexts:
      if (context_spec.type.name == 'pipeline_run' and
          context_spec.name.field_value.string_value == run_id_before):
        context_spec.name.field_value.string_value = run_id_after
    # Update 'pipeline_run' context queries on every input channel.
    for input_spec in node.inputs.inputs.values():
      for channel in input_spec.channels:
        for context_query in channel.context_queries:
          if (context_query.type.name == 'pipeline_run' and
              context_query.name.field_value.string_value == run_id_before):
            context_query.name.field_value.string_value = run_id_after

  _visit_pipeline_nodes_recursively(pipeline, _rewrite_node)
  pipeline.runtime_spec.pipeline_run_id.field_value.string_value = run_id_after
The provided code snippet includes necessary dependencies for implementing the `subpipeline_ir_rewrite` function. Write a Python function `def subpipeline_ir_rewrite(original_ir: pipeline_pb2.Pipeline, execution_id: int) -> pipeline_pb2.Pipeline` to solve the following problem:
Rewrites the subpipeline IR so that it can be run independently. Args: original_ir: Original subpipeline IR that is produced by the compiler. execution_id: The ID of the Subpipeline task scheduler Execution. It is used to generate a new pipeline run id. Returns: An updated subpipeline IR that can be run independently.
Here is the function:
def subpipeline_ir_rewrite(original_ir: pipeline_pb2.Pipeline,
                           execution_id: int) -> pipeline_pb2.Pipeline:
  """Rewrites the subpipeline IR so that it can be run independently.

  Args:
    original_ir: Original subpipeline IR that is produced by the compiler.
    execution_id: The ID of the Subpipeline task scheduler Execution. It is
      used to generate a new pipeline run id.

  Returns:
    An updated subpipeline IR that can be run independently.
  """
  rewritten = copy.deepcopy(original_ir)
  # Detach the subpipeline from its parent graph: drop the first node's
  # upstream edges and the last node's downstream edges.
  rewritten.nodes[0].pipeline_node.ClearField('upstream_nodes')
  rewritten.nodes[-1].pipeline_node.ClearField('downstream_nodes')
  _update_pipeline_run_id(rewritten, execution_id)
  return rewritten
166,309 | import collections
import textwrap
from typing import Callable, Dict, List, Mapping, Optional, Set
from absl import logging
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from tfx.utils import topsort
from ml_metadata.proto import metadata_store_pb2
The provided code snippet includes necessary dependencies for implementing the `_skipped_node_ids` function. Write a Python function `def _skipped_node_ids( node_states_dict: Dict[task_lib.NodeUid, pstate.NodeState] ) -> Set[str]` to solve the following problem:
Returns the nodes that are marked as skipped in partial run or by user.
Here is the function:
def _skipped_node_ids(
    node_states_dict: Dict[task_lib.NodeUid, pstate.NodeState]
) -> Set[str]:
  """Returns the nodes that are marked as skipped in partial run or by user."""
  skip_states = (
      pstate.NodeState.SKIPPED,
      pstate.NodeState.SKIPPED_PARTIAL_RUN,
  )
  return {
      node_uid.node_id
      for node_uid, node_state in node_states_dict.items()
      if node_state.state in skip_states
  }
166,310 | import collections
import textwrap
from typing import Callable, Dict, List, Mapping, Optional, Set
from absl import logging
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from tfx.utils import topsort
from ml_metadata.proto import metadata_store_pb2
def _node_by_id(
    pipeline: pipeline_pb2.Pipeline
) -> Dict[str, node_proto_view.NodeProtoView]:
  """Maps node id to its NodeProtoView for every node in the pipeline."""
  views = (node_proto_view.get_view(node) for node in pipeline.nodes)
  return {view.node_info.id: view for view in views}
The provided code snippet includes necessary dependencies for implementing the `_topsorted_layers` function. Write a Python function `def _topsorted_layers( pipeline: pipeline_pb2.Pipeline ) -> List[List[node_proto_view.NodeProtoView]]` to solve the following problem:
Returns pipeline nodes in topologically sorted layers.
Here is the function:
def _topsorted_layers(
    pipeline: pipeline_pb2.Pipeline
) -> List[List[node_proto_view.NodeProtoView]]:
  """Returns pipeline nodes in topologically sorted layers."""
  node_by_id = _node_by_id(pipeline)

  def _parents(node):
    # Resolve upstream node ids to their views.
    return [node_by_id[parent_id] for parent_id in node.upstream_nodes]

  def _children(node):
    # Resolve downstream node ids to their views.
    return [node_by_id[child_id] for child_id in node.downstream_nodes]

  return topsort.topsorted_layers(
      [node_proto_view.get_view(node) for node in pipeline.nodes],
      get_node_id_fn=lambda node: node.node_info.id,
      get_parent_nodes=_parents,
      get_child_nodes=_children)
166,311 | import collections
import textwrap
from typing import Callable, Dict, List, Mapping, Optional, Set
from absl import logging
from tfx.orchestration import node_proto_view
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from tfx.utils import topsort
from ml_metadata.proto import metadata_store_pb2
# Trigger strategies treated as "lazy" by _unrunnable_nodes: a lazy node whose
# downstream nodes are all unrunnable is itself marked unrunnable.
# NOTE(review): precise trigger semantics inferred from strategy names and
# usage below — confirm against NodeExecutionOptions proto docs.
_LAZY_TRIGGER_STRATEGIES = frozenset({
    pipeline_pb2.NodeExecutionOptions.LAZILY_ALL_UPSTREAM_NODES_SUCCEEDED,
    pipeline_pb2.NodeExecutionOptions.LAZILY_ALL_UPSTREAM_NODES_COMPLETED,
})
# Strategies that only require upstream nodes to have *completed* (not
# necessarily succeeded); nodes using them can make progress despite a failed
# upstream node, so failures do not propagate through them.
_UPSTREAM_SUCCESS_OPTIONAL_STRATEGIES = frozenset({
    pipeline_pb2.NodeExecutionOptions.ALL_UPSTREAM_NODES_COMPLETED,
    pipeline_pb2.NodeExecutionOptions.LAZILY_ALL_UPSTREAM_NODES_COMPLETED,
})
The provided code snippet includes necessary dependencies for implementing the `_unrunnable_nodes` function. Write a Python function `def _unrunnable_nodes( node_by_id: collections.OrderedDict[str, node_proto_view.NodeProtoView], failed_node_ids: Set[str], ) -> Set[str]` to solve the following problem:
Returns node_ids of all unrunnable descendant nodes for each member of the given failed_node_ids set.
Here is the function:
def _unrunnable_nodes(
    node_by_id: collections.OrderedDict[str, node_proto_view.NodeProtoView],
    failed_node_ids: Set[str],
) -> Set[str]:
  """Returns node_ids of all unrunnable descendant nodes for each member of the given failed_node_ids set.

  Performs a BFS from the downstream nodes of each failed node, stopping
  propagation at nodes whose trigger strategy tolerates upstream failure,
  then adds lazy nodes whose downstream nodes are all unrunnable.

  Args:
    node_by_id: Mapping of node id to node view. NOTE(review): the reverse
      scan at the end relies on downstream nodes appearing later in the
      mapping, i.e. presumably topological insertion order — confirm at
      call sites.
    failed_node_ids: Ids of the nodes that have failed.

  Returns:
    Ids of the nodes that can no longer run due to the given failures.
  """
  unrunnable = set()
  queue = collections.deque()
  # Seed the BFS with direct downstream nodes of each failure.
  for failed_node_id in failed_node_ids:
    for node_with_upstream_failure in node_by_id[
        failed_node_id
    ].downstream_nodes:
      # Nodes with an upstream success optional trigger strategy can make
      # progress despite a failed upstream node.
      if (
          node_by_id[node_with_upstream_failure].execution_options.strategy
          not in _UPSTREAM_SUCCESS_OPTIONAL_STRATEGIES
      ):
        queue.append(node_with_upstream_failure)
  while queue:
    q_node_id = queue.popleft()
    node = node_by_id[q_node_id]
    start_node = node.execution_options.resource_lifetime.lifetime_start
    # An "end" node of a resource lifetime may still need to run (to release
    # the resource) as long as its paired start node could have run.
    if (
        node.execution_options.strategy
        == pipeline_pb2.NodeExecutionOptions.LIFETIME_END_WHEN_SUBGRAPH_CANNOT_PROGRESS
        and not (start_node in failed_node_ids or start_node in unrunnable)
    ):
      logging.info(
          '%s is an end node that may still be run since its start node %s'
          ' was neither failed nor unrunnable. Not marking the end node nor'
          ' its descendants as unrunnable due to the failures of %s.',
          q_node_id,
          start_node,
          ', '.join(failed_node_ids),
      )
      continue
    # Only expand each node once to avoid revisiting shared descendants.
    if q_node_id not in unrunnable:
      queue.extend(node_by_id[q_node_id].downstream_nodes)
      unrunnable.add(q_node_id)
  # Lazy nodes whose descendants are all unrunnable are also unrunnable, so we
  # need to add them here.
  # We go over the dictionary in reverse order so that lazy nodes that are
  # downstream of other lazy nodes are checked in (reverse) order.
  for node_id, node in reversed(node_by_id.items()):
    if (
        node.execution_options.strategy in _LAZY_TRIGGER_STRATEGIES
        and node.downstream_nodes
        and all(
            downstream in unrunnable for downstream in node.downstream_nodes
        )
    ):
      unrunnable.add(node_id)
  return unrunnable
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.