code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
"""Executor specifications for components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from typing import List, Optional, Text, Union
from tfx import types
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import executor_spec
from tfx.dsl.placeholder import placeholder
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import placeholder_pb2
from google.protobuf import message
class TemplatedExecutorContainerSpec(executor_spec.ExecutorSpec):
  """Experimental: Describes a command-line program inside a container.

  This class is similar to ExecutorContainerSpec, but uses structured
  placeholders instead of jinja templates for constructing container commands
  based on input and output artifact metadata. See placeholders.py for a list
  of supported placeholders.

  The spec includes the container image name and the command line
  (entrypoint plus arguments) for a program inside the container.

  Example:

    class MyTrainer(base_component.BaseComponent)
      class MyTrainerSpec(types.ComponentSpec):
        INPUTS = {
            'training_data':
                component_spec.ChannelParameter(type=standard_artifacts.Dataset),
        }
        OUTPUTS = {
            'model':
                component_spec.ChannelParameter(type=standard_artifacts.Model),
        }
        PARAMETERS = {
            'num_training_steps': component_spec.ExecutionParameter(type=int),
        }

      SPEC_CLASS = MyTrainerSpec
      EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
          image='gcr.io/my-project/my-trainer',
          command=[
              'python3', 'my_trainer',
              '--training_data_uri', InputUriPlaceholder('training_data'),
              '--model_uri', OutputUriPlaceholder('model'),
              '--num_training-steps', InputValuePlaceholder('num_training_steps'),
          ]
      )

  Attributes:
    image: Container image name.
    command: Container entrypoint command-line. Not executed within a shell.
      The command-line can use placeholder objects that will be replaced at
      the compilation time. Note: Jinja templates are not supported.
  """

  # The "command" parameter holds the name of the program and its arguments.
  # The "command" parameter is required to enable instrumentation.
  # The command-line is often split into command+args, but here "args" would be
  # redundant since all items can just be added to "command".
  def __init__(
      self,
      image: Text,
      command: List[placeholders.CommandlineArgumentType],
  ):
    self.image = image
    self.command = command
    super(TemplatedExecutorContainerSpec, self).__init__()

  def __eq__(self, other) -> bool:
    return (isinstance(other, self.__class__) and self.image == other.image and
            self.command == other.command)

  def __ne__(self, other) -> bool:
    return not self.__eq__(other)

  def _recursively_encode(
      self, command: placeholders.CommandlineArgumentType
  ) -> Union[str, placeholder.Placeholder]:
    """Converts a single command-line element into a str or a Placeholder.

    Plain strings pass through unchanged; structured placeholder objects are
    translated into the equivalent `placeholder.Placeholder` expressions.

    Args:
      command: One command-line element (str or a placeholders.* object).

    Returns:
      The element as a str, or the equivalent Placeholder expression.

    Raises:
      TypeError: If the element is not a supported command-line type.
    """
    if isinstance(command, str):
      return command
    elif isinstance(command, placeholders.InputValuePlaceholder):
      return placeholder.input(command.input_name)[0]
    elif isinstance(command, placeholders.InputUriPlaceholder):
      return placeholder.input(command.input_name)[0].uri
    elif isinstance(command, placeholders.OutputUriPlaceholder):
      return placeholder.output(command.output_name)[0].uri
    elif isinstance(command, placeholders.ConcatPlaceholder):
      # operator.add will use the overloaded __add__ operator for Placeholder
      # instances.
      return functools.reduce(
          operator.add,
          [self._recursively_encode(item) for item in command.items])
    else:
      raise TypeError(
          ('Unsupported type of command-line arguments: "{}".'
           ' Supported types are {}.')
          .format(type(command), str(placeholders.CommandlineArgumentType)))

  def encode(
      self,
      component_spec: Optional[types.ComponentSpec] = None) -> message.Message:
    """Encodes ExecutorSpec into an IR proto for compiling.

    This method will be used by DSL compiler to generate the corresponding IR.

    Args:
      component_spec: Optional. The ComponentSpec to help with the encoding.

    Returns:
      An executor spec proto.
    """
    result = executable_spec_pb2.ContainerExecutableSpec()
    result.image = self.image
    for command in self.command:
      cmd = result.commands.add()
      str_or_placeholder = self._recursively_encode(command)
      if isinstance(str_or_placeholder, str):
        expression = placeholder_pb2.PlaceholderExpression()
        expression.value.string_value = str_or_placeholder
        cmd.CopyFrom(expression)
      else:
        # Reuse the already-computed encoding instead of re-encoding the
        # command a second time (previous code called _recursively_encode
        # twice per placeholder element).
        cmd.CopyFrom(str_or_placeholder.encode())
    return result
"""Utilities to evaluate and resolve Placeholders."""
import base64
import re
from typing import Any, Callable, Dict, Union
from absl import logging
import attr
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import value_artifact
from google.protobuf import descriptor_pool
from google.protobuf import json_format
from google.protobuf import message
from google.protobuf import message_factory
from google.protobuf import text_format
class NullDereferenceError(Exception):
  """Raised by the ExpressionResolver when dereferencing None or empty list."""

  def __init__(self, placeholder):
    super().__init__()
    # Retain the offending placeholder proto so callers can include it in logs.
    self.placeholder = placeholder
@attr.s(auto_attribs=True, frozen=True)
class ResolutionContext:
  """A struct to store information needed for resolution.

  Attributes:
    exec_info: An ExecutionInfo object that includes needed information to
      render all kinds of placeholders.
    executor_spec: An executor spec proto for rendering context placeholder.
    platform_config: A platform config proto for rendering context placeholder.
  """
  # All fields default to None; callers populate only what they need. The
  # class is frozen, so a context is immutable once constructed.
  exec_info: data_types.ExecutionInfo = None
  executor_spec: message.Message = None
  platform_config: message.Message = None
# Includes three basic types from MLMD: int, float, str
# and an additional primitive type from proto field access: bool.
# Note: Pytype's int includes long from Python3.
# We do not support bytes, which may result from proto field access. Callers
# must use the base64 encode operator to explicitly convert bytes into str.
_PlaceholderResolvedTypes = (int, float, str, bool, type(None))
# Union type hint covering every type a resolved placeholder may have.
_PlaceholderResolvedTypeHints = Union[_PlaceholderResolvedTypes]
def resolve_placeholder_expression(
    expression: placeholder_pb2.PlaceholderExpression,
    context: ResolutionContext) -> _PlaceholderResolvedTypeHints:
  """Evaluates a placeholder expression using the given context.

  Normally the resolved value will be used as command line flags in strings.
  This function does not automatically perform the string conversion, i.e.,
  the return type is the same as the type the value originally has. Currently
  it can be:
    exec property supported primitive types: int, float, string;
    if the proto operator is used: a serialized proto message, or proto
    primitive fields.
  The caller needs to perform desired string conversions.

  Args:
    expression: A placeholder expression to be resolved.
    context: Information needed to resolve the expression.

  Returns:
    Resolved expression value, or None when a None/missing optional value was
    dereferenced during evaluation.

  Raises:
    ValueError: If the context is incomplete, resolution fails, or the result
      is not one of the supported primitive types.
  """
  if not context.exec_info.pipeline_node or not context.exec_info.pipeline_info:
    raise ValueError(
        "Pipeline node or pipeline info is missing from the placeholder ResolutionContext."
    )
  try:
    result = _ExpressionResolver(context).resolve(expression)
  except NullDereferenceError as err:
    # Dereferencing a missing optional input/property is not an error:
    # resolve the whole expression to None and let the caller decide.
    logging.warning(
        "Dereferenced None during placeholder evaluation. Ignoring.")
    logging.warning("Placeholder=%s", err.placeholder)
    return None
  except Exception as e:
    # Wrap any other failure with a human-readable rendering of the
    # expression for easier debugging.
    raise ValueError(
        f"Failed to resolve placeholder expression: {debug_str(expression)}"
    ) from e
  if not isinstance(result, _PlaceholderResolvedTypes):
    raise ValueError(f"Placeholder {debug_str(expression)} evaluates to "
                     f"an unsupported type: {type(result)}.")
  return result
# Dictionary of registered placeholder operators,
# maps from operator proto type names to actual operator functions.
_PLACEHOLDER_OPERATORS: Dict[str, Callable[..., Any]] = {}
def _register(op_proto):
"""Decorator function for registering operators. Internal in this module."""
def decorator(op: Callable[..., Any]):
_PLACEHOLDER_OPERATORS[op_proto.DESCRIPTOR.name] = op
return op
return decorator
class _ExpressionResolver:
  """Utility class to resolve Placeholder expressions.

  Placeholder expression is defined as a proto structure
  placeholder_pb2.PlaceholderExpression. It can be resolved with
  ResolutionContext to a concrete value.
  """

  def __init__(self, context: ResolutionContext):
    # Maps each placeholder type to the lookup table used to dereference it.
    self._resolution_values = {
        placeholder_pb2.Placeholder.Type.INPUT_ARTIFACT:
            context.exec_info.input_dict,
        placeholder_pb2.Placeholder.Type.OUTPUT_ARTIFACT:
            context.exec_info.output_dict,
        placeholder_pb2.Placeholder.Type.EXEC_PROPERTY:
            context.exec_info.exec_properties,
        placeholder_pb2.Placeholder.Type.RUNTIME_INFO: {
            ph.RuntimeInfoKey.EXECUTOR_SPEC.value: context.executor_spec,
            ph.RuntimeInfoKey.PLATFORM_CONFIG.value: context.platform_config,
        },
        placeholder_pb2.Placeholder.Type.EXEC_INVOCATION:
            context.exec_info.to_proto(),
    }

  def resolve(self, expression: placeholder_pb2.PlaceholderExpression) -> Any:
    """Recursively evaluates a placeholder expression."""
    if expression.HasField("value"):
      # Leaf literal: return whichever member of the `value` oneof is set.
      return getattr(expression.value, expression.value.WhichOneof("value"))
    elif expression.HasField("placeholder"):
      return self._resolve_placeholder(expression.placeholder)
    elif expression.HasField("operator"):
      return self._resolve_placeholder_operator(expression.operator)
    else:
      raise ValueError("Unexpected placeholder expression type: "
                       f"{expression.WhichOneof('expression_type')}.")

  def _resolve_placeholder(self,
                           placeholder: placeholder_pb2.Placeholder) -> Any:
    """Evaluates a placeholder using the contexts."""
    try:
      context = self._resolution_values[placeholder.type]
    except KeyError as e:
      raise KeyError(
          f"Unsupported placeholder type: {placeholder.type}.") from e

    # Handle the special case of EXEC_INVOCATION placeholders, which don't
    # take a key.
    if placeholder.type == placeholder_pb2.Placeholder.Type.EXEC_INVOCATION:
      return context

    # Handle remaining placeholder types.
    try:
      return context[placeholder.key]
    except KeyError as e:
      # Handle placeholders that access a missing optional channel or exec
      # property. In both cases the requested key will not be present in the
      # context. However this means we cannot distinguish between a correct
      # placeholder with an optional value vs. an incorrect placeholder.
      # TODO(b/172001324): Handle this at compile time.
      raise NullDereferenceError(placeholder)

  def _resolve_placeholder_operator(
      self, placeholder_operator: placeholder_pb2.PlaceholderExpressionOperator
  ) -> Any:
    """Evaluates a placeholder operator by dispatching to operator methods."""
    operator_name = placeholder_operator.WhichOneof("operator_type")
    operator_pb = getattr(placeholder_operator, operator_name)
    try:
      # Dispatch on the operator proto's message name, as registered by the
      # @_register decorators below.
      operator_fn = _PLACEHOLDER_OPERATORS[operator_pb.DESCRIPTOR.name]
    except KeyError as e:
      raise KeyError(
          f"Unsupported placeholder operator: {operator_pb.DESCRIPTOR.name}."
      ) from e
    return operator_fn(self, operator_pb)

  @_register(placeholder_pb2.ArtifactUriOperator)
  def _resolve_artifact_uri_operator(
      self, op: placeholder_pb2.ArtifactUriOperator) -> str:
    """Evaluates the artifact URI operator."""
    resolved_artifact = self.resolve(op.expression)
    if resolved_artifact is None:
      raise NullDereferenceError(op.expression)
    if not isinstance(resolved_artifact, artifact.Artifact):
      raise ValueError("ArtifactUriOperator expects the expression "
                       "to evaluate to an artifact. "
                       f"Got {type(resolved_artifact)}")
    if op.split:
      return artifact_utils.get_split_uri([resolved_artifact], op.split)
    else:
      return resolved_artifact.uri

  @_register(placeholder_pb2.ArtifactValueOperator)
  def _resolve_artifact_value_operator(
      self, op: placeholder_pb2.ArtifactValueOperator) -> str:
    """Evaluates the artifact value operator."""
    resolved_artifact = self.resolve(op.expression)
    if resolved_artifact is None:
      raise NullDereferenceError(op.expression)
    if not isinstance(resolved_artifact, value_artifact.ValueArtifact):
      # NOTE(review): this error message text is corrupted ("README.ml-..."
      # where "a" was intended) and is missing a space before "Got"; fix in a
      # dedicated change since it alters runtime output.
      raise ValueError("ArtifactValueOperator expects the expression "
                       "to evaluate to README.ml-pipelines-sdk.md value artifact."
                       f"Got {type(resolved_artifact)}")
    return resolved_artifact.read()

  @_register(placeholder_pb2.ConcatOperator)
  def _resolve_concat_operator(self, op: placeholder_pb2.ConcatOperator) -> str:
    """Evaluates the concat operator."""
    parts = []
    for e in op.expressions:
      value = self.resolve(e)
      if value is None:
        raise NullDereferenceError(e)
      parts.append(value)
    # Non-str parts (ints, floats, bools) are stringified before joining.
    return "".join(str(part) for part in parts)

  @_register(placeholder_pb2.IndexOperator)
  def _resolve_index_operator(self, op: placeholder_pb2.IndexOperator) -> Any:
    """Evaluates the index operator."""
    value = self.resolve(op.expression)
    # Treat both None and empty containers as a null dereference.
    if value is None or not value:
      raise NullDereferenceError(op.expression)
    try:
      return value[op.index]
    except (TypeError, IndexError) as e:
      raise ValueError(
          f"IndexOperator failed to access the given index {op.index}.") from e

  @_register(placeholder_pb2.Base64EncodeOperator)
  def _resolve_base64_encode_operator(
      self, op: placeholder_pb2.Base64EncodeOperator) -> str:
    """Evaluates the Base64 encode operator."""
    value = self.resolve(op.expression)
    if value is None:
      raise NullDereferenceError(op.expression)
    if isinstance(value, str):
      # URL-safe alphabet, so the result can be embedded in paths and flags.
      return base64.urlsafe_b64encode(value.encode()).decode("ascii")
    elif isinstance(value, bytes):
      return base64.urlsafe_b64encode(value).decode("ascii")
    else:
      raise ValueError(
          f"Failed to Base64 encode {value} of type {type(value)}.")

  @_register(placeholder_pb2.ProtoOperator)
  def _resolve_proto_operator(
      self,
      op: placeholder_pb2.ProtoOperator) -> Union[int, float, str, bool, bytes]:
    """Evaluates the proto operator."""
    raw_message = self.resolve(op.expression)
    if raw_message is None:
      raise NullDereferenceError(op.expression)

    if isinstance(raw_message, str):
      # We need descriptor pool to parse encoded raw messages.
      pool = descriptor_pool.Default()
      for file_descriptor in op.proto_schema.file_descriptors.file:
        pool.Add(file_descriptor)
      message_descriptor = pool.FindMessageTypeByName(
          op.proto_schema.message_type)
      factory = message_factory.MessageFactory(pool)
      message_type = factory.GetPrototype(message_descriptor)
      value = message_type()
      # NOTE(review): string payloads are parsed as JSON here — presumably
      # upstream always JSON-serializes proto-valued exec properties; confirm.
      json_format.Parse(raw_message, value, descriptor_pool=pool)
    elif isinstance(raw_message, message.Message):
      # Message such as platform config should not be encoded.
      value = raw_message
    else:
      raise ValueError(
          f"Got unsupported value type for proto operator: {type(raw_message)}."
      )

    if op.proto_field_path:
      for field in op.proto_field_path:
        # ".field" form: plain attribute access on the message.
        if field.startswith("."):
          try:
            value = getattr(value, field[1:])
          except AttributeError:
            raise ValueError("While evaluting placeholder proto operator, "
                             f"got unknown proto field {field}.")
          continue
        # "['key']" form: lookup in a proto map field.
        map_key = re.findall(r"\[['\"](.+)['\"]\]", field)
        if len(map_key) == 1:
          try:
            value = value[map_key[0]]
          except KeyError:
            raise ValueError("While evaluting placeholder proto operator, "
                             f"got unknown map field {field}.")
          continue
        # "[0]" form: index into a repeated field.
        index = re.findall(r"\[(\d+)\]", field)
        if index and str.isdecimal(index[0]):
          try:
            value = value[int(index[0])]
          except IndexError:
            raise ValueError("While evaluting placeholder proto operator, "
                             f"got unknown index field {field}.")
          continue
        raise ValueError(f"Got unsupported proto field path: {field}")

    # Non-message primitive values are returned directly.
    if isinstance(value, (int, float, str, bool, bytes)):
      return value

    if not isinstance(value, message.Message):
      raise ValueError(f"Got unsupported value type {type(value)} "
                       "from accessing proto field path.")

    # For message-typed values, we need to consider serialization format.
    if op.serialization_format:
      if op.serialization_format == placeholder_pb2.ProtoOperator.JSON:
        return json_format.MessageToJson(
            message=value, sort_keys=True, preserving_proto_field_name=True)
      if op.serialization_format == placeholder_pb2.ProtoOperator.TEXT_FORMAT:
        return text_format.MessageToString(value)
      if op.serialization_format == placeholder_pb2.ProtoOperator.BINARY:
        return value.SerializeToString()

    # NOTE(review): this error message text is corrupted ("README.ml-..."
    # where "a" was intended); fix separately since it alters runtime output.
    raise ValueError(
        "Proto operator resolves to README.ml-pipelines-sdk.md proto message value. A serialization "
        "format is needed to render it.")
def debug_str(expression: placeholder_pb2.PlaceholderExpression) -> str:
  """Gets the debug string of a placeholder expression proto.

  Renders the expression as a human-readable, Python-like string, e.g.
  `(input("examples")[0].uri + "/data")`. Intended for error messages.

  Args:
    expression: A placeholder expression proto.

  Returns:
    Debug string of the placeholder expression.
  """
  if expression.HasField("value"):
    value_field_name = expression.value.WhichOneof("value")
    return f"\"{getattr(expression.value, value_field_name)}\""

  if expression.HasField("placeholder"):
    placeholder_pb = expression.placeholder
    ph_names_map = {
        placeholder_pb2.Placeholder.INPUT_ARTIFACT: "input",
        placeholder_pb2.Placeholder.OUTPUT_ARTIFACT: "output",
        placeholder_pb2.Placeholder.EXEC_PROPERTY: "exec_property",
        placeholder_pb2.Placeholder.RUNTIME_INFO: "runtime_info",
        placeholder_pb2.Placeholder.EXEC_INVOCATION: "execution_invocation"
    }
    ph_name = ph_names_map[placeholder_pb.type]
    if placeholder_pb.key:
      return f"{ph_name}(\"{placeholder_pb.key}\")"
    else:
      # EXEC_INVOCATION (and any keyless placeholder) renders without a key.
      return f"{ph_name}()"

  if expression.HasField("operator"):
    operator_name = expression.operator.WhichOneof("operator_type")
    operator_pb = getattr(expression.operator, operator_name)
    if operator_name == "artifact_uri_op":
      sub_expression_str = debug_str(operator_pb.expression)
      if operator_pb.split:
        return f"{sub_expression_str}.split_uri(\"{operator_pb.split}\")"
      else:
        return f"{sub_expression_str}.uri"
    if operator_name == "artifact_value_op":
      sub_expression_str = debug_str(operator_pb.expression)
      return f"{sub_expression_str}.value"
    if operator_name == "concat_op":
      expression_str = " + ".join(debug_str(e) for e in operator_pb.expressions)
      return f"({expression_str})"
    if operator_name == "index_op":
      sub_expression_str = debug_str(operator_pb.expression)
      return f"{sub_expression_str}[{operator_pb.index}]"
    if operator_name == "proto_op":
      sub_expression_str = debug_str(operator_pb.expression)
      field_path = "".join(operator_pb.proto_field_path)
      expression_str = f"{sub_expression_str}{field_path}"
      if operator_pb.serialization_format:
        format_str = placeholder_pb2.ProtoOperator.SerializationFormat.Name(
            operator_pb.serialization_format)
        return f"{expression_str}.serialize({format_str})"
      return expression_str
    if operator_name == "base64_encode_op":
      sub_expression_str = debug_str(operator_pb.expression)
      return f"{sub_expression_str}.b64encode()"
    # Fixed typo: was "Unkown placeholder operator".
    return "Unknown placeholder operator"

  return "Unknown placeholder expression"
"""Utility functions for DSL Compiler."""
# TODO(b/149535307): Remove __future__ imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Optional, Text, Type
from tfx import types
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
def set_runtime_parameter_pb(
    pb: pipeline_pb2.RuntimeParameter,
    name: Text,
    ptype: Type[types.Property],
    default_value: Optional[types.Property] = None
) -> pipeline_pb2.RuntimeParameter:
  """Helper function to fill a RuntimeParameter proto.

  Args:
    pb: A RuntimeParameter proto to be filled in.
    name: Name to be set at pb.name.
    ptype: The Python type to be set at pb.type.
    default_value: Optional. If provided, it will be pb.default_value.

  Returns:
    A RuntimeParameter proto filled with provided values.

  Raises:
    ValueError: If `ptype` is not one of int, float, str.
  """
  pb.name = name
  # Bug fix: compare against None instead of truthiness, so that falsy but
  # valid defaults (0, 0.0, "") are still recorded in the proto.
  if ptype == int:
    pb.type = pipeline_pb2.RuntimeParameter.Type.INT
    if default_value is not None:
      pb.default_value.int_value = default_value
  elif ptype == float:
    pb.type = pipeline_pb2.RuntimeParameter.Type.DOUBLE
    if default_value is not None:
      pb.default_value.double_value = default_value
  elif ptype == str:
    pb.type = pipeline_pb2.RuntimeParameter.Type.STRING
    if default_value is not None:
      pb.default_value.string_value = default_value
  else:
    raise ValueError("Got unsupported runtime parameter type: {}".format(ptype))
  return pb
def resolve_execution_mode(tfx_pipeline: pipeline.Pipeline):
  """Resolves execution mode for a tfx pipeline.

  Args:
    tfx_pipeline: A TFX pipeline python object assembled by SDK.

  Returns:
    A proto enum reflecting the execution mode of the pipeline.

  Raises:
    RuntimeError: when execution mode is ASYNC while `enable_cache` is true.
    ValueError: when seeing unrecognized execution mode.
  """
  if tfx_pipeline.execution_mode == pipeline.ExecutionMode.SYNC:
    return pipeline_pb2.Pipeline.ExecutionMode.SYNC
  elif tfx_pipeline.execution_mode == pipeline.ExecutionMode.ASYNC:
    if tfx_pipeline.enable_cache:
      # Caching relies on the notion of a pipeline run, which only exists in
      # SYNC mode. (Error text repaired; it had been corrupted.)
      raise RuntimeError(
          "Caching is a feature only available to synchronous execution "
          "pipelines.")
    return pipeline_pb2.Pipeline.ExecutionMode.ASYNC
  else:
    raise ValueError(
        f"Got unsupported execution mode: {tfx_pipeline.execution_mode}")
def is_resolver(node: base_node.BaseNode) -> bool:
  """Helper function to check if a TFX node is a Resolver."""
  return isinstance(node, resolver.Resolver)
def is_importer(node: base_node.BaseNode) -> bool:
  """Helper function to check if a TFX node is an Importer."""
  return isinstance(node, importer.Importer)
def ensure_topological_order(nodes: List[base_node.BaseNode]) -> bool:
  """Checks that `nodes` lists every node after all of its upstream nodes."""
  seen = set()
  for current in nodes:
    # Every upstream dependency must already have appeared in the sequence.
    if any(parent not in seen for parent in current.upstream_nodes):
      return False
    seen.add(current)
  return True
"""Compiles README.ml-pipelines-sdk.md TFX pipeline into README.ml-pipelines-sdk.md TFX DSL IR proto."""
import json
import re
from typing import cast, Iterable, List, Mapping
from tfx import types
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import json_utils
from ml_metadata.proto import metadata_store_pb2
class _CompilerContext(object):
  """Carries the shared state required while compiling one pipeline."""

  def __init__(self, pipeline_info: data_types.PipelineInfo,
               execution_mode: pipeline_pb2.Pipeline.ExecutionMode,
               topological_order: Mapping[str, int]):
    self.pipeline_info = pipeline_info
    self.execution_mode = execution_mode
    # Node protos compiled so far, keyed by node id.
    self.node_pbs = {}
    self._topological_order = topological_order

  @classmethod
  def from_tfx_pipeline(cls, tfx_pipeline: pipeline.Pipeline):
    """Builds a compiler context from a TFX pipeline object."""
    # 1-based position of each node in the pipeline's component order.
    order = {
        node.id: position
        for position, node in enumerate(tfx_pipeline.components, start=1)
    }
    return cls(
        pipeline_info=tfx_pipeline.pipeline_info,
        execution_mode=compiler_utils.resolve_execution_mode(tfx_pipeline),
        topological_order=order)

  def topologically_sorted(self, tfx_nodes: Iterable[base_node.BaseNode]):
    """Returns `tfx_nodes` ordered by their topological position."""

    def _position(tfx_node):
      return self._topological_order[tfx_node.id]

    return sorted(tfx_nodes, key=_position)

  @property
  def is_sync_mode(self):
    return self.execution_mode == pipeline_pb2.Pipeline.SYNC

  @property
  def is_async_mode(self):
    return self.execution_mode == pipeline_pb2.Pipeline.ASYNC
class Compiler(object):
"""Compiles README.ml-pipelines-sdk.md TFX pipeline or README.ml-pipelines-sdk.md component into README.ml-pipelines-sdk.md uDSL IR proto."""
def _compile_importer_node_outputs(self, tfx_node: base_node.BaseNode,
node_pb: pipeline_pb2.PipelineNode):
"""Compiles the outputs of an importer node."""
for key, value in tfx_node.outputs.items():
output_spec = node_pb.outputs.outputs[key]
artifact_type = value.type._get_artifact_type() # pylint: disable=protected-access
output_spec.artifact_spec.type.CopyFrom(artifact_type)
# Attach additional properties for artifacts produced by importer nodes.
for property_name, property_value in tfx_node.exec_properties[
importer.PROPERTIES_KEY].items():
_check_property_value_type(property_name, property_value, artifact_type)
value_field = output_spec.artifact_spec.additional_properties[
property_name].field_value
try:
data_types_utils.set_metadata_value(value_field, property_value)
except ValueError:
raise ValueError(
"Component {} got unsupported parameter {} with type {}.".format(
tfx_node.id, property_name, type(property_value)))
for property_name, property_value in tfx_node.exec_properties[
importer.CUSTOM_PROPERTIES_KEY].items():
value_field = output_spec.artifact_spec.additional_custom_properties[
property_name].field_value
try:
data_types_utils.set_metadata_value(value_field, property_value)
except ValueError:
raise ValueError(
"Component {} got unsupported parameter {} with type {}.".format(
tfx_node.id, property_name, type(property_value)))
  def _compile_node(
      self, tfx_node: base_node.BaseNode, compile_context: _CompilerContext,
      deployment_config: pipeline_pb2.IntermediateDeploymentConfig,
      enable_cache: bool,
  ) -> pipeline_pb2.PipelineNode:
    """Compiles an individual TFX node into a PipelineNode proto.

    Args:
      tfx_node: A TFX node.
      compile_context: Resources needed to compile the node.
      deployment_config: Intermediate deployment config to set. Will include
        related specs for executors, drivers and platform specific configs.
      enable_cache: whether cache is enabled

    Raises:
      TypeError: When supplied tfx_node has values of invalid type.

    Returns:
      A PipelineNode proto that encodes information of the node.
    """
    node = pipeline_pb2.PipelineNode()

    # Step 1: Node info
    node.node_info.type.name = tfx_node.type
    node.node_info.id = tfx_node.id

    # Step 2: Node Context
    # Context for the pipeline, across pipeline runs.
    pipeline_context_pb = node.contexts.contexts.add()
    pipeline_context_pb.type.name = constants.PIPELINE_CONTEXT_TYPE_NAME
    pipeline_context_pb.name.field_value.string_value = compile_context.pipeline_info.pipeline_context_name

    # Context for the current pipeline run. The run id is only known at
    # runtime, so it is recorded as a runtime parameter to be filled in later.
    if compile_context.is_sync_mode:
      pipeline_run_context_pb = node.contexts.contexts.add()
      pipeline_run_context_pb.type.name = constants.PIPELINE_RUN_CONTEXT_TYPE_NAME
      compiler_utils.set_runtime_parameter_pb(
          pipeline_run_context_pb.name.runtime_parameter,
          constants.PIPELINE_RUN_ID_PARAMETER_NAME, str)

    # Context for the node, across pipeline runs.
    node_context_pb = node.contexts.contexts.add()
    node_context_pb.type.name = constants.NODE_CONTEXT_TYPE_NAME
    node_context_pb.name.field_value.string_value = "{}.{}".format(
        compile_context.pipeline_info.pipeline_context_name, node.node_info.id)

    # Pre Step 3: Alter graph topology if needed.
    if compile_context.is_async_mode:
      tfx_node_inputs = self._compile_resolver_config(
          compile_context, tfx_node, node)
    else:
      tfx_node_inputs = tfx_node.inputs

    # Step 3: Node inputs
    for key, value in tfx_node_inputs.items():
      input_spec = node.inputs.inputs[key]
      channel = input_spec.channels.add()
      if value.producer_component_id:
        channel.producer_node_query.id = value.producer_component_id

        # Here we rely on pipeline.components to be topologically sorted.
        assert value.producer_component_id in compile_context.node_pbs, (
            "producer component should have already been compiled.")
        producer_pb = compile_context.node_pbs[value.producer_component_id]
        for producer_context in producer_pb.contexts.contexts:
          # For Resolver nodes, skip the producer's pipeline-run context so
          # resolution is not restricted to the current run.
          # NOTE(review): this compares the runtime parameter *name* against a
          # context *type* name constant — looks suspicious; confirm intent.
          if (not compiler_utils.is_resolver(tfx_node) or
              producer_context.name.runtime_parameter.name !=
              constants.PIPELINE_RUN_CONTEXT_TYPE_NAME):
            context_query = channel.context_queries.add()
            context_query.type.CopyFrom(producer_context.type)
            context_query.name.CopyFrom(producer_context.name)
      else:
        # Caveat: portable core requires every channel to have at least one
        # Context. But for cases like system nodes and producer-consumer
        # pipelines, a channel may not have contexts at all. In these cases,
        # we want to use the pipeline level context as the input channel
        # context.
        context_query = channel.context_queries.add()
        context_query.type.CopyFrom(pipeline_context_pb.type)
        context_query.name.CopyFrom(pipeline_context_pb.name)

      artifact_type = value.type._get_artifact_type()  # pylint: disable=protected-access
      channel.artifact_query.type.CopyFrom(artifact_type)
      channel.artifact_query.type.ClearField("properties")

      if value.output_key:
        channel.output_key = value.output_key

      # TODO(b/158712886): Calculate min_count based on if inputs are optional.
      # min_count = 0 stands for optional input and 1 stands for required input.

    # Step 3.1: Special treatment for Resolver node.
    if compiler_utils.is_resolver(tfx_node):
      assert compile_context.is_sync_mode
      node.inputs.resolver_config.resolver_steps.extend(
          _convert_to_resolver_steps(tfx_node))

    # Step 4: Node outputs
    if isinstance(tfx_node, base_component.BaseComponent):
      for key, value in tfx_node.outputs.items():
        output_spec = node.outputs.outputs[key]
        artifact_type = value.type._get_artifact_type()  # pylint: disable=protected-access
        output_spec.artifact_spec.type.CopyFrom(artifact_type)
        # Declared (schema) properties are type-checked; custom ones are not.
        for prop_key, prop_value in value.additional_properties.items():
          _check_property_value_type(prop_key, prop_value,
                                     output_spec.artifact_spec.type)
          data_types_utils.set_metadata_value(
              output_spec.artifact_spec.additional_properties[prop_key]
              .field_value, prop_value)
        for prop_key, prop_value in value.additional_custom_properties.items():
          data_types_utils.set_metadata_value(
              output_spec.artifact_spec.additional_custom_properties[prop_key]
              .field_value, prop_value)

    # TODO(b/170694459): Refactor special nodes as plugins.
    # Step 4.1: Special treatment for Importer node
    if compiler_utils.is_importer(tfx_node):
      self._compile_importer_node_outputs(tfx_node, node)

    # Step 5: Node parameters
    if not compiler_utils.is_resolver(tfx_node):
      for key, value in tfx_node.exec_properties.items():
        if value is None:
          continue
        # Ignore following two properties for an importer node, because they
        # are already attached to the artifacts produced by the importer node.
        if compiler_utils.is_importer(tfx_node) and (
            key == importer.PROPERTIES_KEY or
            key == importer.CUSTOM_PROPERTIES_KEY):
          continue
        parameter_value = node.parameters.parameters[key]

        # Order matters, because runtime parameter can be in serialized string.
        if isinstance(value, data_types.RuntimeParameter):
          compiler_utils.set_runtime_parameter_pb(
              parameter_value.runtime_parameter, value.name, value.ptype,
              value.default)
        elif isinstance(value, str) and re.search(
            data_types.RUNTIME_PARAMETER_PATTERN, value):
          # NOTE(review): json.loads is expected to yield an object with
          # .name/.ptype/.default — presumably via a custom decode hook
          # upstream (plain dicts would not have attributes); confirm.
          runtime_param = json.loads(value)
          compiler_utils.set_runtime_parameter_pb(
              parameter_value.runtime_parameter, runtime_param.name,
              runtime_param.ptype, runtime_param.default)
        else:
          try:
            data_types_utils.set_metadata_value(parameter_value.field_value,
                                                value)
          except ValueError:
            raise ValueError(
                "Component {} got unsupported parameter {} with type {}."
                .format(tfx_node.id, key, type(value)))

    # Step 6: Executor spec and optional driver spec for components
    if isinstance(tfx_node, base_component.BaseComponent):
      executor_spec = tfx_node.executor_spec.encode(
          component_spec=tfx_node.spec)
      deployment_config.executor_specs[tfx_node.id].Pack(executor_spec)

      # TODO(b/163433174): Remove specialized logic once generalization of
      # driver spec is done.
      if tfx_node.driver_class != base_driver.BaseDriver:
        driver_class_path = "{}.{}".format(tfx_node.driver_class.__module__,
                                           tfx_node.driver_class.__name__)
        driver_spec = executable_spec_pb2.PythonClassExecutableSpec()
        driver_spec.class_path = driver_class_path
        deployment_config.custom_driver_specs[tfx_node.id].Pack(driver_spec)

    # Step 7: Upstream/Downstream nodes
    # Note: the order of tfx_node.upstream_nodes is inconsistent from
    # run to run. We sort them so that compiler generates consistent results.
    # For ASYNC mode upstream/downstream node information is not set as
    # compiled IR graph topology can be different from that on pipeline
    # authoring time; for example ResolverNode is removed.
    if compile_context.is_sync_mode:
      node.upstream_nodes.extend(
          sorted(node.id for node in tfx_node.upstream_nodes))
      node.downstream_nodes.extend(
          sorted(node.id for node in tfx_node.downstream_nodes))

    # Step 8: Node execution options
    node.execution_options.caching_options.enable_cache = enable_cache

    # Step 9: Per-node platform config
    if isinstance(tfx_node, base_component.BaseComponent):
      tfx_component = cast(base_component.BaseComponent, tfx_node)
      if tfx_component.platform_config:
        deployment_config.node_level_platform_configs[tfx_node.id].Pack(
            tfx_component.platform_config)

    return node
  def _compile_resolver_config(self, context: _CompilerContext,
                               tfx_node: base_node.BaseNode,
                               node: pipeline_pb2.PipelineNode):
    """Compiles upstream ResolverNodes as a ResolverConfig.

    Iteratively reduces upstream resolver nodes into a resolver config of the
    current node until no upstream resolver node remains.
    Each iteration will consume one upstream resolver node, and convert it to
    the equivalent resolver steps and corresponding input channels.

    For example consider the following diagram:

    +--------------+ +------------+
    | Upstream A | | Upstream B |
    +--------------+ +------------+
    a| |b |i <-- output key
    | | |
    c| |d |
    v v |
    +----+----+----+ |
    | ResolverNode | |
    | cls=Foo | +----+
    +--------------+ |
    c| |d <---- | ----- output key of the ResolverNode should be the
    | | | the same as the input key of the Current Node.
    c| |d |j <-- input key
    v v v
    ++----+--------+-+
    | Current Node |
    | ResolverSteps: |
    | - ... |
    +----------------+

    After one iteration, the ResolverNode would be replaced by the resolver
    step of the downstream (current node).

    +--------------+ +------------+
    | Upstream A | | Upstream B |
    +--------------+ +------------+
    a| |b |i
    | | |
    c| |d |j
    v v v
    +----+----+-------------+------+
    | Current Node |
    | ResolverSteps: |
    | - Foo() |
    | - ... |
    +------------------------------+

    Following things are done for each reduction iteration:
    * Pick an upstream resolver node (in a reversed topological order).
    * Remove channels between resolver node and the current node.
    * Rewire resolver node input channels as those of the current node.
    * Convert the resolver node into corresponding resolver steps.

    This only applies to the ASYNC mode pipeline compilation.

    Args:
      context: A compiler context.
      tfx_node: A BaseNode instance.
      node: A PipelineNode IR to compile ResolverConfig into.

    Returns:
      A modified dict of input channels of the given node.
    """
    # This input_channels dict will be updated in the middle as the resolver
    # nodes are reduced, and this updated input_channels should be used
    # afterwards instead of tfx_node.inputs.
    input_channels = dict(tfx_node.inputs.get_all())  # Shallow copy.
    resolver_steps = []
    resolver_nodes = self._get_upstream_resolver_nodes(tfx_node)
    # Reduce each resolver node into resolver steps in reversed topological
    # order.
    for resolver_node in reversed(context.topologically_sorted(resolver_nodes)):
      resolver_channels = {
          input_key: channel
          for input_key, channel in input_channels.items()
          if channel.producer_component_id == resolver_node.id
      }
      for input_key, channel in resolver_channels.items():
        # CAVEAT: Currently resolver does not alter the input key, and we
        # require the output key of the resolver (which is the same as the
        # input key) to be consumed AS IS in the downstream node, whether it is
        # a resolver node or a TFX component node.
        # TODO(b/178452031): New Resolver should properly handle key mismatch.
        if input_key != channel.output_key:
          raise ValueError(f"Downstream node input key ({input_key}) should be "
                           f"the same as the output key ({channel.output_key}) "
                           "of the resolver node.")
        # Step 1.
        # Remove channel between parent resolver node and the tfx_node.
        del input_channels[input_key]
      # Step 2.
      # Rewire resolver node inputs to the tfx_node inputs.
      for parent_input_key, channel in resolver_node.inputs.items():
        if parent_input_key in input_channels:
          if channel != input_channels[parent_input_key]:
            raise ValueError(
                f"Duplicated input key {parent_input_key} found while "
                f"compiling {tfx_node.type}#{tfx_node.id}.")
        else:
          input_channels[parent_input_key] = channel
      # Step 3.
      # Convert resolver node into corresponding resolver steps.
      resolver_steps.extend(
          reversed(_convert_to_resolver_steps(resolver_node)))
    if resolver_steps:
      node.inputs.resolver_config.resolver_steps.extend(
          reversed(resolver_steps))
    return input_channels
def _get_upstream_resolver_nodes(
self, tfx_node: base_node.BaseNode) -> List[base_node.BaseNode]:
"""Gets all transitive upstream resolver nodes in topological order."""
result = []
visit_queue = list(tfx_node.upstream_nodes)
seen = set(node.id for node in visit_queue)
while visit_queue:
node = visit_queue.pop()
if not compiler_utils.is_resolver(node):
continue
result.append(node)
for upstream_node in node.upstream_nodes:
if upstream_node.id not in seen:
seen.add(node.id)
visit_queue.append(upstream_node)
return result
  def compile(self, tfx_pipeline: pipeline.Pipeline) -> pipeline_pb2.Pipeline:
    """Compiles a tfx pipeline into uDSL proto.

    Args:
      tfx_pipeline: A TFX pipeline.

    Returns:
      A Pipeline proto that encodes all necessary information of the pipeline.
    """
    context = _CompilerContext.from_tfx_pipeline(tfx_pipeline)
    pipeline_pb = pipeline_pb2.Pipeline()
    pipeline_pb.pipeline_info.id = context.pipeline_info.pipeline_name
    pipeline_pb.execution_mode = context.execution_mode
    # Pipeline root is compiled as a runtime parameter whose default comes
    # from pipeline authoring time.
    compiler_utils.set_runtime_parameter_pb(
        pipeline_pb.runtime_spec.pipeline_root.runtime_parameter,
        constants.PIPELINE_ROOT_PARAMETER_NAME, str,
        context.pipeline_info.pipeline_root)
    if pipeline_pb.execution_mode == pipeline_pb2.Pipeline.ExecutionMode.SYNC:
      # The per-run id is compiled as a runtime parameter without a default
      # value (it is only known at run time).
      compiler_utils.set_runtime_parameter_pb(
          pipeline_pb.runtime_spec.pipeline_run_id.runtime_parameter,
          constants.PIPELINE_RUN_ID_PARAMETER_NAME, str)
    assert compiler_utils.ensure_topological_order(tfx_pipeline.components), (
        "Pipeline components are not topologically sorted.")
    deployment_config = pipeline_pb2.IntermediateDeploymentConfig()
    if tfx_pipeline.metadata_connection_config:
      deployment_config.metadata_connection_config.Pack(
          tfx_pipeline.metadata_connection_config)
    for node in tfx_pipeline.components:
      # In ASYNC mode ResolverNode is merged into the downstream node as a
      # ResolverConfig
      if compiler_utils.is_resolver(node) and context.is_async_mode:
        continue
      node_pb = self._compile_node(node, context, deployment_config,
                                   tfx_pipeline.enable_cache)
      pipeline_or_node = pipeline_pb.PipelineOrNode()
      pipeline_or_node.pipeline_node.CopyFrom(node_pb)
      # TODO(b/158713812): Support sub-pipeline.
      pipeline_pb.nodes.append(pipeline_or_node)
      context.node_pbs[node.id] = node_pb
    if tfx_pipeline.platform_config:
      deployment_config.pipeline_level_platform_config.Pack(
          tfx_pipeline.platform_config)
    pipeline_pb.deployment_config.Pack(deployment_config)
    return pipeline_pb
def _iterate_resolver_cls_and_config(resolver_node: base_node.BaseNode):
  """Yields (resolver strategy class, resolver config) pairs of the node."""
  assert compiler_utils.is_resolver(resolver_node)
  exec_properties = resolver_node.exec_properties
  has_single_strategy = (
      resolver.RESOLVER_STRATEGY_CLASS in exec_properties and
      resolver.RESOLVER_CONFIG in exec_properties)
  has_strategy_list = (
      resolver.RESOLVER_STRATEGY_CLASS_LIST in exec_properties and
      resolver.RESOLVER_CONFIG_LIST in exec_properties)
  if has_single_strategy:
    # Single-strategy resolver: exactly one (class, config) pair.
    yield (exec_properties[resolver.RESOLVER_STRATEGY_CLASS],
           exec_properties[resolver.RESOLVER_CONFIG])
  elif has_strategy_list:
    # Multi-strategy resolver: classes and configs are paired positionally.
    for pair in zip(exec_properties[resolver.RESOLVER_STRATEGY_CLASS_LIST],
                    exec_properties[resolver.RESOLVER_CONFIG_LIST]):
      yield pair
  else:
    raise ValueError(f"Invalid ResolverNode exec_properties: {exec_properties}")
def _convert_to_resolver_steps(resolver_node: base_node.BaseNode):
  """Converts a ResolverNode into the equivalent list of ResolverSteps."""
  assert compiler_utils.is_resolver(resolver_node)
  steps = []
  for strategy_cls, strategy_config in (
      _iterate_resolver_cls_and_config(resolver_node)):
    step = pipeline_pb2.ResolverConfig.ResolverStep()
    step.class_path = f"{strategy_cls.__module__}.{strategy_cls.__name__}"
    step.config_json = json_utils.dumps(strategy_config)
    step.input_keys.extend(resolver_node.inputs.keys())
    steps.append(step)
  return steps
def _check_property_value_type(property_name: str,
                               property_value: types.Property,
                               artifact_type: metadata_store_pb2.ArtifactType):
  """Raises TypeError if the property value type mismatches the type schema."""
  expected_type = artifact_type.properties[property_name]
  actual_type = data_types_utils.get_metadata_value_type(property_value)
  if actual_type == expected_type:
    return
  raise TypeError(
      "Unexpected value type of property '{}' in output artifact '{}': "
      "Expected {} but given {} (value:{!r})".format(
          property_name, artifact_type.name,
          metadata_store_pb2.PropertyType.Name(expected_type),
          metadata_store_pb2.PropertyType.Name(actual_type),
          property_value))
"""Placeholders represent not-yet-available values at the component authoring time."""
import abc
import copy
import enum
from typing import Optional, Type, Union, cast
from tfx import types
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class _PlaceholderOperator(abc.ABC):
  """An Operator performs an operation on a Placeholder.

  It knows how to encode itself into a proto.
  """

  def __init__(self):
    pass

  @abc.abstractmethod
  def encode(
      self,
      sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      component_spec: Optional[Type[types.ComponentSpec]] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes this operator, wrapping the given sub-expression proto."""
    pass
class _ArtifactUriOperator(_PlaceholderOperator):
  """Extracts the URI from an artifact Placeholder.

  Prefer to use the .uri property of ArtifactPlaceholder.
  """

  def __init__(self, split: str = ''):
    super().__init__()
    self._split = split

  def encode(
      self,
      sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      component_spec: Optional[Type[types.ComponentSpec]] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    del component_spec  # Unused by ArtifactUriOperator
    encoded = placeholder_pb2.PlaceholderExpression()
    uri_op = encoded.operator.artifact_uri_op
    uri_op.expression.CopyFrom(sub_expression_pb)
    # An empty split means "the artifact URI itself"; only set when present.
    if self._split:
      uri_op.split = self._split
    return encoded
class _ArtifactValueOperator(_PlaceholderOperator):
  """Extracts the value from a primitive artifact Placeholder.

  Prefer to use the .value property of ArtifactPlaceholder.
  """

  def encode(
      self,
      sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      component_spec: Optional[Type[types.ComponentSpec]] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    del component_spec  # Unused by ArtifactValueOperator
    encoded = placeholder_pb2.PlaceholderExpression()
    value_op = encoded.operator.artifact_value_op
    value_op.expression.CopyFrom(sub_expression_pb)
    return encoded
class _IndexOperator(_PlaceholderOperator):
  """Extracts the value at a given index of a Placeholder.

  Prefer to use [index] operator overloading of Placeholder.
  """

  def __init__(self, index: int):
    super().__init__()
    self._index = index

  def encode(
      self,
      sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      component_spec: Optional[Type[types.ComponentSpec]] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    del component_spec  # Unused by IndexOperator
    encoded = placeholder_pb2.PlaceholderExpression()
    index_op = encoded.operator.index_op
    index_op.expression.CopyFrom(sub_expression_pb)
    index_op.index = self._index
    return encoded
class _ConcatOperator(_PlaceholderOperator):
  """Concat Operator concatenates multiple Placeholders.

  Prefer to use + operator overloading of Placeholder. Exactly one of `right`
  (for `placeholder + other`) or `left` (for `str + placeholder`) should be
  provided.
  """

  def __init__(self,
               right: Optional[Union[str, 'Placeholder']] = None,
               left: Optional[str] = None):
    super().__init__()
    self._left = left
    self._right = right

  def encode(
      self,
      sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      component_spec: Optional[Type[types.ComponentSpec]] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    del component_spec  # Unused by ConcatOperator
    # ConcatOperator's proto version contains multiple placeholder expressions
    # as operands. For convenience, the Python version is implemented taking
    # only two operands.
    # Bug fix: the operands are checked with `is not None` instead of
    # truthiness, so that concatenating an empty string ('' + p or p + '')
    # works instead of raising RuntimeError.
    if self._right is not None:
      # Resolve the right operand: another Placeholder or a plain str.
      if isinstance(self._right, Placeholder):
        other_expression_pb = self._right.encode()
      else:
        other_expression_pb = placeholder_pb2.PlaceholderExpression()
        other_expression_pb.value.string_value = self._right
      return self._combine(sub_expression_pb, other_expression_pb, append=True)
    if self._left is not None:
      # Resolve the left operand: must be a str (comes from __radd__).
      other_expression_pb = placeholder_pb2.PlaceholderExpression()
      other_expression_pb.value.string_value = self._left
      return self._combine(sub_expression_pb, other_expression_pb, append=False)
    raise RuntimeError(
        'ConcatOperator does not have the other expression to concat.')

  def _combine(
      self, sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      other_expression_pb: placeholder_pb2.PlaceholderExpression,
      append: bool) -> placeholder_pb2.PlaceholderExpression:
    """Merges the operand into an existing concat op, or creates a new one.

    Args:
      sub_expression_pb: The expression encoded so far.
      other_expression_pb: The new operand to concatenate.
      append: If True, the operand goes after the existing expression(s);
        otherwise it is prepended.

    Returns:
      A PlaceholderExpression with a flattened concat operator.
    """
    if sub_expression_pb.HasField(
        'operator') and sub_expression_pb.operator.HasField('concat_op'):
      # Flatten into the existing concat operator instead of nesting.
      expressions = sub_expression_pb.operator.concat_op.expressions
      if append:
        expressions.append(other_expression_pb)
      else:
        expressions.insert(0, other_expression_pb)
      return sub_expression_pb
    result = placeholder_pb2.PlaceholderExpression()
    if append:
      operands = [sub_expression_pb, other_expression_pb]
    else:
      operands = [other_expression_pb, sub_expression_pb]
    result.operator.concat_op.expressions.extend(operands)
    return result
class ProtoSerializationFormat(enum.Enum):
  """Serialization schemes for rendering a proto-valued placeholder."""
  # Values mirror the placeholder_pb2.ProtoOperator serialization enum.
  TEXT_FORMAT = placeholder_pb2.ProtoOperator.TEXT_FORMAT
  JSON = placeholder_pb2.ProtoOperator.JSON
  BINARY = placeholder_pb2.ProtoOperator.BINARY
class _ProtoOperator(_PlaceholderOperator):
  """Proto Operator helps access/serialize a proto-valued placeholder.

  Prefer to use . operator overloading of ExecPropertyPlaceholder or
  RuntimeInfoPlaceholder for proto field access, use serialize_proto function
  for proto serialization.
  """

  def __init__(self,
               proto_field_path: Optional[str] = None,
               serialization_format: Optional[ProtoSerializationFormat] = None):
    super().__init__()
    # Stored as a list so that subsequent field accesses can be appended to
    # this same operator (see append_field_path).
    self._proto_field_path = [proto_field_path] if proto_field_path else None
    self._serialization_format = serialization_format

  def can_append_field_path(self):
    # Only field-access operators (not serialization-only operators) can
    # absorb additional field path segments.
    return self._proto_field_path is not None

  def append_field_path(self, extra_path: str):
    self._proto_field_path.append(extra_path)

  def encode(
      self,
      sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      component_spec: Optional[Type[types.ComponentSpec]] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    result = placeholder_pb2.PlaceholderExpression()
    result.operator.proto_op.expression.CopyFrom(sub_expression_pb)
    if self._proto_field_path:
      result.operator.proto_op.proto_field_path.extend(self._proto_field_path)
    if self._serialization_format:
      result.operator.proto_op.serialization_format = (
          self._serialization_format.value)
    # Attach proto descriptor if available through component spec.
    if (component_spec and sub_expression_pb.placeholder.type ==
        placeholder_pb2.Placeholder.EXEC_PROPERTY):
      exec_property_name = sub_expression_pb.placeholder.key
      if exec_property_name not in component_spec.PARAMETERS:
        raise ValueError(
            f"Can't find provided placeholder key {exec_property_name} in "
            "component spec's exec properties. "
            f"Available exec property keys: {component_spec.PARAMETERS.keys()}."
        )
      execution_param = component_spec.PARAMETERS[exec_property_name]
      if not issubclass(execution_param.type, message.Message):
        raise ValueError(
            "Can't apply placeholder proto operator on non-proto type "
            f"exec property. Got {execution_param.type}.")
      result.operator.proto_op.proto_schema.message_type = (
          execution_param.type.DESCRIPTOR.full_name)
      # Attach file descriptors gathered from the exec property's proto type.
      fd_set = result.operator.proto_op.proto_schema.file_descriptors
      for fd in proto_utils.gather_file_descriptors(
          execution_param.type.DESCRIPTOR):
        fd.CopyToProto(fd_set.file.add())
    return result
class _Base64EncodeOperator(_PlaceholderOperator):
  """Encodes another placeholder's output using url safe base64.

  Prefer to use the .b64encode method of Placeholder.
  """

  def encode(
      self,
      sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      component_spec: Optional[Type[types.ComponentSpec]] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    del component_spec  # Unused by B64EncodeOperator
    encoded = placeholder_pb2.PlaceholderExpression()
    b64_op = encoded.operator.base64_encode_op
    b64_op.expression.CopyFrom(sub_expression_pb)
    return encoded
class Placeholder(abc.ABC):
  """A Placeholder represents not-yet-available values at the component authoring time."""

  def __init__(self, placeholder_type: placeholder_pb2.Placeholder.Type,
               key: Optional[str] = None):
    # Operators are applied in insertion order when encoding (see encode()).
    self._operators = []
    self._type = placeholder_type
    self._key = key

  def __add__(self, right: Union[str, 'Placeholder']):
    # Note: mutates self by appending a concat operator, then returns self.
    self._operators.append(_ConcatOperator(right=right))
    return self

  def __radd__(self, left: str):
    # Handles `str + placeholder`; mutates self like __add__.
    self._operators.append(_ConcatOperator(left=left))
    return self

  def __deepcopy__(self, memo):
    # This method is implemented to make sure Placeholder is deep copyable
    # by copy.deepcopy().
    cls = self.__class__
    result = cls.__new__(cls)
    memo[id(self)] = result
    for k, v in self.__dict__.items():
      setattr(result, k, copy.deepcopy(v, memo))
    return result

  def b64encode(self):
    """Encodes the output of another placeholder using url safe base64 encoding.

    Returns:
      A placeholder, when rendering, is a url safe base64 encoded string.
    """
    self._operators.append(_Base64EncodeOperator())
    return self

  def encode(
      self,
      component_spec: Optional[Type[types.ComponentSpec]] = None
  ) -> placeholder_pb2.PlaceholderExpression:
    """Encodes a placeholder as PlaceholderExpression proto.

    Args:
      component_spec: Optional. Information about the component that may be
        needed during encoding.

    Returns:
      Encoded proto containing all information of this placeholder.
    """
    result = placeholder_pb2.PlaceholderExpression()
    result.placeholder.type = self._type
    if self._key:
      result.placeholder.key = self._key
    # Each operator wraps the expression encoded so far.
    for op in self._operators:
      result = op.encode(result, component_spec)
    return result
class ArtifactPlaceholder(Placeholder):
  """Artifact Placeholder represents an input or an output artifact.

  Prefer to use input(...) or output(...) to create artifact placeholders.
  """

  @property
  def uri(self):
    # Note: appends an operator to self and returns self.
    self._operators.append(_ArtifactUriOperator())
    return self

  def split_uri(self, split: str):
    """Returns a placeholder for the URI of the given split of the artifact."""
    self._operators.append(_ArtifactUriOperator(split))
    return self

  @property
  def value(self):
    # Extracts the value of a primitive artifact.
    self._operators.append(_ArtifactValueOperator())
    return self

  def __getitem__(self, key: int):
    # Selects the artifact at the given index when multiple artifacts are
    # associated with the key.
    self._operators.append(_IndexOperator(key))
    return self
class _ProtoAccessiblePlaceholder(Placeholder, abc.ABC):
  """A base Placeholder for accessing proto fields using Python proto syntax."""

  def __getattr__(self, field_name: str):
    # Attribute access is recorded as a proto field access, e.g. `.num_layers`.
    return self._append_proto_access(f'.{field_name}')

  def __getitem__(self, key: Union[int, str]):
    # Subscript access is recorded as an indexed proto access, e.g. `[0]`.
    return self._append_proto_access(f'[{key!r}]')

  def _append_proto_access(self, proto_access_field: str) -> 'Placeholder':
    """Records a proto field access, merging with the last proto op if possible.

    Args:
      proto_access_field: A single field path segment, e.g. '.foo' or '[0]'.

    Returns:
      self, for chained access.
    """
    if self._operators and isinstance(
        self._operators[-1],
        _ProtoOperator) and self._operators[-1].can_append_field_path():
      self._operators[-1].append_field_path(proto_access_field)
    else:
      self._operators.append(
          _ProtoOperator(proto_field_path=proto_access_field))
    return self

  def serialize(self, serialization_format: ProtoSerializationFormat):
    """Serialize the proto-valued placeholder using the provided scheme.

    Args:
      serialization_format: The format of how the proto is serialized.

    Returns:
      A placeholder that when rendered is serialized with the scheme.
    """
    self._operators.append(
        _ProtoOperator(serialization_format=serialization_format))
    return self
class ExecPropertyPlaceholder(_ProtoAccessiblePlaceholder):
  """ExecProperty Placeholder represents an execution property.

  Prefer to use exec_property(...) to create exec property placeholders.
  """

  def __init__(self, key: str):
    """Initializes an ExecPropertyPlaceholder.

    Args:
      key: The key of the execution property.
    """
    super().__init__(placeholder_pb2.Placeholder.Type.EXEC_PROPERTY, key)
class RuntimeInfoPlaceholder(_ProtoAccessiblePlaceholder):
  """RuntimeInfo Placeholder represents runtime information for a component.

  Prefer to use runtime_info(...) to create RuntimeInfo placeholders.
  """

  def __init__(self, key: str):
    """Initializes a RuntimeInfoPlaceholder.

    Args:
      key: The runtime info key; must be one of _RUNTIME_INFO_KEYS.

    Raises:
      ValueError: If the key is not a supported runtime info key.
    """
    if key not in _RUNTIME_INFO_KEYS:
      raise ValueError(f'Got unsupported runtime info key: {key}.')
    super().__init__(placeholder_pb2.Placeholder.Type.RUNTIME_INFO, key)
class ExecInvocationPlaceholder(_ProtoAccessiblePlaceholder):
  """Execution Invocation Placeholder helps access ExecutionInvocation proto.

  Prefer to use execution_invocation(...) to create Execution Invocation
  placeholder.
  """

  def __init__(self):
    """Initializes an ExecInvocationPlaceholder; no key is needed."""
    super().__init__(placeholder_pb2.Placeholder.Type.EXEC_INVOCATION)
def input(key: str) -> ArtifactPlaceholder:  # pylint: disable=redefined-builtin
  """Returns a Placeholder that represents an input artifact.

  Args:
    key: The key of the input artifact.

  Returns:
    A Placeholder that supports
      1. Rendering the whole MLMD artifact proto as text_format.
         Example: input('model')
      2. Accessing a specific index using [index], if multiple artifacts are
         associated with the given key.
         Example: input('model')[0]
      3. Getting the URI of an artifact through .uri property.
         Example: input('model').uri or input('model')[0].uri
      4. Getting the URI of a specific split of an artifact using
         .split_uri(split_name) method.
         Example: input('examples')[0].split_uri('train')
      5. Getting the value of a primitive artifact through .value property.
         Example: input('primitive').value
      6. Concatenating with other placeholders or strings.
         Example: input('model').uri + '/model/' + exec_property('version')
  """
  return ArtifactPlaceholder(placeholder_pb2.Placeholder.Type.INPUT_ARTIFACT,
                             key)
def output(key: str) -> ArtifactPlaceholder:
  """Returns a Placeholder that represents an output artifact.

  It is the same as input(...) function, except it is for output artifacts.

  Args:
    key: The key of the output artifact.

  Returns:
    A Placeholder that supports
      1. Rendering the whole artifact as text_format.
         Example: output('model')
      2. Accessing a specific index using [index], if multiple artifacts are
         associated with the given key.
         Example: output('model')[0]
      3. Getting the URI of an artifact through .uri property.
         Example: output('model').uri or output('model')[0].uri
      4. Getting the URI of a specific split of an artifact using
         .split_uri(split_name) method.
         Example: output('examples')[0].split_uri('train')
      5. Getting the value of a primitive artifact through .value property.
         Example: output('primitive').value
      6. Concatenating with other placeholders or strings.
         Example: output('model').uri + '/model/' + exec_property('version')
  """
  return ArtifactPlaceholder(placeholder_pb2.Placeholder.Type.OUTPUT_ARTIFACT,
                             key)
def exec_property(key: str) -> ExecPropertyPlaceholder:
  """Returns a Placeholder that represents an execution property.

  Args:
    key: The key of the execution property.

  Returns:
    A Placeholder that supports
      1. Rendering the value of an execution property at a given key.
         Example: exec_property('version')
      2. Rendering the whole proto or a proto field of an execution property,
         if the value is a proto type.
         The (possibly nested) proto field in a placeholder can be accessed as
         if accessing a proto field in Python.
         Example: exec_property('model_config').num_layers
      3. Concatenating with other placeholders or strings.
         Example: output('model').uri + '/model/' + exec_property('version')
  """
  return ExecPropertyPlaceholder(key)
class RuntimeInfoKey(enum.Enum):
  """Supported keys for runtime_info(...) placeholders."""
  PLATFORM_CONFIG = 'platform_config'
  EXECUTOR_SPEC = 'executor_spec'


# String values of RuntimeInfoKey; used to validate keys passed to
# runtime_info() and RuntimeInfoPlaceholder.
_RUNTIME_INFO_KEYS = frozenset(key.value for key in RuntimeInfoKey)
def runtime_info(key: str) -> RuntimeInfoPlaceholder:
  """Returns a Placeholder that contains runtime information for component.

  Currently the runtime info includes following keys:
  1. platform_config: A platform_config proto that contains platform specific
     information.
  2. executor_spec: The executor spec proto.

  Args:
    key: The key of the runtime information.

  Returns:
    A Placeholder that will render to the information associated with the key.
    If the placeholder is proto-valued. Accessing a proto field can be
    represented as if accessing a proto field in Python.

  Raises:
    ValueError: If received unsupported key.
  """
  if key not in _RUNTIME_INFO_KEYS:
    raise ValueError(f'Got unsupported key: {key}.')
  return RuntimeInfoPlaceholder(key)
def execution_invocation() -> ExecInvocationPlaceholder:
  """Returns a Placeholder representing ExecutionInvocation proto.

  Returns:
    A Placeholder that will render to the ExecutionInvocation proto.
    Accessing a proto field is the same as if accessing a proto field in
    Python.

    Prefer to use input(key)/output(key)/exec_property(key) functions instead
    of input_dict/output_dict/execution_properties field from
    ExecutionInvocation proto.
  """
  return ExecInvocationPlaceholder()
"""Base class for TFX nodes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Any, Dict, Optional, Text, Type
from absl import logging
from six import with_metaclass
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec as executor_spec_module
from tfx.types import node_common
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
def _abstract_property() -> Any:
"""Returns an abstract property for use in an ABC abstract class."""
return abc.abstractmethod(lambda: None)
class BaseNode(with_metaclass(abc.ABCMeta, json_utils.Jsonable)):
  """Base class for a node in TFX pipeline."""

  @classmethod
  @deprecation_utils.deprecated(
      None, '`get_id` is deprecated as `instance_name is deprecated.`')
  def get_id(cls, instance_name: Optional[Text] = None):
    """Gets the id of a node.

    This can be used during pipeline authoring time. For example:

    from tfx.components import Trainer

    resolver = ResolverNode(..., model=Channel(
        type=Model, producer_component_id=Trainer.get_id('my_trainer')))

    Args:
      instance_name: (Optional) instance name of a node. If given, the instance
        name will be taken into consideration when generating the id.

    Returns:
      an id for the node.
    """
    node_class = deprecation_utils.get_first_nondeprecated_class(cls)
    node_class_name = node_class.__name__
    if instance_name:
      return '{}.{}'.format(node_class_name, instance_name)
    else:
      return node_class_name

  def __init__(
      self,
      instance_name: Optional[Text] = None,
      executor_spec: Optional[executor_spec_module.ExecutorSpec] = None,
      driver_class: Optional[Type[base_driver.BaseDriver]] = None,
  ):
    """Initialize a node.

    Args:
      instance_name: Deprecated. Please set `id` directly using `with_id()`
        function or `.id` setter in the `BaseNode` class. The pipeline
        assembling will fail if there are two nodes in the pipeline with the
        same id.
      executor_spec: Optional instance of executor_spec.ExecutorSpec which
        describes how to execute this node (optional, defaults to an empty
        executor indicates no-op.
      driver_class: Optional subclass of base_driver.BaseDriver as a custom
        driver for this node (optional, defaults to base_driver.BaseDriver).
        Nodes usually use the default driver class, but may override it.
    """
    if instance_name:
      logging.warning(
          '`instance_name` is deprecated, please set the node id directly '
          'using `with_id()` or the `.id` setter.')
    if executor_spec is None:
      # Default to a no-op executor.
      executor_spec = executor_spec_module.ExecutorClassSpec(
          base_executor.EmptyExecutor)
    if driver_class is None:
      driver_class = base_driver.BaseDriver
    self._instance_name = instance_name
    self.executor_spec = executor_spec
    self.driver_class = driver_class
    self._upstream_nodes = set()
    self._downstream_nodes = set()
    # Explicit user-set id; falls back to class/instance name (see `id`).
    self._id = None

  def to_json_dict(self) -> Dict[Text, Any]:
    """Convert from an object to a JSON serializable dictionary."""
    # Graph edges (_upstream_nodes/_downstream_nodes) are excluded from
    # serialization; they reference other node objects.
    return dict((k, v)
                for k, v in self.__dict__.items()
                if k not in ['_upstream_nodes', '_downstream_nodes'])

  @classmethod
  def get_class_type(cls) -> Text:
    """Returns the fully qualified name of the first non-deprecated class."""
    nondeprecated_class = deprecation_utils.get_first_nondeprecated_class(cls)
    return '.'.join(
        [nondeprecated_class.__module__, nondeprecated_class.__name__])

  @property
  def type(self) -> Text:
    # Fully qualified class name of this node.
    return self.__class__.get_class_type()

  @property
  @deprecation_utils.deprecated(None,
                                'component_type is deprecated, use type instead'
                               )
  def component_type(self) -> Text:
    return self.type

  @property
  def id(self) -> Text:
    """Node id, unique across all TFX nodes in a pipeline.

    If `id` is set by the user, return it directly.
    otherwise, if instance name (deprecated) is available, node id will be:
      <node_class_name>.<instance_name>
    otherwise, node id will be:
      <node_class_name>

    Returns:
      node id.
    """
    if self._id:
      return self._id
    node_class = deprecation_utils.get_first_nondeprecated_class(self.__class__)
    node_class_name = node_class.__name__
    if self._instance_name:
      return '{}.{}'.format(node_class_name, self._instance_name)
    else:
      return node_class_name

  @property
  @deprecation_utils.deprecated(None,
                                'component_id is deprecated, use id instead')
  def component_id(self) -> Text:
    return self.id

  @id.setter
  def id(self, id: Text) -> None:  # pylint: disable=redefined-builtin
    self._id = id

  def with_id(self, id: Text) -> 'BaseNode':  # pylint: disable=redefined-builtin
    """Sets the node id and returns the node itself, for call chaining."""
    self._id = id
    return self

  @property
  @abc.abstractmethod
  def inputs(self) -> node_common._PropertyDictWrapper:  # pylint: disable=protected-access
    pass

  @property
  @abc.abstractmethod
  def outputs(self) -> node_common._PropertyDictWrapper:  # pylint: disable=protected-access
    pass

  @property
  @abc.abstractmethod
  def exec_properties(self) -> Dict[Text, Any]:
    pass

  @property
  def upstream_nodes(self):
    return self._upstream_nodes

  def add_upstream_node(self, upstream_node):
    """Experimental: Add another component that must run before this one.

    This method enables task-based dependencies by enforcing execution order for
    synchronous pipelines on supported platforms. Currently, the supported
    platforms are Airflow, Beam, and Kubeflow Pipelines.

    Note that this API call should be considered experimental, and may not work
    with asynchronous pipelines, sub-pipelines and pipelines with conditional
    nodes. We also recommend relying on data for capturing dependencies where
    possible to ensure data lineage is fully captured within MLMD.

    It is symmetric with `add_downstream_node`.

    Args:
      upstream_node: a component that must run before this node.
    """
    self._upstream_nodes.add(upstream_node)
    # Keep the relation symmetric; the membership check prevents infinite
    # mutual recursion with add_downstream_node.
    if self not in upstream_node.downstream_nodes:
      upstream_node.add_downstream_node(self)

  @property
  def downstream_nodes(self):
    return self._downstream_nodes

  def add_downstream_node(self, downstream_node):
    """Experimental: Add another component that must run after this one.

    This method enables task-based dependencies by enforcing execution order for
    synchronous pipelines on supported platforms. Currently, the supported
    platforms are Airflow, Beam, and Kubeflow Pipelines.

    Note that this API call should be considered experimental, and may not work
    with asynchronous pipelines, sub-pipelines and pipelines with conditional
    nodes. We also recommend relying on data for capturing dependencies where
    possible to ensure data lineage is fully captured within MLMD.

    It is symmetric with `add_upstream_node`.

    Args:
      downstream_node: a component that must run after this node.
    """
    self._downstream_nodes.add(downstream_node)
    # Symmetric counterpart of add_upstream_node; see comment there.
    if self not in downstream_node.upstream_nodes:
      downstream_node.add_upstream_node(self)
"""TFX Resolver definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Any, Dict, List, Optional, Text, Type
from six import with_metaclass
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import node_common
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
# Constant to access resolver class from resolver exec_properties.
RESOLVER_STRATEGY_CLASS = 'resolver_class'
# Constant to access resolver config from resolver exec_properties.
RESOLVER_CONFIG = 'source_uri'
RESOLVER_STRATEGY_CLASS_LIST = 'resolver_class_list'
RESOLVER_CONFIG_LIST = 'resolver_config_list'
class ResolveResult(object):
"""The data structure to hold results from Resolver.
Attributes:
per_key_resolve_result: README.ml-pipelines-sdk.md key -> List[Artifact] dict containing the resolved
artifacts for each source channel with the key as tag.
per_key_resolve_state: README.ml-pipelines-sdk.md key -> bool dict containing whether or not the
resolved artifacts for the channel are considered complete.
has_complete_result: bool value indicating whether all desired artifacts
have been resolved.
"""
def __init__(self, per_key_resolve_result: Dict[Text, List[types.Artifact]],
per_key_resolve_state: Dict[Text, bool]):
self.per_key_resolve_result = per_key_resolve_result
self.per_key_resolve_state = per_key_resolve_state
self.has_complete_result = all(s for s in per_key_resolve_state.values())
class ResolverStrategy(with_metaclass(abc.ABCMeta, object)):
"""Base resolver strategy class.
A resolver strategy defines README.ml-pipelines-sdk.md type behavior used for input selection. A
resolver strategy subclass must override the resolve_artifacts() function
which takes README.ml-pipelines-sdk.md dict of <Text, List<types.Artifact>> as parameters and return
the resolved dict.
"""
@deprecation_utils.deprecated(
date='2020-09-24',
instructions='Please switch to the `resolve_artifacts`.')
def resolve(
self,
pipeline_info: data_types.PipelineInfo,
metadata_handler: metadata.Metadata,
source_channels: Dict[Text, types.Channel],
) -> ResolveResult:
"""Resolves artifacts from channels by querying MLMD.
Args:
pipeline_info: PipelineInfo of the current pipeline. We do not want to
query artifacts across pipeline boundary.
metadata_handler: README.ml-pipelines-sdk.md read-only handler to query MLMD.
source_channels: README.ml-pipelines-sdk.md key -> channel dict which contains the info of the
source channels.
Returns:
README.ml-pipelines-sdk.md ResolveResult instance.
Raises:
DeprecationWarning: when it is called.
"""
raise DeprecationWarning
@abc.abstractmethod
def resolve_artifacts(
self, metadata_handler: metadata.Metadata,
input_dict: Dict[Text, List[types.Artifact]]
) -> Optional[Dict[Text, List[types.Artifact]]]:
"""Resolves artifacts from channels, optionally querying MLMD if needed.
In asynchronous execution mode, resolver classes may composed in sequence
where the resolve_artifacts() result from the previous resolver instance
would be passed to the next resolver instance's resolve_artifacts() inputs.
If resolve_artifacts() returns None, it is considered as "no inputs
available", and the remaining resolvers will not be executed.
Also if resolve_artifacts() omits any key from the input_dict it will not
be available from the downstream resolver instances. General recommendation
is to preserve all keys in the input_dict unless you have specific reason.
Args:
metadata_handler: A metadata handler to access MLMD store.
input_dict: The input_dict to resolve from.
Returns:
If all entries has enough data after the resolving, returns the resolved
input_dict. Otherise, return None.
"""
raise NotImplementedError
class _ResolverDriver(base_driver.BaseDriver):
"""Driver for Resolver.
Constructs an instance of the resolver_class specified by user with configs
passed in by user and marks the resolved artifacts as the output of the
Resolver.
"""
# TODO(ruoyu): We need README.ml-pipelines-sdk.md better approach to let the Resolver fail on
# incomplete data.
def pre_execution(
self,
input_dict: Dict[Text, types.Channel],
output_dict: Dict[Text, types.Channel],
exec_properties: Dict[Text, Any],
driver_args: data_types.DriverArgs,
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> data_types.ExecutionDecision:
# Registers contexts and execution
contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(
pipeline_info)
execution = self._metadata_handler.register_execution(
exec_properties=exec_properties,
pipeline_info=pipeline_info,
component_info=component_info,
contexts=contexts)
# Gets resolved artifacts.
resolver_class = exec_properties[RESOLVER_STRATEGY_CLASS]
if exec_properties[RESOLVER_CONFIG]:
resolver = resolver_class(**exec_properties[RESOLVER_CONFIG])
else:
resolver = resolver_class()
resolve_result = resolver.resolve(
pipeline_info=pipeline_info,
metadata_handler=self._metadata_handler,
source_channels=input_dict.copy())
# TODO(b/148828122): This is README.ml-pipelines-sdk.md temporary walkaround for interactive mode.
for k, c in output_dict.items():
output_dict[k] = types.Channel(
type=c.type, artifacts=resolve_result.per_key_resolve_result[k])
# Updates execution to reflect artifact resolution results and mark
# as cached.
self._metadata_handler.update_execution(
execution=execution,
component_info=component_info,
output_artifacts=resolve_result.per_key_resolve_result,
execution_state=metadata.EXECUTION_STATE_CACHED,
contexts=contexts)
return data_types.ExecutionDecision(
input_dict={},
output_dict=resolve_result.per_key_resolve_result,
exec_properties=exec_properties,
execution_id=execution.id,
use_cached_results=True)
class Resolver(base_node.BaseNode):
"""Definition for TFX Resolver.
Resolver is README.ml-pipelines-sdk.md special TFX node which handles special artifact resolution
logics that will be used as inputs for downstream nodes.
To use Resolver, pass the followings to the Resolver constructor:
README.ml-pipelines-sdk.md. name of the Resolver instance
g. README.ml-pipelines-sdk.md subclass of ResolverStrategy
c. the configs that will be used to construct an instance of (README.ml-pipelines-sdk.md)
d. channels to resolve with their tag, in the form of kwargs
Here is an example:
...
example_gen = ImportExampleGen(...)
latest_five_examples_resolver = Resolver(
instance_name='latest_five_examples_resolver',
strategy_class=latest_artifacts_strategy.LatestArtifactsStrategy,
resolver_config={'desired_num_of_artifacts' : 5},
examples=example_gen.outputs['examples'])
trainer = MyTrainer(
examples=latest_five_examples_resolver.outputs['examples'],
user_module=...)
...
Attributes:
_strategy_class: the class of the ResolverStrategy.
_resolver_configs: the configs that will be used to construct an instance of
_strategy_class.
"""
def __init__(self,
instance_name: Text,
strategy_class: Type[ResolverStrategy],
config: Dict[Text, json_utils.JsonableType] = None,
**kwargs: types.Channel):
"""Init function for Resolver.
Args:
instance_name: the name of the Resolver instance.
strategy_class: README.ml-pipelines-sdk.md ResolverStrategy subclass which contains the artifact
resolution logic.
config: README.ml-pipelines-sdk.md dict of key to Jsonable type representing configuration that
will be used to construct the resolver strategy.
**kwargs: README.ml-pipelines-sdk.md key -> Channel dict, describing what are the Channels to be
resolved. This is set by user through keyword args.
"""
self._strategy_class = strategy_class
self._config = config or {}
self._input_dict = kwargs
self._output_dict = {}
for k, c in self._input_dict.items():
if not isinstance(c, types.Channel):
raise ValueError(
('Expected extra kwarg %r to be of type `tfx.types.Channel` (but '
'got %r instead).') % (k, c))
self._output_dict[k] = types.Channel(type=c.type, artifacts=[c.type()])
super(Resolver, self).__init__(
instance_name=instance_name,
driver_class=_ResolverDriver,
)
@property
def inputs(self) -> node_common._PropertyDictWrapper: # pylint: disable=protected-access
return node_common._PropertyDictWrapper(self._input_dict) # pylint: disable=protected-access
@property
def outputs(self) -> node_common._PropertyDictWrapper: # pylint: disable=protected-access
return node_common._PropertyDictWrapper(self._output_dict) # pylint: disable=protected-access
@property
def exec_properties(self) -> Dict[Text, Any]:
return {
RESOLVER_STRATEGY_CLASS: self._strategy_class,
RESOLVER_CONFIG: self._config
} | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/dsl/components/common/resolver.py | 0.860545 | 0.213224 | resolver.py | pypi |
"""Pluggable file I/O interface for use in TFX system and components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Iterable, List, Text, Tuple, Type
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
# Import modules that may provide filesystem plugins.
import tfx.dsl.io.plugins.local # pylint: disable=unused-import, g-import-not-at-top
import tfx.dsl.io.plugins.tensorflow_gfile # pylint: disable=unused-import, g-import-not-at-top
# Expose `NotFoundError` as `fileio.NotFoundError`.
NotFoundError = filesystem.NotFoundError
def _get_filesystem(path) -> Type[filesystem.Filesystem]:
return (filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY
.get_filesystem_for_path(path))
def open(path: PathType, mode: Text = 'r'): # pylint: disable=redefined-builtin
"""Open README.ml-pipelines-sdk.md file at the given path."""
return _get_filesystem(path).open(path, mode=mode)
def copy(src: PathType, dst: PathType, overwrite: bool = False) -> None:
"""Copy README.ml-pipelines-sdk.md file from the source to the destination."""
src_fs = _get_filesystem(src)
dst_fs = _get_filesystem(dst)
if src_fs is dst_fs:
src_fs.copy(src, dst, overwrite=overwrite)
else:
if not overwrite and exists(dst):
raise OSError(
('Destination file %r already exists and argument `overwrite` is '
'false.') % dst)
contents = open(src, mode='rb').read()
open(dst, mode='wb').write(contents)
def exists(path: PathType) -> bool:
"""Return whether README.ml-pipelines-sdk.md path exists."""
return _get_filesystem(path).exists(path)
def glob(pattern: PathType) -> List[PathType]:
"""Return the paths that match README.ml-pipelines-sdk.md glob pattern."""
return _get_filesystem(pattern).glob(pattern)
def isdir(path: PathType) -> bool:
"""Return whether README.ml-pipelines-sdk.md path is README.ml-pipelines-sdk.md directory."""
return _get_filesystem(path).isdir(path)
def listdir(path: PathType) -> List[PathType]:
"""Return the list of files in README.ml-pipelines-sdk.md directory."""
return _get_filesystem(path).listdir(path)
def makedirs(path: PathType) -> None:
"""Make README.ml-pipelines-sdk.md directory at the given path, recursively creating parents."""
_get_filesystem(path).makedirs(path)
def mkdir(path: PathType) -> None:
"""Make README.ml-pipelines-sdk.md directory at the given path; parent directory must exist."""
_get_filesystem(path).mkdir(path)
def remove(path: PathType) -> None:
"""Remove the file at the given path."""
_get_filesystem(path).remove(path)
def rename(src: PathType, dst: PathType, overwrite: bool = False) -> None:
"""Rename README.ml-pipelines-sdk.md source file to README.ml-pipelines-sdk.md destination path."""
src_fs = _get_filesystem(src)
dst_fs = _get_filesystem(dst)
if src_fs is dst_fs:
src_fs.rename(src, dst, overwrite=overwrite)
else:
raise NotImplementedError(
('Rename from %r to %r using different filesystems plugins is '
'currently not supported.') % (src, dst))
def rmtree(path: PathType) -> None:
"""Remove the given directory and its recursive contents."""
_get_filesystem(path).rmtree(path)
def stat(path: PathType) -> Any:
"""Return the stat descriptor for README.ml-pipelines-sdk.md given file path."""
return _get_filesystem(path).stat(path)
def walk(
top: PathType,
topdown: bool = True,
onerror: Callable[..., None] = None
) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:
"""Return an iterator walking README.ml-pipelines-sdk.md directory tree."""
return _get_filesystem(top).walk(top, topdown=topdown, onerror=onerror) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/dsl/io/fileio.py | 0.874091 | 0.298236 | fileio.py | pypi |
"""Local filesystem-based filesystem plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
from typing import Any, Callable, Iterable, List, Text, Tuple
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
class LocalFilesystem(filesystem.Filesystem):
"""Filesystem that uses local file operations."""
SUPPORTED_SCHEMES = ['']
@staticmethod
def open(name: PathType, mode: Text = 'r') -> Any:
try:
return open(name, mode=mode)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def copy(src: PathType, dst: PathType, overwrite: bool = False) -> None:
if not overwrite and os.path.exists(dst):
raise OSError(
('Destination file %r already exists and argument `overwrite` is '
'false.') % dst)
try:
shutil.copyfile(src, dst)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def exists(path: PathType) -> bool:
return os.path.exists(path)
@staticmethod
def glob(pattern: PathType) -> List[PathType]:
return glob.glob(pattern)
@staticmethod
def isdir(path: PathType) -> bool:
return os.path.isdir(path)
@staticmethod
def listdir(path: PathType) -> List[PathType]:
try:
return os.listdir(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def makedirs(path: PathType) -> None:
os.makedirs(path, exist_ok=True)
@staticmethod
def mkdir(path: PathType) -> None:
try:
os.mkdir(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def remove(path: PathType) -> None:
try:
os.remove(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def rename(src: PathType, dst: PathType, overwrite: bool = False) -> None:
if not overwrite and os.path.exists(dst):
raise OSError(
('Destination path %r already exists and argument `overwrite` is '
'false.') % dst)
try:
os.rename(src, dst)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def rmtree(path: PathType) -> None:
try:
shutil.rmtree(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def stat(path: PathType) -> Any:
try:
return os.stat(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def walk(
top: PathType,
topdown: bool = True,
onerror: Callable[..., None] = None
) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:
try:
yield from os.walk(top, topdown=topdown, onerror=onerror)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY.register(
LocalFilesystem, priority=10) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/dsl/io/plugins/local.py | 0.778355 | 0.227351 | local.py | pypi |
"""Tensorflow GFile-based filesystem plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Iterable, List, Text, Tuple
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
try:
import tensorflow as tf # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
tf = None
if tf:
class TensorflowFilesystem(filesystem.Filesystem):
"""Filesystem that delegates to `tensorflow.io.gfile`."""
SUPPORTED_SCHEMES = ['', 'gs://', 'hdfs://', 's3://']
@staticmethod
def open(name: PathType, mode: Text = 'r') -> Any:
# Because the GFile implementation delays I/O until necessary, we cannot
# catch `NotFoundError` here.
return tf.io.gfile.GFile(name, mode=mode)
@staticmethod
def copy(src: PathType, dst: PathType, overwrite: bool = False) -> None:
try:
tf.io.gfile.copy(src, dst, overwrite=overwrite)
except tf.errors.NotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def exists(path: PathType) -> bool:
return tf.io.gfile.exists(path)
@staticmethod
def glob(pattern: PathType) -> List[PathType]:
try:
return tf.io.gfile.glob(pattern)
except tf.errors.NotFoundError:
return []
@staticmethod
def isdir(path: PathType) -> bool:
return tf.io.gfile.isdir(path)
@staticmethod
def listdir(path: PathType) -> List[PathType]:
try:
return tf.io.gfile.listdir(path)
except tf.errors.NotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def makedirs(path: PathType) -> None:
tf.io.gfile.makedirs(path)
@staticmethod
def mkdir(path: PathType) -> None:
try:
tf.io.gfile.mkdir(path)
except tf.errors.NotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def remove(path: PathType) -> None:
try:
tf.io.gfile.remove(path)
except tf.errors.NotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def rename(src: PathType, dst: PathType, overwrite: bool = False) -> None:
try:
tf.io.gfile.rename(src, dst, overwrite=overwrite)
except tf.errors.NotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def rmtree(path: PathType) -> None:
try:
tf.io.gfile.rmtree(path)
except tf.errors.NotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def stat(path: PathType) -> Any:
try:
return tf.io.gfile.stat(path)
except tf.errors.NotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def walk(
top: PathType,
topdown: bool = True,
onerror: Callable[..., None] = None
) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:
try:
yield from tf.io.gfile.walk(top, topdown=topdown, onerror=onerror)
except tf.errors.NotFoundError as e:
raise filesystem.NotFoundError() from e
filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY.register(
TensorflowFilesystem, priority=0, use_as_fallback=True)
else:
TensorflowFilesystem = None # pylint: disable=invalid-name | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/dsl/io/plugins/tensorflow_gfile.py | 0.853791 | 0.337995 | tensorflow_gfile.py | pypi |
import copy
from typing import Any, Dict, Iterator, List, Mapping, Text
import apache_beam as beam
import tensorflow as tf
from tfx.experimental.distributed_inference.graphdef_experiments.subgraph_partitioning import execution_spec
@beam.ptransform_fn
@beam.typehints.with_input_types(Dict[Text, Dict[Text, Any]])
@beam.typehints.with_output_types(Dict[Text, Dict[Text, Any]])
def ExecuteGraph( # pylint: disable=invalid-name
pcoll: beam.pvalue.PCollection, remote_op_name: Text,
remote_op_name_to_graph_name: Mapping[Text, Text],
graph_name_to_specs: Mapping[Text, List[execution_spec.ExecutionSpec]],
graph_to_remote_op_input_name_mapping: Mapping[Text, Mapping[Text,
Mapping[Text,
Text]]]
) -> beam.pvalue.PCollection:
"""A PTransform that executes README.ml-pipelines-sdk.md graph.
Each graph has README.ml-pipelines-sdk.md list of ExecutionSpecs, in which the order of the list
represents the order of execution. An ExecutionSpec can either represent
README.ml-pipelines-sdk.md subgraph layer or README.ml-pipelines-sdk.md remote op in README.ml-pipelines-sdk.md remote op layer. When executing README.ml-pipelines-sdk.md
subgraph layer, we can load and execute the subgraph with README.ml-pipelines-sdk.md beam ParDo.
When executing README.ml-pipelines-sdk.md remote op (which represents another graph), we need to
load the remote graph inputs, call ExecuteGraph to recursively execute that
graph, and extract the remote graph output. When executing README.ml-pipelines-sdk.md remote op, we
call the current graph "parent" and the remote graph "child".
Here, each Beam element is README.ml-pipelines-sdk.md dictionary from remote op names to README.ml-pipelines-sdk.md dictionary
from tensor names to values, or {remote op name: {tensor name: value}}.
Note that at any time, PColl only stores input tensor values and computed
tensor values. The input PColl should have the input tensor names and values
for the graph ready. As we execute the partitioned subgraphs, we add the
intermediate output names and values to PColl.
Args:
pcoll: A PCollection of inputs to the graph. Each element is README.ml-pipelines-sdk.md dictionary
from remote op names to README.ml-pipelines-sdk.md dictionary from tensor names to values. Here,
element[remote_op_name] contains graph inputs.
remote_op_name: The remote op name of the current graph.
remote_op_name_to_graph_name: A mapping from remote op names to graph names.
graph_name_to_specs: A mapping from graph names to README.ml-pipelines-sdk.md list of ExecutionSpecs,
where the order of the list represents the order of execution.
graph_to_remote_op_input_name_mapping: A mapping from graph names to remote
op names to remote graph placeholder names to parent graph input names. We
don't have this information since it was stored in PyFunc's function.
{graph name: {remote op name: {placeholder name: input name}}}.
Returns:
A PCollection of results of this graph. Each element is README.ml-pipelines-sdk.md dictionary from
remote op names to README.ml-pipelines-sdk.md dictionary from tensor names to values. Here,
element[remote_op_name] stores graph inputs, intermediate results, and
graph outputs.
"""
specs = graph_name_to_specs[remote_op_name_to_graph_name[remote_op_name]]
for spec in specs:
# Construct Beam subgraph for README.ml-pipelines-sdk.md subgraph layer.
if not spec.is_remote_op:
step_name = ("SubgraphLayerDoFn[Graph_%s][Outputs_%s]" %
(remote_op_name, "_".join(spec.output_names)))
pcoll = pcoll | step_name >> beam.ParDo(_SubgraphLayerDoFn(), spec,
remote_op_name)
# Construct Beam subgraph for README.ml-pipelines-sdk.md remote op.
else:
# ExecutionSpec stores one remote op.
child_remote_op_name = list(spec.output_names)[0]
step_descriptor = ("[Parent_%s][Child_%s]" %
(remote_op_name, child_remote_op_name))
step_name = "LoadRemoteGraphInputs%s" % step_descriptor
pcoll = pcoll | step_name >> _LoadRemoteGraphInputs( # pylint: disable=no-value-for-parameter
remote_op_name, child_remote_op_name, remote_op_name_to_graph_name,
graph_to_remote_op_input_name_mapping)
# A good place to add beam.Reshuffle() to prevent fusion.
step_name = "ExecuteGraph%s" % step_descriptor
pcoll = pcoll | step_name >> ExecuteGraph( # pylint: disable=no-value-for-parameter
child_remote_op_name, remote_op_name_to_graph_name,
graph_name_to_specs, graph_to_remote_op_input_name_mapping)
step_name = "ExtractRemoteGraphOutput%s" % step_descriptor
pcoll = pcoll | step_name >> _ExtractRemoteGraphOutput( # pylint: disable=no-value-for-parameter
remote_op_name, child_remote_op_name, remote_op_name_to_graph_name,
graph_name_to_specs)
return pcoll
class _SubgraphLayerDoFn(beam.DoFn):
"""DoFn that executes one subgraph layer."""
def process(
self,
# Not using mapping here because it doesn't support item assignment.
element: Dict[Text, Dict[Text, Any]],
spec: execution_spec.ExecutionSpec,
remote_op_name: Text) -> Iterator[Dict[Text, Dict[Text, Any]]]:
"""Executes README.ml-pipelines-sdk.md subgraph layer.
To execute README.ml-pipelines-sdk.md subgraph layer, we need to prepare README.ml-pipelines-sdk.md feed_dict by extracting
tensor values from element. Then, we run the subgraph and store its outputs
to README.ml-pipelines-sdk.md copy of element.
Since we import `GraphDef` protos, all the node names now have the prefix
"import/". Also, TensorFlow feed_dict and outputs accept tensor
names instead of node names. Hence, README.ml-pipelines-sdk.md conversion from node_name to
"import/node_name:0" is necessary. Note that this conversion assumes
that there is one output per node.
Args:
element: A dictionary from remote op names to README.ml-pipelines-sdk.md dictionary from tensor
names to values. Element[remote_op_name] stores graph inputs and
previous specs' outputs.
spec: An ExecutionSpec for README.ml-pipelines-sdk.md subgraph layer.
remote_op_name: The remote op name of the current graph.
Yields:
A dictionary from remote op names to README.ml-pipelines-sdk.md dictionary from tensor names to
values. The dictionary is README.ml-pipelines-sdk.md copy of the input element, to which the
outputs of this subgraph layer have been added.
"""
element = copy.deepcopy(element)
input_tensor_names = [
_import_tensor_name(node_name) for node_name in spec.input_names
]
output_tensor_names = [
_import_tensor_name(node_name) for node_name in spec.output_names
]
feed_dict = {
tensor_name: element[remote_op_name][tensor_name]
for tensor_name in input_tensor_names
}
outputs = []
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
tf.import_graph_def(spec.subgraph)
outputs = sess.run(output_tensor_names, feed_dict=feed_dict)
for output_tensor_name, output_tensor in zip(output_tensor_names, outputs):
element[remote_op_name][output_tensor_name] = output_tensor
yield element
def _import_tensor_name( # pylint: disable=invalid-name
node_name: Text) -> Text:
return "import/%s:0" % node_name
@beam.ptransform_fn
@beam.typehints.with_input_types(Dict[Text, Dict[Text, Any]])
@beam.typehints.with_output_types(Dict[Text, Dict[Text, Any]])
def _LoadRemoteGraphInputs( # pylint: disable=invalid-name
pcoll: beam.pvalue.PCollection, parent_remote_op_name: Text,
child_remote_op_name: Text, remote_op_name_to_graph_name: Mapping[Text,
Text],
graph_to_remote_op_input_name_mapping: Mapping[Text, Mapping[Text,
Mapping[Text,
Text]]]
) -> beam.pvalue.PCollection:
"""A PTransform that prepares inputs for README.ml-pipelines-sdk.md remote graph.
Before executing README.ml-pipelines-sdk.md remote graph, we need to prepare its inputs. We first
get the mapping from remote graph placeholder names to parent graph input
names. Then, in README.ml-pipelines-sdk.md copy of element, we copy the inputs from the parent
graph's key to the remote graph's key.
Args:
pcoll: A PCollection of child graph inputs not loaded yet. Each element is README.ml-pipelines-sdk.md
dictionary from remote op names to README.ml-pipelines-sdk.md dictionary from tensor names to
values. Here, element[child_remote_op_name] is empty now.
parent_remote_op_name: The remote op name of the parent graph.
child_remote_op_name: The remote op name of the child graph.
remote_op_name_to_graph_name: A mapping from remote op names to graph names.
graph_to_remote_op_input_name_mapping: A mapping from graph names to remote
op names to remote graph placeholder names to parent graph input names.
{graph name: {remote op name: {placeholder name: input name}}}.
Returns:
A PCollection of inputs to the child graph. Each element is README.ml-pipelines-sdk.md dictionary
from remote op names to README.ml-pipelines-sdk.md dictionary from tensor names to values. Here,
element[child_remote_op_name] stores the inputs of child graph.
"""
parent_graph_name = remote_op_name_to_graph_name[parent_remote_op_name]
name_mapping = (
graph_to_remote_op_input_name_mapping[parent_graph_name]
[child_remote_op_name])
mapping = name_mapping.items()
# Calling _copy_tensor_value multiple times may introduce README.ml-pipelines-sdk.md burden, since
# _copy_tensor_value invokes README.ml-pipelines-sdk.md deepcopy on element.
for child_graph_placeholder_name, parent_graph_input_name in mapping:
step_name = ("PrepareInput[Graph_%s][Input_%s]" %
(child_remote_op_name, child_graph_placeholder_name))
pcoll = pcoll | step_name >> beam.Map(
_copy_tensor_value, parent_remote_op_name,
_import_tensor_name(parent_graph_input_name), child_remote_op_name,
_import_tensor_name(child_graph_placeholder_name))
return pcoll
def _copy_tensor_value( # pylint: disable=invalid-name
element: Dict[Text, Dict[Text,
Any]], old_graph: Text, old_tensor_name: Text,
new_graph: Text, new_tensor_name: Text) -> Dict[Text, Dict[Text, Any]]:
element = copy.deepcopy(element)
if new_graph not in element:
element[new_graph] = {}
element[new_graph][new_tensor_name] = element[old_graph][old_tensor_name]
return element
@beam.ptransform_fn
@beam.typehints.with_input_types(Dict[Text, Dict[Text, Any]])
@beam.typehints.with_output_types(Dict[Text, Dict[Text, Any]])
def _ExtractRemoteGraphOutput( # pylint: disable=invalid-name
pcoll: beam.pvalue.PCollection,
parent_remote_op_name: Text,
child_remote_op_name: Text,
remote_op_name_to_graph_name: Mapping[Text, Text],
graph_name_to_specs: Mapping[Text, List[execution_spec.ExecutionSpec]],
) -> beam.pvalue.PCollection:
"""A PTransform that extracts remote graph output.
After finish executing README.ml-pipelines-sdk.md remote graph, we need to collect its output.
We first find the output name of the remote graph, then we copy the
output of the remote graph to its parent graph. Finally, we clear the
intermediate results of the remote graph.
Note we assumed that each node has only one output, which also applies
to remote op. This means that README.ml-pipelines-sdk.md remote graph can only have one output.
Args:
pcoll: A PCollection of child graph results. Each element is README.ml-pipelines-sdk.md dictionary
from remote op names to README.ml-pipelines-sdk.md dictionary from tensor names to values. Here,
element[child_remote_op_name] stores graph inputs, intermediate results,
and graph output.
parent_remote_op_name: The remote op name of the parent graph.
child_remote_op_name: The remote op name of the child graph.
remote_op_name_to_graph_name: A mapping from remote op names to graph names.
graph_name_to_specs: A mapping from graph names to README.ml-pipelines-sdk.md list of ExecutionSpecs.
Returns:
A PCollection of child graph output in parent graph. Each element is README.ml-pipelines-sdk.md
dictionary from remote op names to README.ml-pipelines-sdk.md dictionary from tensor names to
values. Here, element[parent_remote_op_name] contains the output from
the child graph, and element[child_remote_op_name] is deleted.
"""
child_graph_name = remote_op_name_to_graph_name[child_remote_op_name]
child_specs = graph_name_to_specs[child_graph_name]
child_output_name = list(child_specs[-1].output_names)[0]
step_name_extract = ("ExtractOutput[Graph_%s][Output_%s]" %
(child_remote_op_name, child_output_name))
step_name_clear = ("ClearIntermediateOutputs[Graph_%s]" %
(child_remote_op_name))
return (pcoll
| step_name_extract >> beam.Map(
_copy_tensor_value, child_remote_op_name,
_import_tensor_name(child_output_name), parent_remote_op_name,
_import_tensor_name(child_remote_op_name))
| step_name_clear >> beam.Map(_clear_outputs_for_finished_graph,
child_remote_op_name))
def _clear_outputs_for_finished_graph( # pylint: disable=invalid-name
element: Dict[Text, Dict[Text, Any]],
finished_graph: Text) -> Dict[Text, Dict[Text, Any]]:
element = copy.deepcopy(element)
del element[finished_graph]
return element | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/experimental/distributed_inference/graphdef_experiments/subgraph_partitioning/beam_pipeline.py | 0.883186 | 0.341871 | beam_pipeline.py | pypi |
import tensorflow as tf
# This experiment builds TF1-style graphs and runs them in explicit sessions,
# so eager execution must be turned off before any graph is constructed.
tf.compat.v1.disable_eager_execution()  # Disable eager mode
N = 1000  # number of embeddings (rows per table; also the modulus applied to ids)
NDIMS = 16  # dimensionality of embeddings (columns per table row)
def create_session(graph):
  """Returns a `tf.compat.v1.Session` for `graph` with 8 inter-op threads."""
  session_config = tf.compat.v1.ConfigProto(inter_op_parallelism_threads=8)
  return tf.compat.v1.Session(graph=graph, config=session_config)
# Define remote_op_a's graph: a simple embedding lookup over a fixed table.
graph_a = tf.Graph()
with graph_a.as_default():
  # Fixed seed makes the N x NDIMS table reproducible across sessions.
  table_a = tf.random.uniform(shape=[N, NDIMS], seed=10)
  # int32 ids fed by callers of remote_op_a; placeholder name is relied on
  # when feeding the graph by tensor name.
  ids_a = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_a')
  # Output: one NDIMS-dim embedding row per input id.
  result_a = tf.nn.embedding_lookup(table_a, ids_a)
def remote_op_a(input_ids):
  """Mimics a remote op by wrapping graph_a execution in a numpy_function.

  Args:
    input_ids: An int32 tensor of ids to look up in graph_a's embedding table.

  Returns:
    A float32 tensor holding the embeddings computed by graph_a.
  """
  def remote_lookup(input_ids):
    # numpy_function hands us numpy arrays; run graph_a in its own session.
    sess = create_session(graph_a)
    with sess:
      return sess.run(result_a, feed_dict={ids_a: input_ids})
  return tf.compat.v1.numpy_function(
      func=remote_lookup, inp=[input_ids], Tout=tf.float32, name='remote_op_a')
# Define remote_op_b's graph: combines two remote_op_a lookups.
graph_b = tf.Graph()
with graph_b.as_default():
  ids_b2 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_b2')
  ids_b1 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_b1')
  ids_b1_preprocessed = tf.math.floormod(tf.add(ids_b1, 1), N)  # keep ids in [0, N)
  remote_result_a1 = remote_op_a(ids_b1_preprocessed)
  remote_result_a2 = remote_op_a(ids_b2)
  result_b = tf.math.add(remote_result_a1, remote_result_a2 * 2.5)
def remote_op_b(input_ids1, input_ids2):
  """Mimics another remote op."""

  def remote_lookup(ids1, ids2):
    # Run graph_b in its own session; graph_b in turn calls remote_op_a.
    with create_session(graph_b) as sess:
      feeds = {ids_b1: ids1, ids_b2: ids2}
      return sess.run(result_b, feed_dict=feeds)

  return tf.compat.v1.numpy_function(
      func=remote_lookup,
      inp=[input_ids1, input_ids2],
      Tout=tf.float32,
      name='remote_op_b')
# Define main's graph: combines remote_op_a and remote_op_b results through
# several arithmetic stages.
main_graph = tf.Graph()
with main_graph.as_default():
  ids1 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids1')
  ids2 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids2')
  casted_ids1 = tf.cast(ids1, tf.float32)
  casted_ids2 = tf.cast(ids2, tf.float32)
  # First round of remote calls on the raw ids.
  remote_a0 = remote_op_a(ids1)
  remote_b0 = remote_op_b(ids1, ids2)
  # "Upper" arithmetic stage mixing the first-round results.
  left_upper_concat = tf.concat([remote_a0, remote_b0], 0)
  left_upper_sum = tf.reduce_mean(left_upper_concat)
  right_upper_sum = tf.reduce_mean(remote_b0)
  right_upper_mul = tf.multiply(right_upper_sum, casted_ids2)
  right_upper_add = tf.add(right_upper_mul, left_upper_sum)
  # floormod by N keeps the derived values usable as embedding ids.
  right_upper_round = tf.math.round(right_upper_mul)
  right_upper_floormod = tf.math.floormod(right_upper_round, N)
  left_upper_add = tf.add_n([left_upper_sum, casted_ids1, right_upper_add])
  left_upper_round = tf.math.round(left_upper_add)
  left_upper_floormod = tf.math.floormod(left_upper_round, N)
  # Second round of remote calls on the derived ids.
  remote_a1 = remote_op_a(left_upper_floormod)
  remote_b1 = remote_op_b(left_upper_floormod, right_upper_floormod)
  # "Lower" stage reducing the second-round results into the final scalar mix.
  left_lower_sum = tf.reduce_mean(remote_a1)
  right_lower_sum = tf.reduce_mean(remote_b1)
  right_lower_mul = tf.multiply(casted_ids2, right_lower_sum)
  right_lower_div = tf.divide(right_upper_add, right_lower_mul)
  main_result = tf.add_n([
      left_lower_sum, right_lower_div, right_lower_sum, right_upper_sum,
      tf.cast(left_upper_floormod, tf.float32)
  ])
def save_examples_as_graphdefs(export_dir):
  """Writes the three example graphs as binary GraphDef files under `export_dir`."""
  named_graphs = [(graph_a, 'graph_a.pb'), (graph_b, 'graph_b.pb'),
                  (main_graph, 'main_graph.pb')]
  for graph, filename in named_graphs:
    tf.io.write_graph(graph.as_graph_def(), export_dir, filename, as_text=False)
if __name__ == '__main__':
  # Write the example GraphDefs below the current working directory.
  save_examples_as_graphdefs('./complex_graphdefs')
import collections
from typing import Dict, List, Mapping, Set, Text
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.experimental.distributed_inference.graphdef_experiments.subgraph_partitioning import execution_spec
def get_graph_name_to_graph_def(
    graph_name_to_filepath: Mapping[Text, Text]
) -> Dict[Text, tf.compat.v1.GraphDef]:
  """Gets the `GraphDef` protos from files.

  Args:
    graph_name_to_filepath: A mapping from graph names to filepaths. Each
      filepath points to a `GraphDef` proto in binary.

  Returns:
    A mapping from graph names to `GraphDef` protos.
  """
  loaded = {}
  for graph_name, filepath in graph_name_to_filepath.items():
    loaded[graph_name] = _get_graph_def(filepath)
  return loaded
def _get_graph_def(filepath: Text) -> tf.compat.v1.GraphDef:
  """Parses one binary-serialized `GraphDef` proto from `filepath`."""
  with fileio.open(filepath, 'rb') as f:
    serialized = f.read()
  graph_def = tf.compat.v1.GraphDef()
  graph_def.ParseFromString(serialized)
  return graph_def
def partition_all_graphs(
    graph_name_to_graph_def: Mapping[Text, tf.compat.v1.GraphDef],
    graph_name_to_output_names: Mapping[Text, List[Text]]
) -> Dict[Text, List[execution_spec.ExecutionSpec]]:
  """Partitions all the graphs.

  For each graph, the partitioning algorithm takes in the graph's `GraphDef`
  proto and output names, partitions the graph, and returns a list of
  ExecutionSpecs. Later, the beam_pipeline library can take in the
  ExecutionSpecs and execute the partitioned subgraphs.

  Args:
    graph_name_to_graph_def: A mapping from graph names to `GraphDef` protos.
    graph_name_to_output_names: A mapping from graph names to lists of their
      output node names.

  Returns:
    A mapping from graph names to a list of ExecutionSpecs, where the order
    of the list represents the order of execution.
  """
  return {
      graph_name: _partition_one_graph(graph_def,
                                       graph_name_to_output_names[graph_name])
      for graph_name, graph_def in graph_name_to_graph_def.items()
  }
def _partition_one_graph(
    graph_def: tf.compat.v1.GraphDef,
    output_names: List[Text]) -> List[execution_spec.ExecutionSpec]:
  """Partitions one graph.

  Args:
    graph_def: A `GraphDef` proto for that graph.
    output_names: A list of graph's output node names.

  Returns:
    A list of ExecutionSpecs.
  """
  # Derived views of the graph shared by the partitioning steps below.
  graph = _get_graph(graph_def)
  name_to_node = _get_node_name_to_node_def(graph_def)
  remote_op_deps = _get_remote_op_to_immediate_dep(name_to_node)

  specs = _get_execution_specs(graph_def, output_names, graph, name_to_node,
                               remote_op_deps)
  _modify_execution_specs_for_input_validity(specs)
  return specs
def _get_graph(graph_def: tf.compat.v1.GraphDef) -> tf.Graph:
  """Builds a tf.Graph containing the nodes of `graph_def`.

  Nodes are imported with tf.import_graph_def's default 'import/' name
  prefix, which the rest of this module relies on (see
  _create_placeholder_node_from_existing_node).

  Args:
    graph_def: A `GraphDef` proto.

  Returns:
    A tf.Graph with the imported nodes.
  """
  # The previous implementation created a throwaway tf.compat.v1.Session
  # solely to get a default graph to import into; importing into a fresh
  # graph directly is equivalent and avoids allocating session resources.
  graph = tf.Graph()
  with graph.as_default():
    tf.import_graph_def(graph_def)
  return graph
def _get_node_name_to_node_def(
    graph_def: tf.compat.v1.GraphDef) -> Dict[Text, tf.compat.v1.NodeDef]:
  """Indexes the nodes of `graph_def` by their names."""
  name_to_node = {}
  for node in graph_def.node:
    name_to_node[node.name] = node
  return name_to_node
def _get_remote_op_to_immediate_dep(
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef]
) -> Dict[Text, List[Text]]:
  """Gets the execution dependencies between remote ops.

  The remote op immediate dependencies must be executed before executing
  a remote op.

  Args:
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.

  Returns:
    A mapping from a remote op name to a list of remote op immediate
    dependencies' names.
  """
  return {
      node.name: _get_remote_op_immediate_dep(node.name, node_name_to_node_def)
      for node in node_name_to_node_def.values()
      if _is_remote_op(node)
  }
def _get_remote_op_immediate_dep(
    remote_op_name: Text,
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef]) -> List[Text]:
  """Finds the remote op immediate dependencies for a remote op.

  Performs a BFS from `remote_op_name` towards its inputs, terminating each
  branch at the first remote op encountered.

  Args:
    remote_op_name: The name of the child remote op.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.

  Returns:
    A list of remote op immediate dependencies' names.
  """
  to_visit = collections.deque([remote_op_name])
  seen = {remote_op_name}
  immediate_deps = []

  while to_visit:
    node_name = to_visit.popleft()
    for input_name in node_name_to_node_def[node_name].input:
      if input_name in seen:
        continue
      seen.add(input_name)
      # Stop traversing when reaching a remote op.
      if _is_remote_op(node_name_to_node_def[input_name]):
        immediate_deps.append(input_name)
      else:
        to_visit.append(input_name)

  return immediate_deps
def _is_placeholder_op(node: tf.compat.v1.NodeDef) -> bool:
  """Returns True if `node` is a Placeholder op (a graph input)."""
  return node.op == 'Placeholder'
def _is_remote_op(node: tf.compat.v1.NodeDef) -> bool:
  """Returns True if `node` is a remote op (modeled here as a PyFunc op)."""
  return node.op == 'PyFunc'
def _get_execution_specs(
    graph_def: tf.compat.v1.GraphDef, graph_output_names: List[Text],
    graph: tf.Graph, node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef],
    remote_op_to_immediate_dep: Mapping[Text, List[Text]]
) -> List[execution_spec.ExecutionSpec]:
  """Generates the ExecutionSpecs for a graph.

  A "layer" contains one or more nodes inside a graph. There are two types of
  layers: subgraph layer and remote op layer. A subgraph layer doesn't
  contain remote ops, whereas a remote op layer only contains remote ops.
  Remote ops inside a remote op layer don't depend on each other's output,
  so the order of execution between those remote ops doesn't matter.

  We first identify the remote op layers of a graph. Then, based on the
  remote op layers, we can derive the subgraph layers. For example, after
  identifying the first remote op layer, we can equate the inputs of the
  remote op layer to the outputs of the previous subgraph layer. We can
  then traverse and construct the previous subgraph layer.

  Each subgraph layer can be captured into one ExecutionSpec, but each
  remote op layer need to be stored into N ExecutionSpecs, where N equals
  to the number of remote ops inside a remote op layer. This happens
  because each remote op essentially represents a graph.

  Args:
    graph_def: A `GraphDef` proto.
    graph_output_names: A list of graph output node names.
    graph: A tf.Graph representing the same graph as graph_def.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.
    remote_op_to_immediate_dep: A mapping from remote op name to a list of
      remote op immediate dependencies' names.

  Returns:
    A list of ExecutionSpecs, where the order of the list represents the
    order of execution.
  """
  execution_specs = []  # type: List[execution_spec.ExecutionSpec]
  previously_visited = set()  # type: Set[Text]
  # _RemoteOpLayers yields remote op layers in execution order.
  for remote_op_layer in _RemoteOpLayers(remote_op_to_immediate_dep):
    # Get one subgraph layer: the nodes this remote op layer needs that are
    # not yet computed.
    output_node_names = _get_previous_subgraph_layer_output_node_names(
        remote_op_layer, node_name_to_node_def)
    if output_node_names:
      spec = _get_execution_spec_for_subgraph_layer(graph_def, graph,
                                                    node_name_to_node_def,
                                                    previously_visited,
                                                    output_node_names)
      execution_specs.append(spec)
      # Remember which nodes are now computed, so later layers treat them
      # as inputs rather than re-deriving them.
      previously_visited |= _get_non_input_names(spec.subgraph)
    # Get one remote op layer (one ExecutionSpec per remote op).
    specs = _get_execution_specs_for_remote_op_layer(remote_op_layer,
                                                     node_name_to_node_def)
    execution_specs.extend(specs)
  # Get the last subgraph layer, ending at the graph's declared outputs.
  output_node_names = set(graph_output_names)
  spec = _get_execution_spec_for_subgraph_layer(graph_def, graph,
                                                node_name_to_node_def,
                                                previously_visited,
                                                output_node_names)
  execution_specs.append(spec)
  return execution_specs
def _get_previous_subgraph_layer_output_node_names(
    remote_op_layer: Set[Text],
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef]) -> Set[Text]:
  """Gets the output node names of the previous subgraph layer.

  Given a remote op layer, we derive the output node names of the previous
  subgraph layer. Layers tend to have the following order: subgraph layer,
  remote op layer, subgraph layer, remote op layer, ...

  Args:
    remote_op_layer: A set of remote op names for a remote op layer.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.

  Returns:
    A set of output node names of the previous subgraph layer.
  """
  output_names = set()
  for remote_op_name in remote_op_layer:
    for input_name in node_name_to_node_def[remote_op_name].input:
      input_node = node_name_to_node_def[input_name]
      # Assumption: graph inputs and previous remote op outputs are always
      # computed and stored, so they are not part of this subgraph layer.
      if not _is_placeholder_op(input_node) and not _is_remote_op(input_node):
        output_names.add(input_name)
  return output_names
def _get_execution_spec_for_subgraph_layer(
    graph_def: tf.compat.v1.GraphDef, graph: tf.Graph,
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef],
    previously_visited: Set[Text],
    output_node_names: Set[Text]) -> execution_spec.ExecutionSpec:
  """Constructs one subgraph layer.

  As discussed in _get_execution_specs(), a subgraph layer contains one or
  more nodes excluding remote ops. Based on a set of output node names, we
  traverse toward the ancestors (upward) until encountering a "special" node.
  Here, we traverse upward because each node's node_def contains input names
  but not output names.

  A "special" node could be either a placeholder node, a remote op, or a node
  visited by a previous layer. Since it is computed/stored prior to the
  current subgraph layer, we can treat it as an input of the current subgraph
  layer.

  Args:
    graph_def: A `GraphDef` proto for the original graph.
    graph: A tf.Graph instance for the original graph.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.
    previously_visited: A set of node names from previous subgraph layers.
    output_node_names: A set of output node names for the current subgraph.

  Returns:
    An ExecutionSpec representing a subgraph layer.
  """
  # Start from an empty GraphDef that keeps the original's version info and
  # function library so the subgraph remains loadable.
  subgraph = tf.compat.v1.GraphDef()
  subgraph.versions.CopyFrom(graph_def.versions)
  subgraph.library.CopyFrom(graph_def.library)
  # BFS upward from the layer's outputs toward its inputs.
  queue = collections.deque(output_node_names)
  visited = set()
  while queue:
    current_node_name = queue.popleft()
    current_node = node_name_to_node_def[current_node_name]
    if current_node_name not in visited:
      visited.add(current_node_name)
      if (_is_remote_op(current_node) or _is_placeholder_op(current_node) or
          current_node_name in previously_visited):
        # These ops must be computed before this subgraph layer. Hence,
        # we treat them as placeholder inputs (and stop traversing here).
        placeholder_node = _create_placeholder_node_from_existing_node(
            current_node, graph)
        subgraph.node.append(placeholder_node)
      else:
        subgraph.node.append(current_node)
        queue.extend(node_name_to_node_def[current_node_name].input)
  return execution_spec.ExecutionSpec(
      subgraph=subgraph,
      input_names=_get_input_names(subgraph),
      output_names=set(output_node_names),
      is_remote_op=False)
def _create_placeholder_node_from_existing_node(
    node: tf.compat.v1.NodeDef, graph: tf.Graph) -> tf.compat.v1.NodeDef:
  """Creates a placeholder node to represent an existing node.

  Some partitioned subgraphs may require inputs that are loaded or computed
  previously. Hence, we replace the input nodes with placeholder nodes that
  share the same name, shape, and dtype. Now the inputs become placeholders
  inside partitioned subgraphs, and can be loaded by feed dicts at the
  runtime.

  Args:
    node: A `NodeDef` proto for the existing node.
    graph: A tf.Graph instance for the graph that contains the existing node.

  Returns:
    A `NodeDef` proto that stores a placeholder node.
  """
  # Nodes were imported with the default 'import/' prefix (see _get_graph).
  operation = graph.get_operation_by_name('import/%s' % (node.name))
  output_tensor = operation.outputs[0]
  # Record the placeholder in a scratch graph. The previous implementation
  # created a throwaway tf.compat.v1.Session for this, which is unnecessary:
  # only a default graph is needed to capture the placeholder's NodeDef.
  scratch_graph = tf.Graph()
  with scratch_graph.as_default():
    tf.compat.v1.placeholder(
        dtype=output_tensor.dtype, shape=output_tensor.shape, name=node.name)
  return scratch_graph.as_graph_def().node[0]
def _get_input_names(subgraph: tf.compat.v1.GraphDef) -> Set[Text]:
  """Returns the names of `subgraph`'s placeholder (input) nodes."""
  return {node.name for node in subgraph.node if _is_placeholder_op(node)}
def _get_non_input_names(subgraph: tf.compat.v1.GraphDef) -> Set[Text]:
  """Returns the names of `subgraph`'s non-placeholder (computed) nodes."""
  return {node.name for node in subgraph.node if not _is_placeholder_op(node)}
def _get_execution_specs_for_remote_op_layer(
    remote_op_layer: Set[Text],
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef]
) -> List[execution_spec.ExecutionSpec]:
  """Constructs ExecutionSpecs for a remote op layer.

  As discussed in _get_execution_specs(), a remote op layer contains one
  or more remote ops having no dependencies on each other. However, instead
  of having one ExecutionSpec to store a layer (as it is with subgraph
  layer), we use multiple ExecutionSpecs to represent a remote op layer.

  Args:
    remote_op_layer: A set of remote op names for a remote op layer.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.

  Returns:
    A list of ExecutionSpecs representing a remote op layer.
  """
  return [
      execution_spec.ExecutionSpec(
          subgraph=None,
          input_names=set(node_name_to_node_def[remote_op_name].input),
          output_names={remote_op_name},
          is_remote_op=True) for remote_op_name in remote_op_layer
  ]
def _modify_execution_specs_for_input_validity(
    specs: List[execution_spec.ExecutionSpec]) -> None:
  """Modifies the execution specs to ensure that all inputs are valid.

  Ensure inputs have been outputted by previous specs. Sometimes an input
  of a spec may be a node from a previous spec but not one of the outputs.
  We'd like to add it to previous spec's outputs.

  Args:
    specs: A list of ExecutionSpecs, where order of the list represents the
      order of the execution.
  """
  for index, current_spec in enumerate(specs):
    # Only subgraph-layer specs can gain extra outputs.
    earlier_subgraph_specs = [
        spec for spec in specs[:index] if not spec.is_remote_op
    ]
    for previous_spec in earlier_subgraph_specs:
      _add_current_spec_input_to_previous_spec_output(current_spec,
                                                      previous_spec)
def _add_current_spec_input_to_previous_spec_output(
    current_spec: execution_spec.ExecutionSpec,
    previous_spec: execution_spec.ExecutionSpec) -> None:
  """Adds inputs of `current_spec` computed by `previous_spec` to its outputs.

  Args:
    current_spec: The ExecutionSpec whose inputs must be made available.
    previous_spec: An earlier subgraph-layer ExecutionSpec whose output set
      is extended in place.
  """
  # Hoisted out of the loop: the set of nodes computed by previous_spec is
  # loop-invariant, and the original code rebuilt it on every iteration.
  previous_non_input_names = _get_non_input_names(previous_spec.subgraph)
  for input_name in current_spec.input_names:
    if input_name in previous_non_input_names:
      # Output names is a set, which doesn't allow duplicates.
      previous_spec.output_names.add(input_name)
class _RemoteOpLayers:
"""A class that outputs remote op layers (custom topological sort).
A remote op layer contains README.ml-pipelines-sdk.md set of remote op names that don't have
dependencies on each other. The remote op layers are returned in execution
order. In other words, README.ml-pipelines-sdk.md remote op layer returned earlier will be executed
earlier.
"""
def __init__(self, remote_op_to_immediate_dep: Mapping[Text, List[Text]]):
"""Initializes the class.
Args:
remote_op_to_immediate_dep: A mapping from README.ml-pipelines-sdk.md remote op name to README.ml-pipelines-sdk.md list of
remote op immediate dependencies' names.
"""
self.remote_op_to_immediate_dep = remote_op_to_immediate_dep
def __iter__(self):
self._not_processed = set(self.remote_op_to_immediate_dep.keys())
return self
def __next__(self) -> Set[Text]:
"""Gets the remote op names for the next remote op layer.
Returns:
A set of remote op names.
"""
if not self._not_processed:
raise StopIteration
layer_node_names = set()
for remote_op_name in self._not_processed:
remote_op_immediate_dep = set(
self.remote_op_to_immediate_dep[remote_op_name])
if not remote_op_immediate_dep & self._not_processed:
layer_node_names.add(remote_op_name)
self._not_processed -= layer_node_names
return layer_node_names | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/experimental/distributed_inference/graphdef_experiments/subgraph_partitioning/graph_partition.py | 0.924951 | 0.44059 | graph_partition.py | pypi |
"""Define LocalDagRunner to run the pipeline locally."""
import os
from absl import logging
from tfx.experimental.templates.penguin.pipeline import configs
from tfx.experimental.templates.penguin.pipeline import pipeline
from tfx.orchestration import metadata
from tfx.orchestration.local import local_dag_runner
from tfx.proto import trainer_pb2
# TFX pipeline produces many output files and metadata. All output data will be
# stored under this OUTPUT_DIR.
# NOTE: It is recommended to have a separated OUTPUT_DIR which is *outside* of
# the source code structure. Please change OUTPUT_DIR to other location
# where we can store outputs of the pipeline.
OUTPUT_DIR = '.'

# TFX produces two types of outputs, files and metadata.
# - Files will be created under PIPELINE_ROOT directory.
# - Metadata will be written to SQLite database in METADATA_PATH.
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, 'tfx_pipeline_output',
                             configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(OUTPUT_DIR, 'tfx_metadata', configs.PIPELINE_NAME,
                             'metadata.db')

# The last component of the pipeline, "Pusher" will produce serving model under
# SERVING_MODEL_DIR.
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, 'serving_model')

# Specifies data file directory. DATA_PATH should be a directory containing CSV
# files for CsvExampleGen in this example. By default, data files are in the
# `data` directory.
# NOTE: If you upload data files to GCS(which is recommended if you use
# Kubeflow), you can use a path starting "gs://YOUR_BUCKET_NAME/path" for
# DATA_PATH. For example,
# DATA_PATH = 'gs://bucket/penguin/csv/'.
# TODO(step 4): Specify the path for your data.
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def run():
  """Defines a pipeline and runs it with the local DAG runner."""
  local_dag_runner.LocalDagRunner().run(
      pipeline.create_pipeline(
          pipeline_name=configs.PIPELINE_NAME,
          pipeline_root=PIPELINE_ROOT,
          data_path=DATA_PATH,
          # NOTE: Use `query` instead of `data_path` to use BigQueryExampleGen.
          # query=configs.BIG_QUERY_QUERY,
          preprocessing_fn=configs.PREPROCESSING_FN,
          run_fn=configs.RUN_FN,
          train_args=trainer_pb2.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
          eval_args=trainer_pb2.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
          eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
          serving_model_dir=SERVING_MODEL_DIR,
          # NOTE: Provide GCP configs to use BigQuery with Beam DirectRunner.
          # beam_pipeline_args=configs.
          # BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
          metadata_connection_config=metadata.sqlite_metadata_connection_config(
              METADATA_PATH)))
if __name__ == '__main__':
  # Emit INFO-level logs so pipeline progress is visible when run directly.
  logging.set_verbosity(logging.INFO)
  run()
from typing import List, Text
from absl import logging
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx.components.trainer.executor import TrainerFnArgs
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.experimental.templates.penguin.models import constants
from tfx.experimental.templates.penguin.models import features
from tfx.utils import io_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
def _get_serve_tf_examples_fn(model, schema, tf_transform_output):
  """Returns a function that parses a serialized tf.Example for serving.

  Args:
    model: The trained Keras model.
    schema: A `schema_pb2.Schema` proto for the raw features; used to derive
      the parsing spec when Transform output is unavailable.
    tf_transform_output: A `tft.TFTransformOutput`, or None when the
      Transform component is not used.

  Returns:
    A tf.function mapping a batch of serialized tf.Example strings to model
    predictions.
  """
  if tf_transform_output is None:  # Transform component is not used.
    @tf.function
    def serve_tf_examples_fn(serialized_tf_examples):
      """Returns the output to be used in the serving signature."""
      # Parse with the raw schema's spec; drop the label, which is not
      # provided at serving time.
      feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
      feature_spec.pop(features.LABEL_KEY)
      parsed_features = tf.io.parse_example(serialized_tf_examples,
                                            feature_spec)
      return model(parsed_features)
  else:  # Transform component exists.
    # Attach the transform layer to the model so it is tracked and exported
    # together with the model.
    model.tft_layer = tf_transform_output.transform_features_layer()
    @tf.function
    def serve_tf_examples_fn(serialized_tf_examples):
      """Returns the output to be used in the serving signature."""
      feature_spec = tf_transform_output.raw_feature_spec()
      feature_spec.pop(features.LABEL_KEY)
      parsed_features = tf.io.parse_example(serialized_tf_examples,
                                            feature_spec)
      # Apply the same preprocessing as training before invoking the model.
      transformed_features = model.tft_layer(parsed_features)
      return model(transformed_features)
  return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
              data_accessor: DataAccessor,
              schema: schema_pb2.Schema,
              label: Text,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    schema: A schema proto of input data.
    label: Name of the label.
    batch_size: The number of consecutive elements of the returned dataset
      to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  options = dataset_options.TensorFlowDatasetOptions(
      batch_size=batch_size, label_key=label)
  return data_accessor.tf_dataset_factory(file_pattern, options,
                                          schema).repeat()
def _build_keras_model(feature_list: List[Text]) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying penguin data.

  Args:
    feature_list: List of feature names.

  Returns:
    A compiled Keras Model.
  """
  # The model below is built with Functional API, please refer to
  # https://www.tensorflow.org/guide/keras/overview for all API options.
  inputs = [keras.layers.Input(shape=(1,), name=name) for name in feature_list]
  hidden = keras.layers.concatenate(inputs)
  for _ in range(constants.NUM_LAYERS):
    hidden = keras.layers.Dense(
        constants.HIDDEN_LAYER_UNITS, activation='relu')(hidden)
  outputs = keras.layers.Dense(
      constants.OUTPUT_LAYER_UNITS, activation='softmax')(hidden)

  model = keras.Model(inputs=inputs, outputs=outputs)
  model.compile(
      optimizer=keras.optimizers.Adam(constants.LEARNING_RATE),
      loss='sparse_categorical_crossentropy',
      metrics=[keras.metrics.SparseCategoricalAccuracy()])
  model.summary(print_fn=logging.info)
  return model
# TFX Trainer will call this function.
# TODO(step 4): Construct, train and save your model in this function.
def run_fn(fn_args: TrainerFnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # Resolve schema, feature names and label key depending on whether the
  # Transform component ran upstream.
  if fn_args.transform_output is None:  # Transform is not used.
    tf_transform_output = None
    schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())
    feature_list = features.FEATURE_KEYS
    label_key = features.LABEL_KEY
  else:
    tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
    schema = tf_transform_output.transformed_metadata.schema
    feature_list = [features.transformed_name(f) for f in features.FEATURE_KEYS]
    label_key = features.transformed_name(features.LABEL_KEY)

  strategy = tf.distribute.MirroredStrategy()
  # Scale the per-replica batch sizes by the number of replicas in sync.
  num_replicas = strategy.num_replicas_in_sync
  train_dataset = _input_fn(
      fn_args.train_files,
      fn_args.data_accessor,
      schema,
      label_key,
      batch_size=constants.TRAIN_BATCH_SIZE * num_replicas)
  eval_dataset = _input_fn(
      fn_args.eval_files,
      fn_args.data_accessor,
      schema,
      label_key,
      batch_size=constants.EVAL_BATCH_SIZE * num_replicas)

  with strategy.scope():
    model = _build_keras_model(feature_list)

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  serve_fn = _get_serve_tf_examples_fn(model, schema, tf_transform_output)
  signatures = {
      'serving_default':
          serve_fn.get_concrete_function(
              tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""Define LocalDagRunner to run the pipeline locally."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
from tfx.experimental.templates.taxi.pipeline import configs
from tfx.experimental.templates.taxi.pipeline import pipeline
from tfx.orchestration import metadata
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import trainer_pb2
# TFX pipeline produces many output files and metadata. All output data will be
# stored under this OUTPUT_DIR.
# NOTE: It is recommended to have a separated OUTPUT_DIR which is *outside* of
# the source code structure. Please change OUTPUT_DIR to other location
# where we can store outputs of the pipeline.
OUTPUT_DIR = '.'

# TFX produces two types of outputs, files and metadata.
# - Files will be created under PIPELINE_ROOT directory.
# - Metadata will be written to SQLite database in METADATA_PATH.
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, 'tfx_pipeline_output',
                             configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(OUTPUT_DIR, 'tfx_metadata', configs.PIPELINE_NAME,
                             'metadata.db')

# The last component of the pipeline, "Pusher" will produce serving model under
# SERVING_MODEL_DIR.
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, 'serving_model')

# Specifies data file directory. DATA_PATH should be a directory containing CSV
# files for CsvExampleGen in this example. By default, data files are in the
# `data` directory.
# NOTE: If you upload data files to GCS(which is recommended if you use
# Kubeflow), you can use a path starting "gs://YOUR_BUCKET_NAME/path" for
# DATA_PATH. For example,
# DATA_PATH = 'gs://bucket/chicago_taxi_trips/csv/'.
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def run():
  """Defines a local pipeline and runs it with LocalDagRunner."""
  LocalDagRunner().run(
      pipeline.create_pipeline(
          pipeline_name=configs.PIPELINE_NAME,
          pipeline_root=PIPELINE_ROOT,
          data_path=DATA_PATH,
          # TODO(step 7): (Optional) Uncomment here to use BigQueryExampleGen.
          # query=configs.BIG_QUERY_QUERY,
          preprocessing_fn=configs.PREPROCESSING_FN,
          run_fn=configs.RUN_FN,
          train_args=trainer_pb2.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
          eval_args=trainer_pb2.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
          eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
          serving_model_dir=SERVING_MODEL_DIR,
          # TODO(step 7): (Optional) Uncomment here to use provide GCP related
          # config for BigQuery with Beam DirectRunner.
          # beam_pipeline_args=configs.
          # BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
          metadata_connection_config=metadata.sqlite_metadata_connection_config(
              METADATA_PATH)))
if __name__ == '__main__':
  # Emit INFO-level logs so pipeline progress is visible when run directly.
  logging.set_verbosity(logging.INFO)
  run()
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_transform as tft
from tfx.experimental.templates.taxi.models import features
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense
  tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  if not isinstance(x, tf.sparse.SparseTensor):
    return x

  fill_value = '' if x.dtype == tf.string else 0
  densified = tf.sparse.to_dense(
      tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), fill_value)
  return tf.squeeze(densified, axis=1)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in features.DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[features.transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))

  for key in features.VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature; rare values fall into OOV buckets.
    outputs[features.transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=features.VOCAB_SIZE,
        num_oov_buckets=features.OOV_SIZE)

  for key, num_buckets in zip(features.BUCKET_FEATURE_KEYS,
                              features.BUCKET_FEATURE_BUCKET_COUNT):
    # Quantile-bucketize continuous features into integer bucket indices.
    outputs[features.transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]),
        num_buckets)

  for key in features.CATEGORICAL_FEATURE_KEYS:
    # Already-integral categorical features only need missing-value filling.
    outputs[features.transformed_name(key)] = _fill_in_missing(inputs[key])

  # TODO(b/157064428): Support label transformation for Keras.
  # Do not apply label transformation as it will result in wrong evaluation.
  outputs[features.transformed_name(
      features.LABEL_KEY)] = inputs[features.LABEL_KEY]

  return outputs
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow as tf
import tensorflow_transform as tft
from tfx.experimental.templates.taxi.models import features
from tfx.experimental.templates.taxi.models.keras import constants
from tfx_bsl.tfxio import dataset_options
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and applies TFT."""

  # Attach the TFT layer to the model so it is tracked and saved with it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not available (nor needed) at serving time.
    feature_spec.pop(features.LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
def _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200):
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
      dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # repeat() makes the dataset infinite; the trainer bounds consumption via
  # steps_per_epoch / validation_steps.
  return data_accessor.tf_dataset_factory(
      file_pattern,
      dataset_options.TensorFlowDatasetOptions(
          batch_size=batch_size,
          label_key=features.transformed_name(features.LABEL_KEY)),
      tf_transform_output.transformed_metadata.schema).repeat()
def _build_keras_model(hidden_units, learning_rate):
  """Creates a DNN Keras model for classifying taxi data.

  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).
    learning_rate: [float], learning rate of the Adam optimizer.

  Returns:
    A keras Model.
  """
  # Dense float features feed the deep part of the model directly.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in features.transformed_names(features.DENSE_FLOAT_FEATURE_KEYS)
  ]
  # Vocabulary features are integer ids in [0, VOCAB_SIZE + OOV_SIZE).
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=features.VOCAB_SIZE + features.OOV_SIZE,
          default_value=0)
      for key in features.transformed_names(features.VOCAB_FEATURE_KEYS)
  ]
  # Bucketized features: one identity column per feature with its own
  # bucket count.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              features.transformed_names(features.BUCKET_FEATURE_KEYS),
              features.BUCKET_FEATURE_BUCKET_COUNT)
  ]
  # Already-categorical features with known maximum values.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              features.transformed_names(features.CATEGORICAL_FEATURE_KEYS),
              features.CATEGORICAL_FEATURE_MAX_VALUES)
  ]
  # Wrap categoricals as indicator (one-hot) columns for the wide part.
  indicator_column = [
      tf.feature_column.indicator_column(categorical_column)
      for categorical_column in categorical_columns
  ]
  model = _wide_and_deep_classifier(
      # TODO(b/140320729) Replace with premade wide_and_deep keras model
      wide_columns=indicator_column,
      deep_columns=real_valued_columns,
      dnn_hidden_units=hidden_units,
      learning_rate=learning_rate)
  return model
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units,
                              learning_rate):
  """Build a simple keras wide and deep model.

  Args:
    wide_columns: Feature columns wrapped in indicator_column for wide (linear)
      part of the model.
    deep_columns: Feature columns for deep part of the model.
    dnn_hidden_units: [int], the layer sizes of the hidden DNN.
    learning_rate: [float], learning rate of the Adam optimizer.

  Returns:
    A Wide and Deep Keras model.
  """
  # Keras needs the feature definitions at compile time.
  # TODO(b/139081439): Automate generation of input layers from FeatureColumn.
  input_layers = {
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
      for colname in features.transformed_names(
          features.DENSE_FLOAT_FEATURE_KEYS)
  }
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in features.transformed_names(features.VOCAB_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in features.transformed_names(features.BUCKET_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32') for
      colname in features.transformed_names(features.CATEGORICAL_FEATURE_KEYS)
  })

  # TODO(b/161952382): Replace with Keras premade models and
  # Keras preprocessing layers.
  deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
  for numnodes in dnn_hidden_units:
    deep = tf.keras.layers.Dense(numnodes)(deep)
  wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)

  # Single sigmoid unit over the concatenated wide and deep branches.
  output = tf.keras.layers.Dense(
      1, activation='sigmoid')(
          tf.keras.layers.concatenate([deep, wide]))
  output = tf.squeeze(output, -1)

  model = tf.keras.Model(input_layers, output)
  model.compile(
      loss='binary_crossentropy',
      # Use the non-deprecated `learning_rate` kwarg (the `lr` alias is
      # deprecated in tf.keras optimizers).
      optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
      metrics=[tf.keras.metrics.BinaryAccuracy()])
  model.summary(print_fn=logging.info)
  return model
# TFX Trainer will call this function.
def run_fn(fn_args):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
                            tf_transform_output, constants.TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
                           tf_transform_output, constants.EVAL_BATCH_SIZE)

  # Build the model inside the distribution strategy scope so its variables
  # are created under that strategy.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = _build_keras_model(
        hidden_units=constants.HIDDEN_UNITS,
        learning_rate=constants.LEARNING_RATE)

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Export with a serving signature that accepts serialized tf.Examples and
  # applies the TFT preprocessing graph before invoking the model.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx.experimental.templates.taxi.models import features
from tfx.experimental.templates.taxi.models.estimator import constants
from tfx.utils import io_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  reader = tf.data.TFRecordDataset(filenames, compression_type='GZIP')
  return reader
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Returns the feature spec for the raw (pre-transform) features."""
  return schema_utils.schema_as_feature_spec(schema).feature_spec
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    A tf.estimator.DNNLinearCombinedClassifier (wide and deep) estimator.
  """
  # Dense float features feed the deep (DNN) part of the model.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in features.transformed_names(features.DENSE_FLOAT_FEATURE_KEYS)
  ]
  categorical_columns = []
  # Vocabulary features are integer ids in [0, VOCAB_SIZE + OOV_SIZE).
  for key in features.transformed_names(features.VOCAB_FEATURE_KEYS):
    categorical_columns.append(
        tf.feature_column.categorical_column_with_identity(
            key,
            num_buckets=features.VOCAB_SIZE + features.OOV_SIZE,
            default_value=0))
  # Bucketized features: one identity column per feature with its own count.
  for key, num_buckets in zip(
      features.transformed_names(features.BUCKET_FEATURE_KEYS),
      features.BUCKET_FEATURE_BUCKET_COUNT):
    categorical_columns.append(
        tf.feature_column.categorical_column_with_identity(
            key, num_buckets=num_buckets, default_value=0))
  # Already-categorical features with known maximum values.
  for key, num_buckets in zip(
      features.transformed_names(features.CATEGORICAL_FEATURE_KEYS),
      features.CATEGORICAL_FEATURE_MAX_VALUES):
    categorical_columns.append(
        tf.feature_column.categorical_column_with_identity(
            key, num_buckets=num_buckets, default_value=0))
  # Categorical columns go to the linear (wide) side; dense floats to the DNN.
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_output, schema):
  """Build the serving in inputs.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is not provided at serving time.
  raw_feature_spec.pop(features.LABEL_KEY)

  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()

  # Apply the tf-transform preprocessing graph to the parsed raw features so
  # the model receives transformed features at serving time.
  transformed_features = tf_transform_output.transform_raw_features(
      serving_input_receiver.features)

  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)

  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')

  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  raw_features = tf.io.parse_example(
      serialized=serialized_tf_example, features=raw_feature_spec)

  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  transformed_features = tf_transform_output.transform_raw_features(
      raw_features)

  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}

  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT, but slicing will happen on raw features.
  raw_features.update(transformed_features)

  return tfma.export.EvalInputReceiver(
      features=raw_features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[features.transformed_name(
          features.LABEL_KEY)])
def _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200):
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
      dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # Unlike the Keras variant of this helper, the dataset is not repeated; the
  # estimator's train/eval specs bound consumption by steps.
  return data_accessor.tf_dataset_factory(
      file_pattern,
      dataset_options.TensorFlowDatasetOptions(
          batch_size=batch_size,
          label_key=features.transformed_name(features.LABEL_KEY)),
      tf_transform_output.transformed_metadata.schema)
def _create_train_and_eval_spec(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)

  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      trainer_fn_args.data_accessor,
      tf_transform_output,
      batch_size=constants.TRAIN_BATCH_SIZE)

  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      trainer_fn_args.data_accessor,
      tf_transform_output,
      batch_size=constants.EVAL_BATCH_SIZE)

  train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)

  serving_receiver_fn = lambda: _example_serving_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)

  # FinalExporter exports the serving graph once, at the end of training.
  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')

  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=1)
  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)

  estimator = _build_estimator(
      hidden_units=constants.HIDDEN_UNITS, config=run_config)

  # Create an input receiver for TFMA processing
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)

  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
# TFX will call this function
def run_fn(fn_args):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())

  train_and_eval_spec = _create_train_and_eval_spec(fn_args, schema)

  # Train the model
  logging.info('Training model.')
  tf.estimator.train_and_evaluate(train_and_eval_spec['estimator'],
                                  train_and_eval_spec['train_spec'],
                                  train_and_eval_spec['eval_spec'])
  logging.info('Training complete. Model written to %s',
               fn_args.serving_model_dir)

  # Export an eval savedmodel for TFMA
  # NOTE: When trained in distributed training cluster, eval_savedmodel must be
  # exported only by the chief worker.
  logging.info('Exporting eval_savedmodel for TFMA.')
  tfma.export.export_eval_savedmodel(
      estimator=train_and_eval_spec['estimator'],
      export_dir_base=fn_args.eval_model_dir,
      eval_input_receiver_fn=train_and_eval_spec['eval_input_receiver_fn'])

  logging.info('Exported eval_savedmodel to %s.', fn_args.eval_model_dir)
from typing import Any, Dict, Text
import apache_beam as beam
from apache_beam.io.gcp import bigquery
import tensorflow as tf
from tfx.utils import telemetry_utils
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.typehints.Dict[Text, Any])
def ReadFromBigQuery(
    pipeline: beam.Pipeline, query: Text) -> beam.pvalue.PCollection:
  """Read data from BigQuery.

  Args:
    pipeline: Beam pipeline.
    query: A BigQuery sql string.

  Returns:
    PCollection of dict.
  """
  # Uses standard SQL; the telemetry labels tag the BigQuery job so usage can
  # be attributed to TFX.
  return (pipeline
          | 'ReadFromBigQuery' >> bigquery.ReadFromBigQuery(
              query=query,
              use_standard_sql=True,
              bigquery_job_labels=telemetry_utils.get_labels_dict()))
def row_to_example(  # pylint: disable=invalid-name
    field_to_type: Dict[Text, Text],
    field_name_to_data: Dict[Text, Any]) -> tf.train.Example:
  """Convert bigquery result row to tf example.

  Args:
    field_to_type: The name of the field to its type from BigQuery.
    field_name_to_data: The data need to be converted from BigQuery that
      contains field name and data.

  Returns:
    A tf.train.Example that converted from the BigQuery row. Note that BOOLEAN
    type in BigQuery result will be converted to int in tf.train.Example.

  Raises:
    RuntimeError: If the data type is not supported to be converted.
      Only INTEGER, BOOLEAN, FLOAT, STRING is supported now.
  """
  feature = {}
  for key, value in field_name_to_data.items():
    data_type = field_to_type[key]

    if value is None:
      # A NULL column becomes an empty (kind-less) feature.
      feature[key] = tf.train.Feature()
      continue

    # REPEATED columns arrive as lists; wrap scalars for uniform handling.
    value_list = value if isinstance(value, list) else [value]
    if data_type in ('INTEGER', 'BOOLEAN'):
      feature[key] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=value_list))
    elif data_type == 'FLOAT':
      feature[key] = tf.train.Feature(
          float_list=tf.train.FloatList(value=value_list))
    elif data_type == 'STRING':
      feature[key] = tf.train.Feature(
          bytes_list=tf.train.BytesList(
              value=[tf.compat.as_bytes(elem) for elem in value_list]))
    else:
      # TODO(jyzhao): support more types.
      raise RuntimeError(
          'BigQuery column type {} is not supported.'.format(data_type))

  return tf.train.Example(features=tf.train.Features(feature=feature))
"""TFX BigQueryToElwcExampleGen component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.base import executor_spec
from tfx.components.example_gen import component
from tfx.components.example_gen import utils
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.component import executor
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.proto import elwc_config_pb2
from tfx.proto import example_gen_pb2
class BigQueryToElwcExampleGen(component.QueryBasedExampleGen):
  """Official TFX BigQueryToElwcExampleGen component.

  The BigQueryToElwcExampleGen component takes a query, and generates train
  and eval ExampleListWithContext(ELWC) for downstream components.
  """

  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               query: Optional[Text] = None,
               elwc_config: Optional[elwc_config_pb2.ElwcConfig] = None,
               input_config: Optional[example_gen_pb2.Input] = None,
               output_config: Optional[example_gen_pb2.Output] = None,
               example_artifacts: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a BigQueryElwcExampleGen component.

    Args:
      query: BigQuery sql string, query result will be treated as a single
        split, can be overwritten by input_config.
      elwc_config: The elwc config contains a list of context feature fields.
        The fields are used to build context feature. Examples with the same
        context feature will be converted to an ELWC(ExampleListWithContext)
        instance. For example, when there are two examples with the same
        context field, the two examples will be integrated into an ELWC
        instance.
      input_config: An example_gen_pb2.Input instance with Split.pattern as
        BigQuery sql string. If set, it overwrites the 'query' arg, and allows
        different queries per split. If any field is provided as a
        RuntimeParameter, input_config should be constructed as a dict with the
        same field names as Input proto message.
      output_config: An example_gen_pb2.Output instance, providing output
        configuration. If unset, default splits will be 'train' and 'eval' with
        size 2:1. If any field is provided as a RuntimeParameter, input_config
        should be constructed as a dict with the same field names as Output
        proto message.
      example_artifacts: Optional channel of 'ExamplesPath' for output train and
        eval examples.
      instance_name: Optional unique instance name. Necessary if multiple
        BigQueryExampleGen components are declared in the same pipeline.

    Raises:
      RuntimeError: Only one of query and input_config should be set and
        elwc_config is required.
    """
    if bool(query) == bool(input_config):
      raise RuntimeError('Exactly one of query and input_config should be set.')
    if not elwc_config:
      raise RuntimeError(
          'elwc_config is required for BigQueryToElwcExampleGen.')
    input_config = input_config or utils.make_default_input_config(query)
    # Pack the ElwcConfig into the generic CustomConfig Any field so the
    # executor can unpack it at runtime.
    packed_custom_config = example_gen_pb2.CustomConfig()
    packed_custom_config.custom_config.Pack(elwc_config)
    super(BigQueryToElwcExampleGen, self).__init__(
        input_config=input_config,
        output_config=output_config,
        output_data_format=example_gen_pb2.FORMAT_PROTO,
        custom_config=packed_custom_config,
        example_artifacts=example_artifacts,
        instance_name=instance_name)
"""Generic TFX BigQueryToElwcExampleGen executor."""
from typing import Any, Dict, Iterable, List, Set, Text, Tuple
import apache_beam as beam
from google.cloud import bigquery
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.extensions.google_cloud_big_query import utils
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.proto import elwc_config_pb2
from tfx.proto import example_gen_pb2
from google.protobuf import json_format
from tensorflow_serving.apis import input_pb2
# TODO(b/158514307): Revisit when PGBKCVOperation can hold serialized keys.
@beam.typehints.with_input_types(Dict[Text, Any])
@beam.typehints.with_output_types(Tuple[bytes, tf.train.Example])
class _RowToContextFeatureAndExample(beam.DoFn):
  """Convert bigquery result to context feature and example feature pair."""

  def __init__(self, type_map: Dict[Text, Text],
               context_feature_fields: Set[Text]):
    # Maps BigQuery field name -> BigQuery type name (e.g. 'INTEGER').
    self._type_map = type_map
    # Field names that belong to the ELWC context (grouping key).
    self._context_feature_fields = context_feature_fields

  def process(
      self, instance: Dict[Text,
                           Any]) -> Iterable[Tuple[bytes, tf.train.Example]]:
    # Split the row into context fields and per-example fields.
    context_feature = dict((k, instance[k])
                           for k in instance.keys()
                           if k in self._context_feature_fields)
    context_feature_proto = utils.row_to_example(self._type_map,
                                                 context_feature)
    # Deterministic serialization so equal contexts produce equal group keys.
    context_feature_key = context_feature_proto.SerializeToString(
        deterministic=True)
    example_feature = dict((k, instance[k])
                           for k in instance.keys()
                           if k not in self._context_feature_fields)
    example_feature_value = utils.row_to_example(self._type_map,
                                                 example_feature)
    yield (context_feature_key, example_feature_value)
def _ConvertContextAndExamplesToElwc(
    context_feature_and_examples: Tuple[bytes, List[tf.train.Example]]
) -> input_pb2.ExampleListWithContext:
  """Packs a serialized context feature and its examples into an ELWC."""
  serialized_context, example_list = context_feature_and_examples
  context_proto = tf.train.Example()
  context_proto.ParseFromString(serialized_context)
  elwc = input_pb2.ExampleListWithContext(
      context=context_proto, examples=example_list)
  return elwc
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(input_pb2.ExampleListWithContext)
def _BigQueryToElwc(pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
                    split_pattern: Text) -> beam.pvalue.PCollection:
  """Read from BigQuery and transform to ExampleListWithContext.

  When a field has no value in BigQuery, a feature with no value will be
  generated in the tf.train.Features. This behavior is consistent with
  BigQueryExampleGen.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties.
    split_pattern: Split.pattern in Input config, a BigQuery sql string.

  Returns:
    PCollection of ExampleListWithContext.

  Raises:
    RuntimeError: Context features must be included in the queried result.
  """
  # Recover the ElwcConfig that the component packed into CustomConfig.
  custom_config = example_gen_pb2.CustomConfig()
  json_format.Parse(exec_properties['custom_config'], custom_config)
  elwc_config = elwc_config_pb2.ElwcConfig()
  custom_config.custom_config.Unpack(elwc_config)

  client = bigquery.Client()
  # Dummy query to get the type information for each field.
  query_job = client.query('SELECT * FROM ({}) LIMIT 0'.format(split_pattern))
  results = query_job.result()
  type_map = {}
  context_feature_fields = set(elwc_config.context_feature_fields)
  field_names = set()
  for field in results.schema:
    type_map[field.name] = field.field_type
    field_names.add(field.name)
  # Check whether the query contains necessary context fields.
  if not field_names.issuperset(context_feature_fields):
    raise RuntimeError('Context feature fields are missing from the query.')

  # Group rows by serialized context feature, then pack each group into one
  # ExampleListWithContext proto.
  return (
      pipeline
      | 'ReadFromBigQuery' >> utils.ReadFromBigQuery(query=split_pattern)
      | 'RowToContextFeatureAndExample' >> beam.ParDo(
          _RowToContextFeatureAndExample(type_map, context_feature_fields))
      |
      'CombineByContext' >> beam.CombinePerKey(beam.combiners.ToListCombineFn())
      | 'ConvertContextAndExamplesToElwc' >>
      beam.Map(_ConvertContextAndExamplesToElwc))
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
  """Generic TFX BigQueryElwcExampleGen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for BigQuery to ExampleListWithContext."""
    return _BigQueryToElwc
"""TFX BigQueryExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from tfx import types
from tfx.components.example_gen import component
from tfx.components.example_gen import utils
from tfx.dsl.components.base import executor_spec
from tfx.extensions.google_cloud_big_query.example_gen import executor
from tfx.proto import example_gen_pb2
class BigQueryExampleGen(component.QueryBasedExampleGen):
  """Official TFX BigQueryExampleGen component.

  The BigQuery examplegen component takes a query, and generates train
  and eval examples for downstream components.
  """

  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               query: Optional[Text] = None,
               input_config: Optional[example_gen_pb2.Input] = None,
               output_config: Optional[example_gen_pb2.Output] = None,
               example_artifacts: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a BigQueryExampleGen component.

    Args:
      query: BigQuery sql string, query result will be treated as a single
        split, can be overwritten by input_config.
      input_config: An example_gen_pb2.Input instance with Split.pattern as
        BigQuery sql string. If set, it overwrites the 'query' arg, and allows
        different queries per split. If any field is provided as a
        RuntimeParameter, input_config should be constructed as a dict with the
        same field names as Input proto message.
      output_config: An example_gen_pb2.Output instance, providing output
        configuration. If unset, default splits will be 'train' and 'eval' with
        size 2:1. If any field is provided as a RuntimeParameter,
        input_config should be constructed as a dict with the same field names
        as Output proto message.
      example_artifacts: Optional channel of 'ExamplesPath' for output train and
        eval examples.
      instance_name: Optional unique instance name. Necessary if multiple
        BigQueryExampleGen components are declared in the same pipeline.

    Raises:
      RuntimeError: Only one of query and input_config should be set.
    """
    if bool(query) == bool(input_config):
      raise RuntimeError('Exactly one of query and input_config should be set.')
    input_config = input_config or utils.make_default_input_config(query)
    super(BigQueryExampleGen, self).__init__(
        input_config=input_config,
        output_config=output_config,
        example_artifacts=example_artifacts,
        instance_name=instance_name)
"""Generic TFX BigQueryExampleGen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text
import apache_beam as beam
from apache_beam.options import value_provider
from google.cloud import bigquery
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.extensions.google_cloud_big_query import utils
class _BigQueryConverter(object):
  """Helper class for bigquery result row to tf example conversion."""

  def __init__(self, query: Text, project_id: Optional[Text] = None):
    """Instantiate a _BigQueryConverter object.

    Args:
      query: the query statement to get the type information.
      project_id: optional. The GCP project ID to run the query job. Default to
        the GCP project ID set by the gcloud environment on the machine.
    """
    client = bigquery.Client(project=project_id)
    # Dummy query to get the type information for each field.
    query_job = client.query('SELECT * FROM ({}) LIMIT 0'.format(query))
    results = query_job.result()
    # Maps BigQuery field name -> BigQuery type name (e.g. 'INTEGER').
    self._type_map = {}
    for field in results.schema:
      self._type_map[field.name] = field.field_type

  def RowToExample(self, instance: Dict[Text, Any]) -> tf.train.Example:
    """Convert bigquery result row to tf example."""
    return utils.row_to_example(self._type_map, instance)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _BigQueryToExample(
    pipeline: beam.Pipeline,
    exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Read from BigQuery and transform to TF examples.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties.
    split_pattern: Split.pattern in Input config, a BigQuery sql string.

  Returns:
    PCollection of TF examples.
  """
  beam_pipeline_args = exec_properties['_beam_pipeline_args']
  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      beam_pipeline_args)
  # Try to parse the GCP project ID from the beam pipeline options.
  project = pipeline_options.view_as(
      beam.options.pipeline_options.GoogleCloudOptions).project
  # The project may be a deferred ValueProvider; resolve it to a string.
  if isinstance(project, value_provider.ValueProvider):
    project = project.get()
  converter = _BigQueryConverter(split_pattern, project)

  return (pipeline
          | 'QueryTable' >> utils.ReadFromBigQuery(query=split_pattern)
          | 'ToTFExample' >> beam.Map(converter.RowToExample))
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
  """Generic TFX BigQueryExampleGen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns the PTransform that reads BigQuery rows into TF examples."""
    return _BigQueryToExample
"""Functions for creating container components from kubeflow components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Dict, Text
from tfx.dsl.component.experimental import container_component
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import base_component
from tfx.extensions.experimental.kfp_compatibility.proto import kfp_component_spec_pb2
from tfx.types import standard_artifacts
import yaml
from google.protobuf import json_format
def load_kfp_yaml_container_component(
    path: Text) -> Callable[..., base_component.BaseComponent]:
  """Creates a container-based component from a Kubeflow component spec.

  See
  https://www.kubeflow.org/docs/pipelines/reference/component-spec/

  Example:
    component = load_kfp_yaml_container_component(
        "kfp_pipelines_root/components/datasets/Chicago_Taxi_Trips/component.yaml"
    )

  Args:
    path: local file path of a Kubeflow Pipelines component YAML file.

  Returns:
    Container component that can be instantiated in a TFX pipeline.
  """
  with open(path) as component_file:
    parsed = yaml.load(component_file, Loader=yaml.FullLoader)
  # Rewrite raw command/args strings into {constantValue: ...} messages so
  # the dict can be parsed as a proto3 ComponentSpec.
  _convert_target_fields_to_kv_pair(parsed)
  spec = json_format.ParseDict(parsed, kfp_component_spec_pb2.ComponentSpec())
  container = spec.implementation.container
  command = [
      _get_command_line_argument_type(entry)
      for entry in list(container.command) + list(container.args)
  ]
  # TODO(ericlege): Support classname to class translation in inputs.type
  return container_component.create_container_component(
      name=spec.name,
      image=container.image,
      command=command,
      inputs={item.name: standard_artifacts.String for item in spec.inputs},
      outputs={item.name: standard_artifacts.String for item in spec.outputs},
      parameters={},
  )
def _convert_target_fields_to_kv_pair(parsed_dict: Dict[Text, Any]) -> None:
"""Converts in place specific string fields to key value pairs of {constantValue: [Text]} for proto3 compatibility.
Args:
parsed_dict: dictionary obtained from parsing README.ml-pipelines-sdk.md Kubeflow component spec.
This argument is modified in place.
Returns:
None
"""
conversion_string_paths = [
['implementation', 'container', 'command'],
['implementation', 'container', 'args'],
]
for path in conversion_string_paths:
parsed_dict_location = parsed_dict
for label in path:
parsed_dict_location = parsed_dict_location.get(label, {})
if isinstance(parsed_dict_location, list):
for ind, value in enumerate(parsed_dict_location):
if isinstance(value, str):
parsed_dict_location[ind] = {'constantValue': value}
def _get_command_line_argument_type(
    command: kfp_component_spec_pb2.StringOrPlaceholder
) -> placeholders.CommandlineArgumentType:
  """Converts a container command to the corresponding placeholder type.

  Args:
    command: StringOrPlaceholder which encodes a container command.

  Returns:
    command to be passed into create_container_component.

  Raises:
    ValueError: if the command carries no recognized oneof field.
  """
  if command.HasField('constantValue'):
    return command.constantValue
  if command.HasField('inputValue'):
    return placeholders.InputValuePlaceholder(command.inputValue)
  if command.HasField('inputPath'):
    return placeholders.InputUriPlaceholder(command.inputPath)
  if command.HasField('outputPath'):
    return placeholders.OutputUriPlaceholder(command.outputPath)
  raise ValueError('Unrecognized command %s' % command)
"""An abstract class for the runner for both CAIP and uCAIP."""
import abc
import datetime
import json
from typing import Any, Dict, List, Optional, Text
from absl import logging
from googleapiclient import discovery
from googleapiclient import http
from tfx import types
from tfx.types import artifact_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
# Default container image being used for CAIP training jobs.
_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
    version_utils.get_image_version())

# Entrypoint of cloud AI platform training. The module comes from `tfx`
# package installation into a default location of 'python'.
_CONTAINER_COMMAND = ['python', '-m', 'tfx.scripts.run_executor']
class AbstractJobClient(abc.ABC):
  """Abstract class interacting with CAIP CMLE job or uCAIP CustomJob."""

  def __init__(self):
    # Subclasses define both hooks; the client must exist before the
    # per-job variables are initialized.
    self.create_client()
    self._init_var()

  @abc.abstractmethod
  def _init_var(self) -> None:
    """Initializes class variables."""
    pass

  @abc.abstractmethod
  def create_client(self) -> None:
    """Creates the job client.

    Can also be used for recreating the job client (e.g. in the case of
    communication failure).

    Multiple job requests can be done in parallel if needed, by creating an
    instance of the class for each job. Note that one class instance should
    only be used for one job, as each instance stores variables (e.g. job_id)
    specific to each job.
    """
    pass

  @abc.abstractmethod
  def create_training_args(self, input_dict, output_dict, exec_properties,
                           executor_class_path, training_inputs,
                           job_id) -> Dict[Text, Any]:
    """Get training args for runner._launch_aip_training.

    The training args contain the inputs/outputs/exec_properties to the
    tfx.scripts.run_executor module.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Passthrough input dict for
        tfx.components.Trainer.executor.
      executor_class_path: class path for TFX core default trainer.
      training_inputs: Training input argument for AI Platform training job.
      job_id: Job ID for AI Platform Training job. If not supplied,
        system-determined unique ID is given.

    Returns:
      A dict containing the training arguments
    """
    pass

  @abc.abstractmethod
  def _create_job_spec(
      self,
      job_id: Text,
      training_input: Dict[Text, Any],
      job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:
    """Creates the job spec.

    Args:
      job_id: The job ID of the AI Platform training job.
      training_input: Training input argument for AI Platform training job.
      job_labels: The dict of labels that will be attached to this job.

    Returns:
      The job specification.
    """
    pass

  @abc.abstractmethod
  def launch_job(self,
                 job_id: Text,
                 parent: Text,
                 training_input: Dict[Text, Any],
                 job_labels: Optional[Dict[Text, Text]] = None) -> None:
    """Launches a long-running job.

    Args:
      job_id: The job ID of the AI Platform training job.
      parent: The project name in the form of 'projects/{project_id}'
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.
    """
    pass

  @abc.abstractmethod
  def get_job_request(self) -> http.HttpRequest:
    """Gets the job request for the long-running job."""
    pass
class CAIPJobClient(AbstractJobClient):
  """Class for interacting with CAIP CMLE job."""

  def create_client(self) -> None:
    """Creates the discovery job client.

    Multiple job requests can be done in parallel if needed, by creating an
    instance of the class for each job.
    """
    self._client = discovery.build('ml', 'v1')

  def _init_var(self) -> None:
    """Initializes class variables."""
    self._job_id = ''  # Assigned in self.launch_job()
    self._project_id = ''  # Assigned in self.launch_job()

  def create_training_args(self, input_dict: Dict[Text, List[types.Artifact]],
                           output_dict: Dict[Text, List[types.Artifact]],
                           exec_properties: Dict[Text, Any],
                           executor_class_path: Text,
                           training_inputs: Dict[Text, Any],
                           job_id: Optional[Text]) -> Dict[Text, Any]:
    """Get training args for runner._launch_aip_training.

    The training args contain the inputs/outputs/exec_properties to the
    tfx.scripts.run_executor module.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Passthrough input dict for
        tfx.components.Trainer.executor.
      executor_class_path: class path for TFX core default trainer.
      training_inputs: Training input argument for AI Platform training job.
        'pythonModule', 'pythonVersion' and 'runtimeVersion' will be inferred.
        For the full set of parameters, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
      job_id: Job ID for AI Platform Training job. If not supplied,
        system-determined unique ID is given. Refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#resource-job

    Returns:
      A dict containing the training arguments
    """
    # Copy so the caller's dict is not mutated by the edits below.
    training_inputs = training_inputs.copy()

    json_inputs = artifact_utils.jsonify_artifact_dict(input_dict)
    logging.info('json_inputs=\'%s\'.', json_inputs)
    json_outputs = artifact_utils.jsonify_artifact_dict(output_dict)
    logging.info('json_outputs=\'%s\'.', json_outputs)
    json_exec_properties = json.dumps(exec_properties, sort_keys=True)
    logging.info('json_exec_properties=\'%s\'.', json_exec_properties)

    # We use custom containers to launch training on AI Platform, which invokes
    # the specified image using the container's entrypoint. The default
    # entrypoint for TFX containers is to call scripts/run_executor.py. The
    # arguments below are passed to this run_executor entry to run the executor
    # specified in `executor_class_path`.
    container_command = _CONTAINER_COMMAND + [
        '--executor_class_path',
        executor_class_path,
        '--inputs',
        json_inputs,
        '--outputs',
        json_outputs,
        '--exec-properties',
        json_exec_properties,
    ]

    if not training_inputs.get('masterConfig'):
      training_inputs['masterConfig'] = {
          'imageUri': _TFX_IMAGE,
      }

    # Always use our own entrypoint instead of relying on container default.
    if 'containerCommand' in training_inputs['masterConfig']:
      # logging.warn is a deprecated alias; use logging.warning.
      logging.warning('Overriding custom value of containerCommand')
    training_inputs['masterConfig']['containerCommand'] = container_command

    # Pop project_id so AIP doesn't complain about an unexpected parameter.
    # It's been a stowaway in aip_args and has finally reached its destination.
    project = training_inputs.pop('project')
    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
      job_labels = telemetry_utils.get_labels_dict()

    # 'tfx_YYYYmmddHHMMSS' is the default job ID if not explicitly specified.
    job_id = job_id or 'tfx_{}'.format(
        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))

    training_args = {
        'job_id': job_id,
        'project': project,
        'training_input': training_inputs,
        'job_labels': job_labels
    }

    return training_args

  def _create_job_spec(
      self,
      job_id: Text,
      training_input: Dict[Text, Any],
      job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:
    """Creates the job spec.

    Args:
      job_id: The job ID of the AI Platform training job.
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.

    Returns:
      The job specification. See
      https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs
    """
    job_spec = {
        'jobId': job_id,
        'trainingInput': training_input,
        'labels': job_labels,
    }
    return job_spec

  def launch_job(self,
                 job_id: Text,
                 parent: Text,
                 training_input: Dict[Text, Any],
                 job_labels: Optional[Dict[Text, Text]] = None) -> None:
    """Launches a long-running job.

    Args:
      job_id: The job ID of the AI Platform training job.
      parent: The project name in the form of 'projects/{project_id}'
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.
    """
    job_spec = self._create_job_spec(job_id, training_input, job_labels)

    # Submit job to AIP Training
    logging.info('TrainingInput=%s', training_input)
    logging.info('Submitting job=\'%s\', project=\'%s\' to AI Platform.',
                 job_id, parent)
    request = self._client.projects().jobs().create(
        body=job_spec, parent=parent)
    self._job_id = job_id
    self._project_id = parent
    request.execute()

  def get_job_request(self) -> http.HttpRequest:
    """Gets the job request for the long-running job."""
    job_name = '{}/jobs/{}'.format(self._project_id, self._job_id)
    request = self._client.projects().jobs().get(name=job_name)
    return request
def get_job_client(enable_ucaip: bool = False):
  """Returns a job client for the requested AI Platform flavor.

  Args:
    enable_ucaip: If True, requests a uCAIP job client, which is not
      implemented yet.

  Returns:
    A `CAIPJobClient` instance when `enable_ucaip` is False.

  Raises:
    NotImplementedError: if `enable_ucaip` is True.
  """
  if enable_ucaip:
    raise NotImplementedError('uCAIP support not yet implemented')
  return CAIPJobClient()
"""Custom executor to push TFX model to AI Platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from typing import Any, Dict, List, Text
from google.api_core import client_options # pylint: disable=unused-import
from googleapiclient import discovery
from tfx import types
from tfx.components.pusher import executor as tfx_pusher_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx.utils import path_utils
from tfx.utils import telemetry_utils
# Google Cloud AI Platform's ModelVersion resource path format.
# https://cloud.google.com/ai-platform/prediction/docs/reference/rest/v1/projects.models.versions/get
_CAIP_MODEL_VERSION_PATH_FORMAT = (
    'projects/{project_id}/models/{model}/versions/{version}')

# Keys to the items in custom_config passed as a part of exec_properties.
SERVING_ARGS_KEY = 'ai_platform_serving_args'
ENDPOINT_ARGS_KEY = 'endpoint'

# Keys for custom_config.
_CUSTOM_CONFIG_KEY = 'custom_config'
class Executor(tfx_pusher_executor.Executor):
  """Deploy a model to Google Cloud AI Platform serving."""

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]):
    """Overrides the tfx_pusher_executor.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from evaluator.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: Mostly a passthrough input dict for
        tfx.components.Pusher.executor. The following keys in `custom_config`
        are consumed by this class:
        - ai_platform_serving_args: For the full set of parameters supported
          by Google Cloud AI Platform, refer to
          https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version.
        - endpoint: Optional endpoint override. Should be in format of
          `https://[region]-ml.googleapis.com`. Default to global endpoint if
          not set. Using regional endpoint is recommended by Cloud AI
          Platform. When set, 'regions' key in ai_platform_serving_args
          cannot be set. For more details, please see
          https://cloud.google.com/ai-platform/prediction/docs/regional-endpoints#using_regional_endpoints

    Raises:
      ValueError:
        If ai_platform_serving_args is not in exec_properties.custom_config.
        If Serving model path does not start with gs://.
        If 'endpoint' and 'regions' are set simultaneously.
      RuntimeError: if the Google Cloud AI Platform training job failed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    custom_config = json_utils.loads(
        exec_properties.get(_CUSTOM_CONFIG_KEY, 'null'))
    # Check against the builtin `dict`: typing.Dict is not a reliable runtime
    # isinstance target.
    if custom_config is not None and not isinstance(custom_config, dict):
      raise ValueError('custom_config in execution properties needs to be a '
                       'dict.')
    ai_platform_serving_args = custom_config.get(SERVING_ARGS_KEY)
    if not ai_platform_serving_args:
      raise ValueError(
          '\'ai_platform_serving_args\' is missing in \'custom_config\'')
    endpoint = custom_config.get(ENDPOINT_ARGS_KEY)
    if endpoint and 'regions' in ai_platform_serving_args:
      raise ValueError(
          '\'endpoint\' and \'ai_platform_serving_args.regions\' cannot be set simultaneously'
      )
    model_push = artifact_utils.get_single_instance(
        output_dict[standard_component_specs.PUSHED_MODEL_KEY])
    if not self.CheckBlessing(input_dict):
      self._MarkNotPushed(model_push)
      return
    model_export = artifact_utils.get_single_instance(
        input_dict[standard_component_specs.MODEL_KEY])
    service_name, api_version = runner.get_service_name_and_api_version(
        ai_platform_serving_args)
    # Deploy the model.
    io_utils.copy_dir(
        src=path_utils.serving_model_path(model_export.uri), dst=model_push.uri)
    model_path = model_push.uri
    # TODO(jjong): Introduce Versioning.
    # Note that we're adding "v" prefix as Cloud AI Prediction only allows the
    # version name that starts with letters, and contains letters, digits,
    # underscore only.
    model_version = 'v{}'.format(int(time.time()))
    executor_class_path = '%s.%s' % (self.__class__.__module__,
                                     self.__class__.__name__)
    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
      job_labels = telemetry_utils.get_labels_dict()
    endpoint = endpoint or runner.DEFAULT_ENDPOINT
    api = discovery.build(
        service_name,
        api_version,
        client_options=client_options.ClientOptions(api_endpoint=endpoint),
    )
    runner.deploy_model_for_aip_prediction(
        api,
        model_path,
        model_version,
        ai_platform_serving_args,
        job_labels,
    )
    self._MarkPushed(
        model_push,
        pushed_destination=_CAIP_MODEL_VERSION_PATH_FORMAT.format(
            project_id=ai_platform_serving_args['project_id'],
            model=ai_platform_serving_args['model_name'],
            version=model_version),
        pushed_version=model_version)
"""Helper class to start TFX training jobs on AI Platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Text
import absl
from tfx import types
from tfx.components.trainer import executor as tfx_trainer_executor
from tfx.dsl.components.base import base_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.types import standard_component_specs
from tfx.utils import json_utils
# Keys to the items in custom_config passed as a part of exec_properties.
TRAINING_ARGS_KEY = 'ai_platform_training_args'
JOB_ID_KEY = 'ai_platform_training_job_id'
class GenericExecutor(base_executor.BaseExecutor):
  """Start a trainer job on Google Cloud AI Platform using a generic Trainer."""

  def _GetExecutorClass(self):
    # Subclasses override this hook to select the underlying TFX executor.
    return tfx_trainer_executor.GenericExecutor

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]):
    """Starts a trainer job on Google Cloud AI Platform.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Mostly a passthrough input dict for
        tfx.components.Trainer.executor. custom_config.ai_platform_training_args
        and custom_config.ai_platform_training_job_id are consumed by this
        class. For the full set of parameters supported by Google Cloud AI
        Platform, refer to
        https://cloud.google.com/ml-engine/docs/tensorflow/training-jobs#configuring_the_job

    Returns:
      None

    Raises:
      ValueError: if ai_platform_training_args is not in
        exec_properties.custom_config.
      RuntimeError: if the Google Cloud AI Platform training job failed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    custom_config = json_utils.loads(
        exec_properties.get(standard_component_specs.CUSTOM_CONFIG_KEY, 'null'))
    # Check against the builtin `dict`: typing.Dict is not a reliable runtime
    # isinstance target.
    if custom_config is not None and not isinstance(custom_config, dict):
      raise ValueError('custom_config in execution properties needs to be a '
                       'dict.')
    # NOTE(review): if custom_config is None the .get below raises
    # AttributeError -- presumably callers always supply custom_config;
    # confirm before hardening.
    training_inputs = custom_config.get(TRAINING_ARGS_KEY)
    if training_inputs is None:
      err_msg = '\'%s\' not found in custom_config.' % TRAINING_ARGS_KEY
      absl.logging.error(err_msg)
      raise ValueError(err_msg)
    job_id = custom_config.get(JOB_ID_KEY)

    executor_class = self._GetExecutorClass()
    executor_class_path = '%s.%s' % (executor_class.__module__,
                                     executor_class.__name__)
    # Note: exec_properties['custom_config'] here is a dict.
    return runner.start_aip_training(input_dict, output_dict, exec_properties,
                                     executor_class_path, training_inputs,
                                     job_id)
class Executor(GenericExecutor):
  """Start a trainer job on Google Cloud AI Platform using a default Trainer."""

  def _GetExecutorClass(self):
    # Select the default (estimator-based) TFX trainer executor.
    return tfx_trainer_executor.Executor
"""BulkInferrer component for Cloud AI platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.extensions.google_cloud_ai_platform.bulk_inferrer import executor
from tfx.proto import bulk_inferrer_pb2
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ExecutionParameter
from tfx.utils import json_utils
class CloudAIBulkInferrerComponentSpec(types.ComponentSpec):
  """ComponentSpec for BulkInferrer component of Cloud AI platform."""

  # Execution parameters: proto-valued specs for data selection and output
  # shape, plus a JSON-serialized custom_config string for serving args.
  PARAMETERS = {
      'data_spec':
          ExecutionParameter(type=bulk_inferrer_pb2.DataSpec, optional=True),
      'output_example_spec':
          ExecutionParameter(
              type=bulk_inferrer_pb2.OutputExampleSpec, optional=True),
      'custom_config':
          ExecutionParameter(type=(str, Text)),
  }
  # Input channels: examples to run inference on, the model to deploy, and an
  # optional blessing gate.
  INPUTS = {
      'examples':
          ChannelParameter(type=standard_artifacts.Examples),
      'model':
          ChannelParameter(type=standard_artifacts.Model),
      'model_blessing':
          ChannelParameter(
              type=standard_artifacts.ModelBlessing, optional=True),
  }
  # Output channels: exactly one of inference_result / output_examples is
  # produced, depending on whether output_example_spec is set (see component).
  OUTPUTS = {
      'inference_result':
          ChannelParameter(
              type=standard_artifacts.InferenceResult, optional=True),
      'output_examples':
          ChannelParameter(type=standard_artifacts.Examples, optional=True),
  }
class CloudAIBulkInferrerComponent(base_component.BaseComponent):
  """A Cloud AI component to do batch inference on a remote hosted model.

  BulkInferrer component will push a model to Google Cloud AI Platform,
  consume examples data, send request to the remote hosted model,
  and produces the inference results to an external location
  as PredictionLog proto. After inference, it will delete the model from
  Google Cloud AI Platform.

  TODO(b/155325467): Creates an end-to-end test for this component.
  """

  SPEC_CLASS = CloudAIBulkInferrerComponentSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      examples: types.Channel = None,
      model: Optional[types.Channel] = None,
      model_blessing: Optional[types.Channel] = None,
      data_spec: Optional[Union[bulk_inferrer_pb2.DataSpec, Dict[Text,
                                                                 Any]]] = None,
      output_example_spec: Optional[Union[bulk_inferrer_pb2.OutputExampleSpec,
                                          Dict[Text, Any]]] = None,
      custom_config: Dict[Text, Any] = None,
      inference_result: Optional[types.Channel] = None,
      output_examples: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct a BulkInferrer component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, usually
        produced by an ExampleGen component. _required_
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: A Channel of type `standard_artifacts.ModelBlessing`,
        usually produced by a ModelValidator component.
      data_spec: bulk_inferrer_pb2.DataSpec instance that describes data
        selection. If any field is provided as a RuntimeParameter, data_spec
        should be constructed as a dict with the same field names as DataSpec
        proto message.
      output_example_spec: bulk_inferrer_pb2.OutputExampleSpec instance,
        specify if you want BulkInferrer to output examples instead of
        inference result. If any field is provided as a RuntimeParameter,
        output_example_spec should be constructed as a dict with the same
        field names as OutputExampleSpec proto message.
      custom_config: A dict which contains the deployment job parameters to be
        passed to Google Cloud AI Platform.
        custom_config.ai_platform_serving_args need to contain the serving job
        parameters. For the full set of parameters, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
      inference_result: Channel of type `standard_artifacts.InferenceResult`
        to store the inference results, must not be specified when
        output_example_spec is set.
      output_examples: Channel of type `standard_artifacts.Examples`
        to store the output examples, must not be specified when
        output_example_spec is unset. Check output_example_spec for details.
      instance_name: Optional name assigned to this specific instance of
        BulkInferrer. Required only if multiple BulkInferrer components are
        declared in the same pipeline.

    Raises:
      ValueError: Must not specify inference_result or output_examples depends
        on whether output_example_spec is set or not.
    """
    # Exactly one of the two output channels is materialized, keyed off
    # whether output_example_spec was supplied.
    if output_example_spec:
      if inference_result:
        raise ValueError(
            'Must not specify inference_result when output_example_spec is set.'
        )
      output_examples = output_examples or types.Channel(
          type=standard_artifacts.Examples)
    else:
      if output_examples:
        raise ValueError(
            'Must not specify output_examples when output_example_spec is unset.'
        )
      inference_result = inference_result or types.Channel(
          type=standard_artifacts.InferenceResult)

    spec = CloudAIBulkInferrerComponentSpec(
        examples=examples,
        model=model,
        model_blessing=model_blessing,
        data_spec=data_spec or bulk_inferrer_pb2.DataSpec(),
        output_example_spec=output_example_spec,
        custom_config=json_utils.dumps(custom_config),
        inference_result=inference_result,
        output_examples=output_examples)
    super(CloudAIBulkInferrerComponent, self).__init__(
        spec=spec, instance_name=instance_name)
"""BulkInferrer executor for Cloud AI platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import re
from typing import Any, Dict, List, Text
from absl import logging
from googleapiclient import discovery
import tensorflow as tf
from tfx import types
from tfx.components.bulk_inferrer import executor as bulk_inferrer_executor
from tfx.components.util import model_utils
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.proto import bulk_inferrer_pb2
from tfx.types import artifact_utils
from tfx.utils import json_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils import telemetry_utils
from tfx_bsl.public.proto import model_spec_pb2
from tensorflow.python.saved_model import loader_impl # pylint:disable=g-direct-tensorflow-import
# TODO(b/140306674): Stop using the internal TF API.

# Regexes matching Google Cloud AI Platform ModelVersion resource paths:
# with an explicit version, and model-only (default version).
_CLOUD_PUSH_DESTINATION_RE = re.compile(
    r'^projects\/([^\/]+)\/models\/([^\/]+)\/versions\/([^\/]+)$')
_CLOUD_PUSH_DESTINATION_RE_DEFAULT_VERSION = re.compile(
    r'^projects\/([^\/]+)\/models\/([^\/]+)$')

# We define the following aliases of Any because the actual types are not
# public.
_SignatureDef = Any

# Keys to the items in custom_config passed as a part of exec_properties.
SERVING_ARGS_KEY = 'ai_platform_serving_args'

# Keys for custom_config.
_CUSTOM_CONFIG_KEY = 'custom_config'
class Executor(bulk_inferrer_executor.Executor):
  """Bulk inferrer executor for inference on Cloud AI Platform."""

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Runs batch inference on a given model with given input examples.

    This function creates a new model (if necessary) and a new model version
    before inference, and cleans up resources after inference. It provides
    re-executability as it cleans up (only) the model resources that are
    created during the process, even if the inference job failed.

    Args:
      input_dict: Input dict from input key to a list of Artifacts.
        - examples: examples for inference.
        - model: exported model.
        - model_blessing: model blessing result.
      output_dict: Output dict from output key to a list of Artifacts.
        - output: bulk inference results.
      exec_properties: A dict of execution properties.
        - data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.
        - custom_config: custom_config.ai_platform_serving_args need to contain
          the serving job parameters sent to Google Cloud AI Platform. For the
          full set of parameters, refer to
          https://cloud.google.com/ml-engine/reference/rest/v1/projects.models

    Returns:
      None

    Raises:
      ValueError: if required inputs or execution properties are missing or
        malformed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    # Outputs are optional; resolve each one to a single artifact or None.
    if output_dict.get('inference_result'):
      inference_result = artifact_utils.get_single_instance(
          output_dict['inference_result'])
    else:
      inference_result = None
    if output_dict.get('output_examples'):
      output_examples = artifact_utils.get_single_instance(
          output_dict['output_examples'])
    else:
      output_examples = None

    if 'examples' not in input_dict:
      raise ValueError('\'examples\' is missing in input dict.')
    if 'model' not in input_dict:
      raise ValueError('Input models are not valid, model '
                       'need to be specified.')
    if 'model_blessing' in input_dict:
      model_blessing = artifact_utils.get_single_instance(
          input_dict['model_blessing'])
      if not model_utils.is_model_blessed(model_blessing):
        # Skip inference entirely for an unblessed model.
        logging.info('Model on %s was not blessed', model_blessing.uri)
        return
    else:
      logging.info('Model blessing is not provided, exported model will be '
                   'used.')
    if _CUSTOM_CONFIG_KEY not in exec_properties:
      raise ValueError('Input exec properties are not valid, {} '
                       'need to be specified.'.format(_CUSTOM_CONFIG_KEY))
    custom_config = json_utils.loads(
        exec_properties.get(_CUSTOM_CONFIG_KEY, 'null'))
    # Use the builtin `dict` for the isinstance check (typing.Dict is only
    # meant for annotations). Rejecting non-dicts here also covers a None
    # custom_config, which previously fell through and crashed later with an
    # AttributeError on `.get`.
    if not isinstance(custom_config, dict):
      raise ValueError('custom_config in execution properties needs to be a '
                       'dict.')
    ai_platform_serving_args = custom_config.get(SERVING_ARGS_KEY)
    if not ai_platform_serving_args:
      raise ValueError(
          '\'ai_platform_serving_args\' is missing in \'custom_config\'')
    service_name, api_version = runner.get_service_name_and_api_version(
        ai_platform_serving_args)
    executor_class_path = '%s.%s' % (self.__class__.__module__,
                                     self.__class__.__name__)
    # Attach telemetry labels identifying this executor to the AIP jobs.
    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
      job_labels = telemetry_utils.get_labels_dict()
    model = artifact_utils.get_single_instance(input_dict['model'])
    model_path = path_utils.serving_model_path(model.uri)
    logging.info('Use exported model from %s.', model_path)
    # Use model artifact uri to generate model version to guarantee the
    # 1:1 mapping from model version to model.
    model_version = 'version_' + hashlib.sha256(model.uri.encode()).hexdigest()
    inference_spec = self._get_inference_spec(model_path, model_version,
                                              ai_platform_serving_args)
    data_spec = bulk_inferrer_pb2.DataSpec()
    proto_utils.json_to_proto(exec_properties['data_spec'], data_spec)
    output_example_spec = bulk_inferrer_pb2.OutputExampleSpec()
    if exec_properties.get('output_example_spec'):
      proto_utils.json_to_proto(exec_properties['output_example_spec'],
                                output_example_spec)
    api = discovery.build(service_name, api_version)
    new_model_created = False
    try:
      new_model_created = runner.create_model_for_aip_prediction_if_not_exist(
          api, job_labels, ai_platform_serving_args)
      # Model creation is handled above, so the deploy call skips it; the new
      # version is deliberately not made the default since it is temporary.
      runner.deploy_model_for_aip_prediction(
          api,
          model_path,
          model_version,
          ai_platform_serving_args,
          job_labels,
          skip_model_creation=True,
          set_default_version=False,
      )
      self._run_model_inference(data_spec, output_example_spec,
                                input_dict['examples'], output_examples,
                                inference_result, inference_spec)
    except Exception as e:
      logging.error('Error in executing CloudAIBulkInferrerComponent: %s',
                    str(e))
      raise
    finally:
      # Guarantee newly created resources are cleaned up even if the inference
      # job failed.
      # Clean up the newly deployed model.
      runner.delete_model_version_from_aip_if_exists(api, model_version,
                                                     ai_platform_serving_args)
      if new_model_created:
        runner.delete_model_from_aip_if_exists(api, ai_platform_serving_args)

  def _get_inference_spec(
      self, model_path: Text, model_version: Text,
      ai_platform_serving_args: Dict[Text, Any]
  ) -> model_spec_pb2.InferenceSpecType:
    """Builds an InferenceSpecType pointing at the deployed AIP model version.

    Args:
      model_path: Path to the serving model.
      model_version: Version string of the model on AI Platform.
      ai_platform_serving_args: Serving parameters; must contain 'project_id'
        and 'model_name'.

    Returns:
      An InferenceSpecType proto referencing the hosted model.

    Raises:
      ValueError: if 'project_id' or 'model_name' is missing.
    """
    if 'project_id' not in ai_platform_serving_args:
      raise ValueError(
          '\'project_id\' is missing in \'ai_platform_serving_args\'')
    project_id = ai_platform_serving_args['project_id']
    if 'model_name' not in ai_platform_serving_args:
      raise ValueError(
          '\'model_name\' is missing in \'ai_platform_serving_args\'')
    model_name = ai_platform_serving_args['model_name']
    ai_platform_prediction_model_spec = (
        model_spec_pb2.AIPlatformPredictionModelSpec(
            project_id=project_id,
            model_name=model_name,
            version_name=model_version))
    model_signature = self._get_model_signature(model_path)
    # A single string input indicates the model accepts serialized
    # tf.Examples, so the serialization config path is used.
    if (len(model_signature.inputs) == 1 and list(
        model_signature.inputs.values())[0].dtype == tf.string.as_datatype_enum
       ):
      ai_platform_prediction_model_spec.use_serialization_config = True
    logging.info(
        'Using hosted model on Cloud AI platform, model_name: %s,'
        'model_version: %s.', model_name, model_version)
    result = model_spec_pb2.InferenceSpecType()
    result.ai_platform_prediction_model_spec.CopyFrom(
        ai_platform_prediction_model_spec)
    return result

  def _get_model_signature(self, model_path: Text) -> _SignatureDef:
    """Returns a model signature from the SavedModel at `model_path`.

    Prefers the PREDICT signature, falling back to the default serving
    signature.

    Raises:
      RuntimeError: if no SERVING meta graph or usable signature is found.
    """
    saved_model_pb = loader_impl.parse_saved_model(model_path)
    meta_graph_def = None
    for graph_def in saved_model_pb.meta_graphs:
      if graph_def.meta_info_def.tags == [
          tf.compat.v1.saved_model.tag_constants.SERVING
      ]:
        meta_graph_def = graph_def
    if not meta_graph_def:
      raise RuntimeError('Tag tf.compat.v1.saved_model.tag_constants.SERVING'
                         ' does not exist in saved model: %s. This is required'
                         ' for remote inference.' % model_path)
    if tf.saved_model.PREDICT_METHOD_NAME in meta_graph_def.signature_def:
      return meta_graph_def.signature_def[tf.saved_model.PREDICT_METHOD_NAME]
    if (tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY in
        meta_graph_def.signature_def):
      return meta_graph_def.signature_def[
          tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    raise RuntimeError(
        'Cannot find serving signature in saved model: %s,'
        ' tf.saved_model.PREDICT_METHOD_NAME or '
        ' tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY is needed.' %
        model_path)
import os
import pickle
from typing import Text, Tuple
import absl
import numpy as np
from sklearn.neural_network import MLPClassifier
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.dsl.io import fileio
from tfx.utils import io_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
_FEATURE_KEYS = [
'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g'
]
_LABEL_KEY = 'species'
# The Penguin dataset has 342 records, and is divided into train and eval
# splits in a 2:1 ratio.
_TRAIN_DATA_SIZE = 228
_TRAIN_BATCH_SIZE = 20
def _input_fn(
    file_pattern: Text,
    data_accessor: DataAccessor,
    schema: schema_pb2.Schema,
    batch_size: int = 20,
) -> Tuple[np.ndarray, np.ndarray]:
  """Generates features and labels for tuning/training.

  Args:
    file_pattern: input tfrecord file pattern.
    data_accessor: DataAccessor for converting input to RecordBatch.
    schema: schema of the input data.
    batch_size: An int representing the number of records to combine in a
      single batch.

  Returns:
    A (features, labels) tuple where features is a matrix of features and
    labels is a single vector of label indices.
  """
  batches = data_accessor.record_batch_factory(
      file_pattern,
      dataset_options.RecordBatchesOptions(batch_size=batch_size, num_epochs=1),
      schema)
  feature_blocks = []
  label_blocks = []
  for batch in batches:
    # Map each column name to its flattened values for easy keyed lookup.
    columns = {
        field.name: column.flatten()
        for column, field in zip(batch, batch.schema)
    }
    label_blocks.append(columns[_LABEL_KEY])
    # Stack the per-feature vectors into one (batch, num_features) matrix.
    feature_blocks.append(
        np.stack([columns[key] for key in _FEATURE_KEYS], axis=-1))
  return np.concatenate(feature_blocks), np.concatenate(label_blocks)
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Trains a scikit-learn MLPClassifier on the penguin dataset, logs the eval
  accuracy, and pickles the trained model to the serving directory.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())

  x_train, y_train = _input_fn(fn_args.train_files, fn_args.data_accessor,
                               schema)
  x_eval, y_eval = _input_fn(fn_args.eval_files, fn_args.data_accessor, schema)

  # Convert the TFX step budget into sklearn epochs (iterations over the
  # full training set).
  steps_per_epoch = _TRAIN_DATA_SIZE / _TRAIN_BATCH_SIZE

  model = MLPClassifier(
      hidden_layer_sizes=[8, 8, 8],
      activation='relu',
      solver='adam',
      batch_size=_TRAIN_BATCH_SIZE,
      learning_rate_init=0.0005,
      max_iter=int(fn_args.train_steps / steps_per_epoch),
      verbose=True)

  # Attach the keys so the custom Evaluator extractor can rebuild inputs.
  model.feature_keys = _FEATURE_KEYS
  model.label_key = _LABEL_KEY
  model.fit(x_train, y_train)
  absl.logging.info(model)

  score = model.score(x_eval, y_eval)
  absl.logging.info('Accuracy: %f', score)

  # exist_ok=True so re-running the pipeline against an existing serving
  # directory does not fail with FileExistsError.
  os.makedirs(fn_args.serving_model_dir, exist_ok=True)
  model_path = os.path.join(fn_args.serving_model_dir, 'model.pkl')
  with fileio.open(model_path, 'wb+') as f:
    pickle.dump(model, f)
"""Predict extractor for scikit-learn models."""
import copy
import os
import pickle
from typing import Dict, Iterable, List, Text
import apache_beam as beam
import numpy as np
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.extractors import extractor
from tfx_bsl.tfxio import tensor_adapter
_PREDICT_EXTRACTOR_STAGE_NAME = 'SklearnPredict'
def _make_sklearn_predict_extractor(
    eval_shared_model: tfma.EvalSharedModel,
) -> extractor.Extractor:
  """Creates an extractor that runs predictions with a scikit-learn model.

  The extractor's PTransform loads the serving pickle and runs it against
  every extract, yielding a copy of the incoming extracts with an additional
  extract added for the predictions keyed by tfma.PREDICTIONS_KEY. The model
  inputs are searched for under tfma.FEATURES_KEY.

  Args:
    eval_shared_model: Shared model (single-model evaluation).

  Returns:
    Extractor for extracting predictions.
  """
  models = model_util.verify_and_update_eval_shared_models(eval_shared_model)
  models_by_name = {model.model_name: model for model in models}
  predict_ptransform = _ExtractPredictions(  # pylint: disable=no-value-for-parameter
      eval_shared_models=models_by_name)
  return extractor.Extractor(
      stage_name=_PREDICT_EXTRACTOR_STAGE_NAME,
      ptransform=predict_ptransform)
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
class _TFMAPredictionDoFn(model_util.DoFnWithModels):
  """A DoFn that loads the models and predicts."""

  def __init__(self, eval_shared_models: Dict[Text, types.EvalSharedModel]):
    # DoFnWithModels handles loading each model via its model_loader.
    super(_TFMAPredictionDoFn, self).__init__(
        {k: v.model_loader for k, v in eval_shared_models.items()})

  def setup(self):
    """Resolves the feature/label keys shared by all loaded models.

    Every loaded model must carry identical `feature_keys` and `label_key`
    attributes (attached at training time); mismatches abort the pipeline.
    """
    super(_TFMAPredictionDoFn, self).setup()
    self._feature_keys = None
    self._label_key = None
    for loaded_model in self._loaded_models.values():
      if self._feature_keys and self._label_key:
        # Keys were taken from an earlier model; later models must agree.
        assert self._feature_keys == loaded_model.feature_keys, (
            f'Features mismatch in loaded models. Expected {self._feature_keys}'
            f', got {loaded_model.feature_keys} instead.')
        assert self._label_key == loaded_model.label_key, (
            f'Label mismatch in loaded models. Expected "{self._label_key}"'
            f', got "{loaded_model.label_key}" instead.')
      elif loaded_model.feature_keys and loaded_model.label_key:
        # First model with usable metadata: adopt its keys.
        self._feature_keys = loaded_model.feature_keys
        self._label_key = loaded_model.label_key
      else:
        raise ValueError('Missing feature or label keys in loaded model.')

  def process(self, elem: types.Extracts) -> Iterable[types.Extracts]:
    """Uses loaded models to make predictions on batches of data.

    Args:
      elem: An extract containing batched features.

    Yields:
      Copy of the original extracts with predictions added for each model. If
      there are multiple models, a list of dicts keyed on model names will be
      added, with each value corresponding to a prediction for a single sample.
    """
    # Build feature and label vectors because sklearn cannot read tf.Examples.
    features = []
    labels = []
    result = copy.copy(elem)
    for features_dict in result[constants.FEATURES_KEY]:
      features_row = [features_dict[key] for key in self._feature_keys]
      features.append(np.concatenate(features_row))
      labels.append(features_dict[self._label_key])
    result[constants.LABELS_KEY] = np.concatenate(labels)

    # Generate predictions for each model.
    for model_name, loaded_model in self._loaded_models.items():
      preds = loaded_model.predict(features)
      if len(self._loaded_models) == 1:
        # Single-model case: store the raw prediction array directly.
        result[constants.PREDICTIONS_KEY] = preds
      elif constants.PREDICTIONS_KEY not in result:
        # Multi-model case, first model: one dict per sample.
        result[constants.PREDICTIONS_KEY] = [
            {model_name: pred} for pred in preds]
      else:
        # Multi-model case, subsequent models: merge into per-sample dicts.
        for i, pred in enumerate(preds):
          result[constants.PREDICTIONS_KEY][i][model_name] = pred
    yield result
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def _ExtractPredictions(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection,
    eval_shared_models: Dict[Text, types.EvalSharedModel],
) -> beam.pvalue.PCollection:
  """A PTransform that adds predictions and possibly other tensors to extracts.

  Args:
    extracts: PCollection of extracts with inputs keyed by tfma.INPUTS_KEY.
    eval_shared_models: Shared model parameters keyed by model name.

  Returns:
    PCollection of Extracts updated with the predictions.
  """
  prediction_do_fn = _TFMAPredictionDoFn(eval_shared_models)
  return extracts | 'Predict' >> beam.ParDo(prediction_do_fn)
def _custom_model_loader_fn(model_path: Text):
  """Returns a zero-argument callable that unpickles the model at model_path."""

  def _load():
    return pickle.load(tf.io.gfile.GFile(model_path, 'rb'))

  return _load
# TFX Evaluator will call the following functions.
def custom_eval_shared_model(
    eval_saved_model_path, model_name, eval_config,
    **kwargs) -> tfma.EvalSharedModel:
  """Returns a single custom EvalSharedModel wrapping the pickled model."""
  model_path = os.path.join(eval_saved_model_path, 'model.pkl')
  # The custom loader unpickles the sklearn model instead of loading a
  # TensorFlow SavedModel.
  model_loader = types.ModelLoader(
      construct_fn=_custom_model_loader_fn(model_path))
  return tfma.default_eval_shared_model(
      eval_saved_model_path=model_path,
      model_name=model_name,
      eval_config=eval_config,
      custom_model_loader=model_loader,
      add_metrics_callbacks=kwargs.get('add_metrics_callbacks'))
def custom_extractors(
    eval_shared_model: tfma.MaybeMultipleEvalSharedModels,
    eval_config: tfma.EvalConfig,
    tensor_adapter_config: tensor_adapter.TensorAdapterConfig,
) -> List[tfma.extractors.Extractor]:
  """Returns the default extractors plus a custom prediction extractor.

  Args:
    eval_shared_model: One or more shared models under evaluation.
    eval_config: The TFMA evaluation configuration.
    tensor_adapter_config: Config for converting RecordBatches to tensors.

  Returns:
    The default TFMA extractors with the sklearn predict extractor swapped in.
  """
  predict_extractor = _make_sklearn_predict_extractor(eval_shared_model)
  return tfma.default_extractors(
      eval_shared_model=eval_shared_model,
      eval_config=eval_config,
      tensor_adapter_config=tensor_adapter_config,
      custom_predict_extractor=predict_extractor)
"""Penguin example using TFX."""
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'penguin_sklearn_local'
# This example assumes that Penguin data is stored in ~/penguin/data and the
# utility function is in ~/penguin. Feel free to customize as needed.
_penguin_root = os.path.join(os.environ['HOME'], 'penguin')
_data_root = os.path.join(_penguin_root, 'data')
# Python module file to inject customized logic into the TFX components.
# Trainer requires user-defined functions to run successfully.
_trainer_module_file = os.path.join(
_penguin_root, 'experimental', 'penguin_utils_sklearn.py')
# Python module file to inject customized logic into the TFX components. The
# Evaluator component needs a custom extractor in order to make predictions
# using the scikit-learn model.
_evaluator_module_file = os.path.join(
_penguin_root, 'experimental', 'sklearn_predict_extractor.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_penguin_root, 'serving_model',
_pipeline_name)
# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
# Pipeline arguments for Beam powered Components.
# TODO(b/171316320): Change direct_running_mode back to multi_processing and set
# direct_num_workers to 0.
_beam_pipeline_args = [
'--direct_running_mode=multi_threading',
# 0 means auto-detect based on on the number of CPUs available
# during execution time.
'--direct_num_workers=1',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     trainer_module_file: Text, evaluator_module_file: Text,
                     serving_model_dir: Text, metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the Penguin pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline outputs.
    data_root: Directory containing the input CSV data.
    trainer_module_file: Path to the module with the Trainer `run_fn`.
    evaluator_module_file: Path to the module with the custom Evaluator
      extractor.
    serving_model_dir: Directory where Pusher exports the blessed model.
    metadata_path: Path to the sqlite ML Metadata database.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX Pipeline object wiring the components together.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # TODO(humichael): Handle applying transformation component in Milestone 3.

  # Uses user-provided Python function that trains a model using TF-Learn.
  # Num_steps is not provided during evaluation because the scikit-learn model
  # loads and evaluates the entire test set at once.
  # TODO(b/159470716): Make schema optional in Trainer.
  trainer = Trainer(
      module_file=trainer_module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=2000),
      eval_args=trainer_pb2.EvalArgs())

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='species')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='Accuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.6}),
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = Evaluator(
      module_file=evaluator_module_file,
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Pushes the blessed model to the serving directory.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args,
  )
# To run this pipeline from the python CLI:
#   $python penguin_pipeline_sklearn_local.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  LocalDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          trainer_module_file=_trainer_module_file,
          evaluator_module_file=_evaluator_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Penguin example using TFX on GCP."""
import os
from typing import Dict, List, Optional, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.extensions.google_cloud_ai_platform.pusher import executor as ai_platform_pusher_executor
from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Identifier for the pipeline. This will also be used as the model name on AI
# Platform, so it should begin with a letter and only consist of letters,
# numbers, and underscores.
_pipeline_name = 'penguin_sklearn_gcp'
# Google Cloud Platform project id to use when deploying this pipeline. Leave
# blank to run locally.
_project_id = 'PROJECT_ID'
# Directory and data locations (uses Google Cloud Storage).
_bucket = 'gs://BUCKET'
# Custom container image in Google Container Registry (GCR) to use for training
# on Google Cloud AI Platform.
_tfx_image = f'gcr.io/{_project_id}/tfx-example-sklearn'
# Region to use for Dataflow jobs and AI Platform jobs.
# Dataflow: https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
# AI Platform: https://cloud.google.com/ml-engine/docs/tensorflow/regions
_gcp_region = 'us-central1'
# A dict which contains the training job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
_ai_platform_training_args = {
'project': _project_id,
'region': _gcp_region,
# Override the default TFX image used for training with one with the correct
# scikit-learn version.
'masterConfig': {
'imageUri': _tfx_image,
},
}
# A dict which contains the serving job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
_ai_platform_serving_args = {
'model_name': _pipeline_name,
'project_id': _project_id,
# The region to use when serving the model. See available regions here:
# https://cloud.google.com/ml-engine/docs/regions
    # Note that serving currently only supports a single region:
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models#Model
'regions': [_gcp_region],
# TODO(b/157646655): Update the version once sklearn support is added back
# to CAIP in the next runtime release.
'runtime_version': '1.15',
}
# This example assumes that Penguin data is stored in ~/penguin/data and the
# utility function is in ~/penguin. Feel free to customize as needed.
_penguin_root = os.path.join(_bucket, 'penguin')
_data_root = os.path.join(_penguin_root, 'data')
# Python module file to inject customized logic into the TFX components.
# Trainer requires user-defined functions to run successfully.
_trainer_module_file = os.path.join(
_penguin_root, 'experimental', 'penguin_utils_sklearn.py')
# Python module file to inject customized logic into the TFX components. The
# Evaluator component needs a custom extractor in order to make predictions
# using the scikit-learn model.
_evaluator_module_file = os.path.join(
_penguin_root, 'experimental', 'sklearn_predict_extractor.py')
# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem. The AI Platform Pusher requires
# that pipeline outputs are stored in a GCS bucket.
_tfx_root = os.path.join(_bucket, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
# TODO(humichael): Beam dag runner expects this to be a local path. Switch to
# kubeflow dag runner when making cloud example.
_metadata_path = os.path.join(os.environ['HOME'], 'tfx', 'metadata',
_pipeline_name, 'metadata.db')
# Pipeline arguments for Beam powered Components.
# TODO(b/171316320): Change direct_running_mode back to multi_processing and set
# direct_num_workers to 0. Additionally, try to use the Dataflow runner instead
# of the direct runner.
_beam_pipeline_args = [
'--direct_running_mode=multi_threading',
# 0 means auto-detect based on on the number of CPUs available
# during execution time.
'--direct_num_workers=1',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     trainer_module_file: Text, evaluator_module_file: Text,
                     metadata_path: Text,
                     ai_platform_training_args: Optional[Dict[Text, Text]],
                     ai_platform_serving_args: Optional[Dict[Text, Text]],
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the Penguin pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory (GCS) for pipeline outputs.
    data_root: Directory containing the input CSV data.
    trainer_module_file: Path to the module with the Trainer `run_fn`.
    evaluator_module_file: Path to the module with the custom Evaluator
      extractor.
    metadata_path: Path to the sqlite ML Metadata database.
    ai_platform_training_args: Training job parameters for Cloud AI Platform.
    ai_platform_serving_args: Serving job parameters for Cloud AI Platform.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX Pipeline object wiring the components together.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # TODO(humichael): Handle applying transformation component in Milestone 3.

  # Uses user-provided Python function that trains a model using TF-Learn.
  # Num_steps is not provided during evaluation because the scikit-learn model
  # loads and evaluates the entire test set at once.
  # TODO(b/159470716): Make schema optional in Trainer.
  trainer = Trainer(
      module_file=trainer_module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_trainer_executor.GenericExecutor),
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=2000),
      eval_args=trainer_pb2.EvalArgs(),
      custom_config={
          ai_platform_trainer_executor.TRAINING_ARGS_KEY:
              ai_platform_training_args,
      })

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='species')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='Accuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.6}),
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = Evaluator(
      module_file=evaluator_module_file,
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Pushes the blessed model to Cloud AI Platform for serving.
  pusher = Pusher(
      custom_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_pusher_executor.Executor),
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      custom_config={
          ai_platform_pusher_executor.SERVING_ARGS_KEY:
              ai_platform_serving_args,
      })

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args,
  )
# To run this pipeline from the python CLI:
#   $python penguin_pipeline_sklearn_gcp.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # TODO(humichael): Switch to KubeflowDagRunner.
  LocalDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          trainer_module_file=_trainer_module_file,
          evaluator_module_file=_evaluator_module_file,
          metadata_path=_metadata_path,
          ai_platform_training_args=_ai_platform_training_args,
          ai_platform_serving_args=_ai_platform_serving_args,
          beam_pipeline_args=_beam_pipeline_args))
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
# Continuous features that are z-score scaled in preprocessing_fn below.
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Derive the raw (pre-transform) feature spec from a schema proto."""
  return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  return tf.data.TFRecordDataset(filenames, compression_type='GZIP')
def _fill_in_missing(x):
  """Replace missing values in a `SparseTensor`.

  If x is a `SparseTensor`, fills in missing values of `x` with '' or 0, and
  converts to a dense tensor. Otherwise it returns x as is.

  Args:
    x: A `SparseTensor` of rank 2 or a tensor that is not an instance of
      `SparseTensor`. If input is a `SparseTensor` its dense shape should have
      size at most 1 in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in, or x as
    is if x is not an instance of `SparseTensor`.
  """
  if not isinstance(x, tf.SparseTensor):
    return x
  # Fill with '' for string tensors and 0 for numeric ones.
  default_value = '' if x.dtype == tf.string else 0
  # Densify to shape (batch, 1), then squeeze away the trailing dimension.
  return tf.squeeze(
      tf.sparse.to_dense(
          tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
          default_value),
      axis=1)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    # Bucketize continuous coordinates into _FEATURE_BUCKET_COUNT buckets.
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    # Categorical features are passed through unchanged (only densified).
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  # Label is 0 when the fare is NaN, otherwise 1 iff tip > 20% of the fare.
  outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    A `tf.estimator.DNNLinearCombinedClassifier` whose linear part consumes
    the categorical columns and whose DNN part consumes the dense float
    columns.
  """
  # Dense (z-scored) features feed the DNN tower.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  # Vocabulary-encoded features: ids in [0, _VOCAB_SIZE + _OOV_SIZE).
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  # Bucketized features: ids in [0, _FEATURE_BUCKET_COUNT).
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  # Raw categorical features: each key uses its own per-feature maximum.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _flat_input_serving_receiver_fn(tf_transform_output, schema):
  """Build the serving function for flat list of Dense tensors as input.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is never available at serving time.
  raw_feature_spec.pop(_LABEL_KEY)
  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()
  transformed_features = tf_transform_output.transform_raw_features(
      serving_input_receiver.features)
  # We construct a receiver function that receives flat list of Dense tensors
  # as features. This is as per BigQuery ML serving requirements.
  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.features)
def _eval_input_receiver_fn(tf_transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)
  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  features = tf.io.parse_example(
      serialized=serialized_tf_example, features=raw_feature_spec)
  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  transformed_features = tf_transform_output.transform_raw_features(features)
  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}
  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
  features.update(transformed_features)
  return tfma.export.EvalInputReceiver(
      features=features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[_transformed_name(_LABEL_KEY)])
def _input_fn(filenames, tf_transform_output, batch_size=200):
  """Generates features and labels for training or evaluation.

  Args:
    filenames: [str] list of gzipped TFRecord files of transformed examples to
      read data from (see `_gzip_reader_fn`).
    tf_transform_output: A TFTransformOutput.
    batch_size: int First dimension size of the Tensors returned by input_fn

  Returns:
    A (features, indices) tuple where features is a dictionary of
      Tensors, and indices is a single Tensor of label indices.
  """
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn)
  transformed_features = tf.compat.v1.data.make_one_shot_iterator(
      dataset).get_next()
  # We pop the label because we do not want to use it as a feature while we're
  # training.
  return transformed_features, transformed_features.pop(
      _transformed_name(_LABEL_KEY))
# TFX will call this function
def trainer_fn(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  # Number of nodes in the first layer of the DNN
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7
  train_batch_size = 40
  eval_batch_size = 40
  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)
  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      tf_transform_output,
      batch_size=train_batch_size)
  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      tf_transform_output,
      batch_size=eval_batch_size)
  train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)
  serving_receiver_fn = lambda: _flat_input_serving_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)
  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')
  # Checkpoint every 999 steps, keeping only the most recent checkpoint.
  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=1)
  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)
  estimator = _build_estimator(
      # Construct layers sizes with exponential decay
      hidden_units=[
          max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
          for i in range(num_dnn_layers)
      ],
      config=run_config,
      warm_start_from=trainer_fn_args.base_model)
  # Create an input receiver for TFMA processing
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)
  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
from typing import Optional, Text
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.examples.custom_components.slack.slack_component import executor
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ExecutionParameter
class SlackComponentSpec(types.ComponentSpec):
  """ComponentSpec for Custom TFX Slack Component.

  Declares the component contract: Slack connection parameters plus a
  timeout, a model and its blessing as inputs, and a `slack_blessing`
  ModelBlessing artifact as output.
  """

  PARAMETERS = {
      'slack_token': ExecutionParameter(type=Text),
      'slack_channel_id': ExecutionParameter(type=Text),
      'timeout_sec': ExecutionParameter(type=int),
  }
  INPUTS = {
      'model': ChannelParameter(type=standard_artifacts.Model),
      'model_blessing': ChannelParameter(type=standard_artifacts.ModelBlessing),
  }
  OUTPUTS = {
      'slack_blessing': ChannelParameter(type=standard_artifacts.ModelBlessing),
  }
class SlackComponent(base_component.BaseComponent):
  """Custom TFX Slack Component.

  This custom component serves as a bridge between TFX pipeline and human
  model reviewers to enable review-and-push workflow in model development
  cycle. It utilizes Slack API to send message to user-defined Slack channel
  with model URI info and wait for go / no-go decision from the same Slack
  channel:
    * To approve the model, a user needs to reply the thread sent out by the
      bot started by SlackComponent with 'lgtm' or 'approve'.
    * To reject the model, a user needs to reply the thread sent out by the
      bot started by SlackComponent with 'decline' or 'reject'.

  If the model is approved, an artifact will be created in ML metadata. It
  will be materialized as a file named 'BLESSED' in the directory specified
  by the URI of 'slack_blessing' artifact.
  If the model is rejected, an artifact will be created in ML metadata. It
  will be materialized as a file named 'NOT_BLESSED' in the directory
  specified by the URI of 'slack_blessing' channel.
  If no message indicating approve or reject is received within timeout_sec,
  the component will error out. This ensures that model will not be pushed
  and the validation is still retry-able.

  The output artifact might contain the following custom properties:
    - blessed: integer value indicating whether the model is blessed
    - slack_decision_maker: the user id that made the decision.
    - slack_decision_message: the message of the decision
    - slack_decision_channel: the slack channel the decision is made on
    - slack_decision_thread: the slack thread the decision is made on
  """

  SPEC_CLASS = SlackComponentSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               model: types.Channel,
               model_blessing: types.Channel,
               slack_token: Text,
               slack_channel_id: Text,
               timeout_sec: int,
               slack_blessing: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct a SlackComponent.

    Args:
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: A Channel of type `standard_artifacts.ModelBlessing`,
        usually produced by a ModelValidator component.
      slack_token: A token used for setting up connection with Slack server.
      slack_channel_id: Slack channel id to communicate on.
      timeout_sec: Seconds to wait for response before default to reject.
      slack_blessing: Optional output channel of type
        `standard_artifacts.ModelBlessing` with result of blessing; will be
        created for you if not specified.
      instance_name: Optional unique instance name. Necessary if multiple
        Pusher components are declared in the same pipeline.
    """
    # Create the output channel lazily so callers don't have to wire one up.
    slack_blessing = slack_blessing or types.Channel(
        type=standard_artifacts.ModelBlessing)
    spec = SlackComponentSpec(
        slack_token=slack_token,
        slack_channel_id=slack_channel_id,
        timeout_sec=timeout_sec,
        model=model,
        model_blessing=model_blessing,
        slack_blessing=slack_blessing)
    super(SlackComponent, self).__init__(spec=spec, instance_name=instance_name)
import os
import signal
from typing import Any, Dict, List, Text
import absl
import attr
import slack
from tfx import types
from tfx.components.util import model_utils
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.utils import io_utils
# Case-insensitive text messages that are accepted as signal for approving a
# model.
_APPROVE_TEXT = ['lgtm', 'approve']
# Case-insensitive text messages that are accepted as signal for rejecting a
# model.
_DECLINE_TEXT = ['decline', 'reject']
class Timeout(object):
  """Context manager that raises `TimeoutError` after a fixed deadline.

  Implemented with `signal.alarm`, so it only works on Unix and only in the
  main thread. The previous SIGALRM handler is saved on entry and restored on
  exit so other users of the signal are not clobbered.
  """

  def __init__(self, seconds):
    """Args:
      seconds: int, number of seconds before the alarm fires.
    """
    self.seconds = seconds
    # Previous SIGALRM handler, captured in __enter__ and restored in
    # __exit__.
    self._old_handler = None

  def handle_timeout(self, unused_signum, unused_frame):
    """SIGALRM handler: log a warning and abort the guarded block."""
    msg = 'Did not get model evaluation result in %d seconds' % self.seconds
    absl.logging.warning(msg)
    raise TimeoutError(msg)

  def __enter__(self):
    # Remember the prior handler so it can be reinstated; the original code
    # left handle_timeout installed after the context exited.
    self._old_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
    signal.alarm(self.seconds)

  def __exit__(self, unused_type, unused_value, unused_traceback):
    # Cancel any pending alarm, then restore the prior SIGALRM handler.
    signal.alarm(0)
    if self._old_handler is not None:
      signal.signal(signal.SIGALRM, self._old_handler)
@attr.s(auto_attribs=True, kw_only=True, frozen=True)
class _SlackResponse:
  """Immutable record of a user's Slack approval decision."""
  # Whether the model is approved.
  approved: bool
  # The user who made that decision.
  user_id: Text
  # The decision message.
  message: Text
  # The slack channel that the decision is made on.
  slack_channel_id: Text
  # The slack thread that the decision is made on.
  thread_ts: Text
class Executor(base_executor.BaseExecutor):
  """Executor for Slack component.

  Posts the model URI to a Slack channel and blocks until a human replies in
  the message thread with an approval or rejection, then materializes the
  decision as a blessing artifact.
  """

  def _fetch_slack_blessing(self, slack_token: Text, slack_channel_id: Text,
                            model_uri: Text) -> _SlackResponse:
    """Send message via Slack channel and wait for response.

    When the bot send message to the channel, user should reply in thread with
    "approve" or "lgtm" for approval, "decline", "reject" for decline.
    This example uses Slack RealTime Message (RTM) API which is only available
    for **classic slack bot** (https://api.slack.com/rtm). (Events API requires
    listening server endpoint which is not easy to be integrated with TFX
    pipelines.)

    Args:
      slack_token: Token used to set up connection with the Slack server.
      slack_channel_id: The id of the Slack channel to send and receive
        messages.
      model_uri: The URI of the model waiting for human review.

    Returns:
      A _SlackResponse instance.

    Raises:
      ConnectionError:
        When connection to slack server cannot be established.
    """
    # pylint: disable=unused-argument, unused-variable
    rtm_client = slack.RTMClient(token=slack_token)
    # Both are filled in by the event callbacks below.
    thread_ts = None
    result = None

    @slack.RTMClient.run_on(event='hello')
    def on_hello(web_client, **payload):
      # Fired once the RTM connection is up: post the review request and
      # remember its timestamp so later replies can be matched to the thread.
      nonlocal thread_ts
      resp = web_client.chat_postMessage(
          channel=slack_channel_id,
          text=(f'Please review the model in the following URI: {model_uri}\n'
                f'Reply in thread by `{_APPROVE_TEXT}` for approval, '
                f'or `{_DECLINE_TEXT}` for decline.'))
      thread_ts = resp.data['ts']

    @slack.RTMClient.run_on(event='message')
    def on_message(data, rtm_client, web_client, **payload):
      nonlocal result
      if (data.get('channel') != slack_channel_id
          or data.get('thread_ts') != thread_ts
          or data.get('user') is None
          or data.get('subtype') == 'bot_message'):
        # Not a relevant user message.
        return
      user_reply = data['text'].lower()
      if user_reply in _APPROVE_TEXT:
        absl.logging.info('User %s approved the model at %s',
                          data['user'], model_uri)
        rtm_client.stop()
        result = _SlackResponse(
            approved=True,
            user_id=data['user'],
            message=data['text'],
            slack_channel_id=slack_channel_id,
            thread_ts=thread_ts)
      elif user_reply in _DECLINE_TEXT:
        absl.logging.info('User %s declined the model at %s',
                          data['user'], model_uri)
        rtm_client.stop()
        result = _SlackResponse(
            approved=False,
            user_id=data['user'],
            message=data['text'],
            slack_channel_id=slack_channel_id,
            thread_ts=thread_ts)
      else:
        # Unrecognized reply: re-prompt in the same thread and keep listening.
        web_client.chat_postMessage(
            channel=slack_channel_id,
            thread_ts=thread_ts,
            text=(f'Unrecognized text "{data["text"]}".\n'
                  f'Please reply in thread by `{_APPROVE_TEXT}` for approval, '
                  f'or `{_DECLINE_TEXT}` for decline.'))

    absl.logging.info('Will start listening user Slack response.')
    # Blocks until one of the callbacks above calls rtm_client.stop().
    rtm_client.start()
    absl.logging.info('User reply: %s', result)
    return result

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Get human review result on a model through Slack channel.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from evaluator.
      output_dict: Output dict from key to a list of artifacts, including:
        - slack_blessing: model blessing result.
      exec_properties: A dict of execution properties, including:
        - slack_token: Token used to setup connection with slack server.
        - slack_channel_id: The id of the Slack channel to send and receive
          messages.
        - timeout_sec: How long do we wait for response, in seconds.

    Returns:
      None

    Raises:
      TimeoutError:
        When there is no decision made within timeout_sec.
      ConnectionError:
        When connection to slack server cannot be established.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    # Fetch execution properties from exec_properties dict.
    slack_token = exec_properties['slack_token']
    slack_channel_id = exec_properties['slack_channel_id']
    timeout_sec = exec_properties['timeout_sec']
    # Fetch input URIs from input_dict.
    model_export_uri = artifact_utils.get_single_uri(input_dict['model'])
    model_blessing = artifact_utils.get_single_instance(
        input_dict['model_blessing'])
    # Fetch output artifact from output_dict.
    slack_blessing = artifact_utils.get_single_instance(
        output_dict['slack_blessing'])
    # We only consider a model as blessed if both of the following conditions
    # are met:
    # - The model is blessed by evaluator. This is determined by looking
    #   for file named 'BLESSED' from the output from Evaluator.
    # - The model is blessed by a human reviewer. This logic is in
    #   _fetch_slack_blessing().
    slack_response = None
    with Timeout(timeout_sec):
      if model_utils.is_model_blessed(model_blessing):
        slack_response = self._fetch_slack_blessing(slack_token,
                                                    slack_channel_id,
                                                    model_export_uri)
    # If model is blessed, write an empty file named 'BLESSED' in the assigned
    # output path. Otherwise, write an empty file named 'NOT_BLESSED' instead.
    if slack_response and slack_response.approved:
      io_utils.write_string_file(
          os.path.join(slack_blessing.uri, 'BLESSED'), '')
      slack_blessing.set_int_custom_property('blessed', 1)
    else:
      io_utils.write_string_file(
          os.path.join(slack_blessing.uri, 'NOT_BLESSED'), '')
      slack_blessing.set_int_custom_property('blessed', 0)
    # Record the human decision metadata whenever a response was received.
    if slack_response:
      slack_blessing.set_string_custom_property('slack_decision_maker',
                                                slack_response.user_id)
      slack_blessing.set_string_custom_property('slack_decision_message',
                                                slack_response.message)
      slack_blessing.set_string_custom_property('slack_decision_channel',
                                                slack_response.slack_channel_id)
      slack_blessing.set_string_custom_property('slack_decision_thread',
                                                slack_response.thread_ts)
    absl.logging.info('Blessing result written to %s.', slack_blessing.uri)
import datetime
import os
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ModelValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.examples.custom_components.slack.slack_component.component import SlackComponent
from tfx.orchestration import pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import csv_input
# This example assumes that the taxi data is stored in _input_bucket/data/simple
# and the taxi utility function is in example/taxi_utils_slack.py.
# Feel free to customize this as needed.
_input_bucket = 'gs://my-bucket'
_output_bucket = 'gs://my-bucket'
# NOTE(review): this is the path of this .py file itself, so
# _serving_model_dir below becomes a path "under" a file -- presumably it
# should be a directory; verify before deploying.
_taxi_root = __file__
_data_root = os.path.join(_input_bucket, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run
# successfully.
_taxi_trainer_func = 'example.taxi_utils_slack.trainer_fn'
_taxi_transformer_func = 'example.taxi_utils_slack.preprocessing_fn'
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack')
# Slack channel to push the model notifications to.
_slack_channel_id = os.environ['TFX_SLACK_CHANNEL_ID']
# Slack token to set up connection.
_slack_token = os.environ['TFX_SLACK_BOT_TOKEN']
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
# The second component must be relative: os.path.join discards everything
# before an absolute component, so joining with '/tfx' (as the original code
# did) silently resolved to just '/tfx' and ignored $HOME.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_name = 'chicago_taxi_slack_kubeflow'
_pipeline_root = os.path.join(_input_bucket, _pipeline_name)
_log_root = os.path.join(_tfx_root, 'logs')
# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline():
  """Implements the chicago taxi pipeline with TFX.

  Returns:
    A `pipeline.Pipeline` wiring CsvExampleGen -> StatisticsGen -> SchemaGen
    -> ExampleValidator -> Transform -> Trainer -> Evaluator -> ModelValidator
    -> SlackComponent -> Pusher, with caching enabled.
  """
  examples = csv_input(_data_root)
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input=examples)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      preprocessing_fn=_taxi_transformer_func)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      trainer_fn=_taxi_trainer_func,
      examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Uses TFMA to compute evaluation statistics over features of a model.
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['model'])
  # This custom component serves as a bridge between pipeline and human model
  # reviewers to enable review-and-push workflow in model development cycle. It
  # utilizes Slack API to send message to user-defined Slack channel with model
  # URI info and wait for go / no-go decision from the same Slack channel:
  #   * To approve the model, users need to reply the thread sent out by the
  #     bot started by SlackComponent with 'lgtm' or 'approve'.
  #   * To reject the model, users need to reply the thread sent out by the
  #     bot started by SlackComponent with 'decline' or 'reject'.
  slack_validator = SlackComponent(
      model=trainer.outputs['model'],
      model_blessing=model_validator.outputs['blessing'],
      slack_token=_slack_token,
      slack_channel_id=_slack_channel_id,
      timeout_sec=3600,
  )
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=slack_validator.outputs['slack_blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=_serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, evaluator, model_validator, slack_validator, pusher
      ],
      enable_cache=True,
  )
if __name__ == '__main__':
  # Metadata config. The defaults work with the installation of
  # KF Pipelines using Kubeflow. If installing KF Pipelines using the
  # lightweight deployment option, you may need to override the defaults.
  metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
  # This pipeline automatically injects the Kubeflow TFX image if the
  # environment variable 'KUBEFLOW_TFX_IMAGE' is defined. Currently, the tfx
  # cli tool exports the environment variable to pass to the pipelines.
  tfx_image = os.environ.get('KUBEFLOW_TFX_IMAGE', None)
  runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
      kubeflow_metadata_config=metadata_config,
      # Specify custom docker image to use.
      tfx_image=tfx_image
  )
  # Compile and submit the pipeline through the Kubeflow runner.
  kubeflow_dag_runner.KubeflowDagRunner(config=runner_config).run(
      _create_pipeline())
import os
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.tf_metadata import schema_utils
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
# Continuous features that are z-score scaled in preprocessing_fn below.
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Derive the raw (pre-transform) feature spec from a schema proto."""
  return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn():
  """Small utility returning a record reader that can read gzip'ed files.

  Returns a TF1-compat `TFRecordReader` configured for GZIP compression.
  """
  return tf.compat.v1.TFRecordReader(
      options=tf.io.TFRecordOptions(
          compression_type=tf.compat.v1.python_io.TFRecordCompressionType.GZIP))
def _fill_in_missing(x):
  """Replace missing values in a `SparseTensor`.

  Fills in missing values of `x` with '' or 0, and converts to a dense
  tensor. Non-sparse inputs are returned unchanged.

  Args:
    x: A `SparseTensor` of rank 2 (its dense shape should have size at most 1
      in the second dimension), or any non-sparse tensor.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in, or `x`
    itself if it is not a `SparseTensor`.
  """
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  # Fill with '' for string tensors and 0 for numeric ones.
  default_value = '' if x.dtype == tf.string else 0
  # Densify to shape (batch, 1), then squeeze away the trailing dimension.
  return tf.squeeze(
      tf.compat.v1.sparse_to_dense(x.indices, [x.dense_shape[0], 1], x.values,
                                   default_value),
      axis=1)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  # Label is 0 when fare is NaN, else 1 iff the tip exceeded 20% of the fare.
  outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.contrib.learn.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  # NOTE(review): zip() stops at the shorter list, so only as many categorical
  # keys as there are entries in _MAX_CATEGORICAL_FEATURE_VALUES get columns.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _example_serving_receiver_fn(transform_output, schema):
  """Builds the serving input receiver.

  Args:
    transform_output: directory in which the tf-transform model was written
      during the preprocessing step.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is never available at serving time.
  raw_feature_spec.pop(_LABEL_KEY)
  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()
  _, transformed_features = (
      saved_transform_io.partially_apply_saved_transform(
          os.path.join(transform_output, transform_fn_io.TRANSFORM_FN_DIR),
          serving_input_receiver.features))
  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    transform_output: directory in which the tf-transform model was written
      during the preprocessing step.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)
  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  features = tf.io.parse_example(
      serialized=serialized_tf_example, features=raw_feature_spec)
  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  _, transformed_features = (
      saved_transform_io.partially_apply_saved_transform(
          os.path.join(transform_output, transform_fn_io.TRANSFORM_FN_DIR),
          features))
  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}
  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
  features.update(transformed_features)
  return tfma.export.EvalInputReceiver(
      features=features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[_transformed_name(_LABEL_KEY)])
def _input_fn(filenames, transform_output, batch_size=200):
  """Generates features and labels for training or evaluation.

  Args:
    filenames: [str] list of files to read data from. NOTE(review): these are
      read as gzipped TFRecord files via `_gzip_reader_fn`, not CSV as the
      original comment implied — confirm against the materialized TFT output.
    transform_output: directory in which the tf-transform model was written
      during the preprocessing step.
    batch_size: int First dimension size of the Tensors returned by input_fn

  Returns:
    A (features, indices) tuple where features is a dictionary of
    Tensors, and indices is a single Tensor of label indices.
  """
  metadata_dir = os.path.join(transform_output,
                              transform_fn_io.TRANSFORMED_METADATA_DIR)
  transformed_metadata = metadata_io.read_metadata(metadata_dir)
  transformed_feature_spec = transformed_metadata.schema.as_feature_spec()
  transformed_features = tf.contrib.learn.io.read_batch_features(
      filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn)
  # We pop the label because we do not want to use it as a feature while we're
  # training.
  return transformed_features, transformed_features.pop(
      _transformed_name(_LABEL_KEY))
# TFX will call this function
def trainer_fn(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  # Number of nodes in the first layer of the DNN
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7
  train_batch_size = 40
  eval_batch_size = 40
  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      trainer_fn_args.transform_output,
      batch_size=train_batch_size)
  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      trainer_fn_args.transform_output,
      batch_size=eval_batch_size)
  train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)
  serving_receiver_fn = lambda: _example_serving_receiver_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.transform_output, schema)
  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')
  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=1)
  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)
  estimator = _build_estimator(
      # Construct layers sizes with exponential decay
      hidden_units=[
          max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
          for i in range(num_dnn_layers)
      ],
      config=run_config,
      warm_start_from=trainer_fn_args.base_model)
  # Create an input receiver for TFMA processing
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.transform_output, schema)
  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
import datetime
import os
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ModelValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.examples.custom_components.slack.slack_component.component import SlackComponent
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_runner import BeamRunner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
# Required environment variables: HOME, TFX_SLACK_CHANNEL_ID,
# TFX_SLACK_BOT_TOKEN (KeyError is raised at import time if any are unset).
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data/simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_taxi_module_file = os.path.join(_taxi_root, 'taxi_utils_slack.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack')
# Slack channel to push the model notifications to.
_slack_channel_id = os.environ['TFX_SLACK_CHANNEL_ID']
# Slack token to set up connection.
_slack_token = os.environ['TFX_SLACK_BOT_TOKEN']
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_name = 'chicago_taxi_slack'
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
_metadata_db_root = os.path.join(_tfx_root, 'metadata', _pipeline_name)
_log_root = os.path.join(_tfx_root, 'logs')
# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline():
  """Implements the chicago taxi pipeline with TFX.

  Returns:
    A pipeline.Pipeline wiring ExampleGen through Pusher, with a custom
    SlackComponent gating the push on human approval.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=_data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=_taxi_module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=_taxi_module_file,
      examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Uses TFMA to compute an evaluation statistics over features of a model.
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['model'])
  # This custom component serves as a bridge between pipeline and human model
  # reviewers to enable review-and-push workflow in model development cycle. It
  # utilizes Slack API to send message to user-defined Slack channel with model
  # URI info and wait for go / no-go decision from the same Slack channel:
  #   * To approve the model, users need to reply the thread sent out by the bot
  #     started by SlackComponent with 'lgtm' or 'approve'.
  #   * To reject the model, users need to reply the thread sent out by the bot
  #     started by SlackComponent with 'decline' or 'reject'.
  slack_validator = SlackComponent(
      model=trainer.outputs['model'],
      model_blessing=model_validator.outputs['blessing'],
      slack_token=_slack_token,
      slack_channel_id=_slack_channel_id,
      timeout_sec=3600,
  )
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=slack_validator.outputs['slack_blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=_serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, evaluator, model_validator, slack_validator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          _metadata_db_root),
  )
# Entry point: running this file directly executes the pipeline locally with
# the Beam orchestrator.
if __name__ == '__main__':
  BeamRunner().run(_create_pipeline())
from typing import Optional, Text
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.examples.custom_components.hello_world.hello_component import executor
from tfx.types import channel_utils
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ExecutionParameter
class HelloComponentSpec(types.ComponentSpec):
  """ComponentSpec for Custom TFX Hello World Component."""

  PARAMETERS = {
      # These are parameters that will be passed in the call to
      # create an instance of this component.
      'name': ExecutionParameter(type=Text),
  }
  INPUTS = {
      # This will be a dictionary with input artifacts, including URIs
      'input_data': ChannelParameter(type=standard_artifacts.Examples),
  }
  OUTPUTS = {
      # This will be a dictionary which this component will populate
      'output_data': ChannelParameter(type=standard_artifacts.Examples),
  }
class HelloComponent(base_component.BaseComponent):
  """Custom TFX Hello World Component.

  This custom component class consists of only a constructor.
  """

  SPEC_CLASS = HelloComponentSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               input_data: Optional[types.Channel] = None,
               output_data: Optional[types.Channel] = None,
               name: Optional[Text] = None):
    """Construct a HelloComponent.

    Args:
      input_data: A Channel of type `standard_artifacts.Examples`. This will
        often contain two splits: 'train', and 'eval'.
      output_data: A Channel of type `standard_artifacts.Examples`. This will
        usually contain the same splits as input_data.
      name: Optional unique name. Necessary if multiple Hello components are
        declared in the same pipeline.
    """
    # output_data will contain a list of Channels for each split of the data,
    # by default a 'train' split and an 'eval' split. Since HelloComponent
    # passes the input data through to output, the splits in output_data will
    # be the same as the splits in input_data, which were generated by the
    # upstream component.
    if not output_data:
      output_data = channel_utils.as_channel([standard_artifacts.Examples()])
    spec = HelloComponentSpec(input_data=input_data,
                              output_data=output_data, name=name)
    super(HelloComponent, self).__init__(spec=spec)
import json
import os
from typing import Any, Dict, List, Text
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.utils import io_utils
class Executor(base_executor.BaseExecutor):
  """Executor for HelloComponent.

  Copies every file of every split of the input Examples artifact into the
  corresponding split of the output Examples artifact.
  """

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Copy the input_data to the output_data.

    For this example that is all that the Executor does. For a different
    custom component, this is where the real functionality of the component
    would be included.

    This component both reads and writes Examples, but a different component
    might read and write artifacts of other types.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - input_data: A list of type `standard_artifacts.Examples` which will
          often contain two splits, 'train' and 'eval'.
      output_dict: Output dict from key to a list of artifacts, including:
        - output_data: A list of type `standard_artifacts.Examples` which will
          usually contain the same splits as input_data.
      exec_properties: A dict of execution properties, including:
        - name: Optional unique name. Necessary iff multiple Hello components
          are declared in the same pipeline.

    Returns:
      None

    Raises:
      OSError and its subclasses
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    source = artifact_utils.get_single_instance(input_dict['input_data'])
    target = artifact_utils.get_single_instance(output_dict['output_data'])
    # The output mirrors the input split-for-split.
    target.split_names = source.split_names
    for split in json.loads(source.split_names):
      source_dir = artifact_utils.get_split_uri([source], split)
      target_dir = artifact_utils.get_split_uri([target], split)
      for filename in fileio.listdir(source_dir):
        io_utils.copy_file(
            src=os.path.join(source_dir, filename),
            dst=os.path.join(target_dir, filename),
            overwrite=True)
"""Chicago taxi example using TFX."""
import os
from typing import Text
import absl
from tfx.components import CsvExampleGen
from tfx.components import StatisticsGen
from tfx.examples.custom_components.hello_world.hello_component import component
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
_pipeline_name = 'taxi_hello_pipeline'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
# NOTE(review): this is a relative path ('data' under the current working
# directory), which contradicts the ~/taxi/data comment above — confirm intent.
_data_root = os.path.join('data')
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(_taxi_root, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     metadata_path: Text) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline outputs.
    data_root: Directory containing the CSV input data.
    metadata_path: Path to the sqlite ML-metadata database.

  Returns:
    A pipeline.Pipeline chaining CsvExampleGen -> HelloComponent ->
    StatisticsGen.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  hello = component.HelloComponent(
      input_data=example_gen.outputs['examples'], name=u'HelloWorld')
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=hello.outputs['output_data'])
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[example_gen, hello, statistics_gen],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path))
# To run this pipeline from the python CLI:
#   $ python taxi_pipeline_hello.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          metadata_path=_metadata_path))
"""Container-based pipeline sample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
from tfx.dsl.component.experimental import container_component
from tfx.dsl.component.experimental import placeholders
from tfx.types import standard_artifacts
# Downloads the content of `url` into the component's 'data' output artifact.
# BUG FIX: the download (wget/curl) and upload (gsutil) commands were commented
# out, so the component produced no output; they are re-enabled here, matching
# the active command structure of the sibling grep/print components.
downloader_component = container_component.create_container_component(
    name='DownloadFromHttp',
    outputs={
        'data': standard_artifacts.ExternalArtifact,
    },
    parameters={
        'url': str,
    },
    # The component code uses gsutil to upload the data to GCS, so the
    # container image needs to have gsutil installed and configured.
    # Fixing b/150670779 by merging cl/294536017 will lift this limitation.
    image='google/cloud-sdk:278.0.0',
    command=[
        'sh', '-exc',
        '''
          url="$0"
          output_data_uri="$1"/data # TODO(b/150515270) Remove when fixed.
          output_data_path=$(mktemp)

          # Running the main code
          wget "$url" -O "$output_data_path" || curl "$url" > "$output_data_path"

          # Getting data out of the container
          gsutil cp "$output_data_path" "$output_data_uri"
        ''',
        placeholders.InputValuePlaceholder('url'),
        placeholders.OutputUriPlaceholder('data'),
    ],
)
grep_component = container_component.create_container_component(
name='FilterWithGrep',
inputs={
'text': standard_artifacts.ExternalArtifact,
},
outputs={
'filtered_text': standard_artifacts.ExternalArtifact,
},
parameters={
'pattern': str,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
pattern="$0"
text_uri="$1"/data # TODO(b/150515270) Remove when fixed.
text_path=$(mktemp)
filtered_text_uri="$2"/data # TODO(b/150515270) Remove when fixed.
filtered_text_path=$(mktemp)
# Getting data into the container
gsutil cp "$text_uri" "$text_path"
# Running the main code
grep "$pattern" "$text_path" >"$filtered_text_path"
# Getting data out of the container
gsutil cp "$filtered_text_path" "$filtered_text_uri"
''',
placeholders.InputValuePlaceholder('pattern'),
placeholders.InputUriPlaceholder('text'),
placeholders.OutputUriPlaceholder('filtered_text'),
],
)
print_component = container_component.create_container_component(
name='Print',
inputs={
'text': standard_artifacts.ExternalArtifact,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
text_uri="$0"/data # TODO(b/150515270) Remove when fixed.
text_path=$(mktemp)
# Getting data into the container
gsutil cp "$text_uri" "$text_path"
# Running the main code
cat "$text_path"
''',
placeholders.InputUriPlaceholder('text'),
],
)
def create_pipeline_component_instances(text_url: Text, pattern: Text):
  """Instantiates and wires the download -> grep -> print component tasks.

  Args:
    text_url: URL of the text document to download.
    pattern: grep pattern used to filter the downloaded text.

  Returns:
    The list of wired component instances, in execution order.
  """
  download = downloader_component(url=text_url)
  filter_text = grep_component(
      text=download.outputs['data'],
      pattern=pattern,
  )
  printer = print_component(text=filter_text.outputs['filtered_text'])
  return [download, filter_text, printer]
"""TFX PrestoExampleGen component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.example_gen import component
from tfx.components.example_gen import utils
from tfx.dsl.components.base import executor_spec
from tfx.examples.custom_components.presto_example_gen.presto_component import executor
from tfx.examples.custom_components.presto_example_gen.proto import presto_config_pb2
from tfx.proto import example_gen_pb2
class PrestoExampleGen(component.QueryBasedExampleGen):  # pylint: disable=protected-access
  """Official TFX PrestoExampleGen component.

  The Presto examplegen component takes a query, connection client
  configuration, and generates train and eval examples for downsteam components.
  """

  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               conn_config: presto_config_pb2.PrestoConnConfig,
               query: Optional[Text] = None,
               input_config: Optional[example_gen_pb2.Input] = None,
               output_config: Optional[example_gen_pb2.Output] = None,
               example_artifacts: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a PrestoExampleGen component.

    Args:
      conn_config: Parameters for Presto connection client.
      query: Presto sql string, query result will be treated as a single split,
        can be overwritten by input_config.
      input_config: An example_gen_pb2.Input instance with Split.pattern as
        Presto sql string. If set, it overwrites the 'query' arg, and allows
        different queries per split.
      output_config: An example_gen_pb2.Output instance, providing output
        configuration. If unset, default splits will be 'train' and 'eval' with
        size 2:1.
      example_artifacts: Optional channel of 'ExamplesPath' for output train and
        eval examples.
      instance_name: Optional unique instance name. Necessary if multiple
        PrestoExampleGen components are declared in the same pipeline.

    Raises:
      RuntimeError: Only one of query and input_config should be set. Or
        required host field in connection_config should be set.
    """
    if bool(query) == bool(input_config):
      raise RuntimeError('Exactly one of query and input_config should be set.')
    if not bool(conn_config.host):
      raise RuntimeError(
          'Required host field in connection config should be set.')
    input_config = input_config or utils.make_default_input_config(query)
    # The connection config is packed into the generic CustomConfig proto so it
    # can travel through the base ExampleGen machinery to the executor.
    packed_custom_config = example_gen_pb2.CustomConfig()
    packed_custom_config.custom_config.Pack(conn_config)
    output_config = output_config or utils.make_default_output_config(
        input_config)
    super(PrestoExampleGen, self).__init__(
        input_config=input_config,
        output_config=output_config,
        custom_config=packed_custom_config,
        example_artifacts=example_artifacts,
        instance_name=instance_name)
"""Generic TFX PrestoExampleGen executor."""
import datetime
from typing import Any, Dict, Iterable, Text, Tuple
import apache_beam as beam
import prestodb
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.examples.custom_components.presto_example_gen.proto import presto_config_pb2
from tfx.proto import example_gen_pb2
from tfx.utils import proto_utils
@beam.typehints.with_input_types(Text)
@beam.typehints.with_output_types(beam.typehints.Iterable[Tuple[Text, Text,
                                                                Any]])
class _ReadPrestoDoFn(beam.DoFn):
  """Beam DoFn class that reads from Presto.

  Attributes:
    cursor: A prestodb.dbapi.Cursor object that reads records from Presto table.
  """

  def __init__(self, client: prestodb.dbapi.Connection):
    self.cursor = client.cursor()

  def process(self, query: Text) -> Iterable[Tuple[Text, Text, Any]]:
    """Yields rows from query results.

    Args:
      query: A SQL query used to return results from Presto table.

    Yields:
      One row from the query result, represented by a list of tuples. Each
      tuple contains information on column name, column data type, data.
    """
    self.cursor.execute(query)
    rows = self.cursor.fetchall()
    if rows:
      cols = []
      col_types = []
      # Returns a list of (column_name, column_type, None, ...)
      # https://github.com/prestodb/presto-python-client/blob/master/prestodb/dbapi.py#L199
      for metadata in self.cursor.description:
        cols.append(metadata[0])
        col_types.append(metadata[1])
      for r in rows:
        yield zip(cols, col_types, r)

  def teardown(self):
    # Release the cursor when the DoFn instance is retired.
    if self.cursor:
      self.cursor.close()
def _deserialize_conn_config(
    conn_config: presto_config_pb2.PrestoConnConfig
) -> prestodb.dbapi.Connection:
  """Deserializes Presto connection config to Presto client.

  Args:
    conn_config: Protobuf-encoded connection config for Presto client.

  Returns:
    A prestodb.dbapi.Connection instance initialized with user-supplied
    parameters.
  """
  # 'host' is the only required field; everything else is forwarded to the
  # client only when the user explicitly set it on the proto.
  params = {'host': conn_config.host}
  for field_name in ('port', 'user', 'source', 'catalog', 'schema',
                     'http_scheme'):
    if conn_config.HasField(field_name):
      params[field_name] = getattr(conn_config, field_name)
  if conn_config.WhichOneof('opt_auth'):
    params['auth'] = _deserialize_auth_config(conn_config)
  if conn_config.HasField('max_attempts'):
    params['max_attempts'] = conn_config.max_attempts
  if conn_config.HasField('request_timeout'):
    params['request_timeout'] = conn_config.request_timeout
  return prestodb.dbapi.connect(**params)
def _deserialize_auth_config(
    conn_config: presto_config_pb2.PrestoConnConfig
) -> prestodb.auth.Authentication:
  """Extracts from conn config the deserialized Presto Authentication class.

  Args:
    conn_config: Protobuf-encoded connection config for Presto client.

  Returns:
    A prestodb.auth.Authentication instance initialized with user-supplied
    parameters.

  Raises:
    RuntimeError: if authentication type is not currently supported.
  """
  # BasicAuthentication is currently the only supported scheme.
  if conn_config.HasField('basic_auth'):
    return prestodb.auth.BasicAuthentication(conn_config.basic_auth.username,
                                             conn_config.basic_auth.password)
  # TODO(b/140266796): Support KerberosAuth.
  else:
    raise RuntimeError('Authentication type not supported.')
def _row_to_example(
    instance: Iterable[Tuple[Text, Text, Any]]) -> tf.train.Example:
  """Converts one Presto result row into a tf.train.Example.

  Args:
    instance: Iterable of (column name, Presto type name, cell value) triples
      describing a single result row.

  Returns:
    A tf.train.Example holding one feature per column.

  Raises:
    RuntimeError: If a column has a Presto type with no supported mapping.
  """
  features = {}
  for column, presto_type, cell in instance:
    if cell is None:
      # NULL cells become empty (kind-less) features.
      features[column] = tf.train.Feature()
    elif presto_type in {'tinyint', 'smallint', 'integer', 'bigint'}:
      features[column] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[cell]))
    elif presto_type in {'real', 'double', 'decimal'}:
      features[column] = tf.train.Feature(
          float_list=tf.train.FloatList(value=[cell]))
    elif presto_type in {'varchar', 'char'}:
      features[column] = tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(cell)]))
    elif presto_type == 'timestamp':
      # Store timestamps as integer epoch seconds.
      epoch_seconds = int(datetime.datetime.fromisoformat(cell).timestamp())
      features[column] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[epoch_seconds]))
    else:
      # TODO(b/140266796): support more types
      # https://prestodb.github.io/docs/current/language/types
      raise RuntimeError(
          'Presto column type {} is not supported.'.format(presto_type))
  return tf.train.Example(features=tf.train.Features(feature=features))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _PrestoToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Reads from Presto and transforms the result rows into TF examples.

  Args:
    pipeline: Beam pipeline.
    exec_properties: A dict of execution properties.
    split_pattern: Split.pattern in the input config, a Presto SQL string.

  Returns:
    PCollection of TF examples.
  """
  # The Presto connection config is packed inside the generic custom config.
  custom_config = example_gen_pb2.CustomConfig()
  proto_utils.json_to_proto(exec_properties['custom_config'], custom_config)
  presto_config = presto_config_pb2.PrestoConnConfig()
  custom_config.custom_config.Unpack(presto_config)
  client = _deserialize_conn_config(presto_config)
  return (pipeline
          | 'Query' >> beam.Create([split_pattern])
          | 'QueryTable' >> beam.ParDo(_ReadPrestoDoFn(client))
          | 'ToTFExample' >> beam.Map(_row_to_example))
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
  """Generic TFX PrestoExampleGen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns the PTransform converting Presto query results to TF examples."""
    # NOTE: the original final line had extraction residue fused after the
    # return expression, which broke the syntax; the residue is removed here.
    return _PrestoToExample
"""Chicago taxi example using TFX."""
import os
from typing import Text
import absl
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ModelValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.examples.custom_components.presto_example_gen.presto_component.component import PrestoExampleGen
from tfx.examples.custom_components.presto_example_gen.proto import presto_config_pb2
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
_pipeline_name = 'chicago_taxi_presto'

# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')

# Presto configuration that corresponds with the tutorial in README.md
_presto_config = presto_config_pb2.PrestoConnConfig(
    host='localhost', port=8080, user='user', catalog='hive', schema='default')

# The query that extracts the Chicago taxi data examples from Presto, following
# the setup as described in the README.md
_query = 'SELECT * FROM chicago_taxi_trips_parquet'

# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)

# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text,
                     module_file: Text,
                     presto_config: presto_config_pb2.PrestoConnConfig,
                     query: Text, serving_model_dir: Text,
                     metadata_path: Text) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    module_file: Path to the Python module with Transform/Trainer callbacks.
    presto_config: Connection config for the Presto server to read from.
    query: SQL query that extracts the training examples from Presto.
    serving_model_dir: Directory the Pusher exports the blessed model to.
    metadata_path: Path of the SQLite ML Metadata database file.

  Returns:
    A TFX pipeline object wiring together the components below.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = PrestoExampleGen(presto_config, query=query)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))

  # Uses TFMA to compute evaluation statistics over features of a model,
  # sliced by trip_start_hour.
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))

  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['model'])

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=model_validator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, evaluator, model_validator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
  )
# To run this pipeline from the python CLI:
# $python taxi_pipeline_presto.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)

  # NOTE: the original final line had extraction residue fused after the
  # closing parentheses, which broke the syntax; the residue is removed here.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          presto_config=_presto_config,
          query=_query,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ImportExampleGen
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'cifar10_native_keras'

# This example assumes that CIFAR10 train set data is stored in
# ~/cifar10/data/train, test set data is stored in ~/cifar10/data/test, and
# the utility function is in ~/cifar10. Feel free to customize as needed.
_cifar10_root = os.path.join(os.environ['HOME'], 'cifar10')
_data_root = os.path.join(_cifar10_root, 'data')

# Python module files to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_cifar10_root, 'cifar10_utils_native_keras.py')

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir_lite = os.path.join(_cifar10_root, 'serving_model_lite',
                                       _pipeline_name)

# Directory and data locations. This example assumes all of the images,
# example code, and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')

# Path to labels file for mapping model outputs.
_labels_path = os.path.join(_data_root, 'labels.txt')

# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir_lite: Text,
                     metadata_path: Text,
                     labels_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the CIFAR10 image classification pipeline using TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the pre-split input TFRecord data.
    module_file: Path to the Python module with Transform/Trainer callbacks.
    serving_model_dir_lite: Directory the Pusher exports the TFLite model to.
    metadata_path: Path of the SQLite ML Metadata database file.
    labels_path: Path to the labels file used for TFLite model metadata.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX pipeline object wiring together the components below.
  """
  # This is needed for datasets with pre-defined splits.
  # Change the pattern argument to train_whole/* and test_whole/* to train
  # on the whole CIFAR-10 dataset.
  input_config = example_gen_pb2.Input(splits=[
      example_gen_pb2.Input.Split(name='train', pattern='train/*'),
      example_gen_pb2.Input.Split(name='eval', pattern='test/*')
  ])

  # Brings data into the pipeline.
  example_gen = ImportExampleGen(
      input_base=data_root, input_config=input_config)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Uses user-provided Python function that trains a model.
  # When training on the whole dataset, use 18744 for train steps, 156 for eval
  # steps. 18744 train steps correspond to 24 epochs on the whole train set, and
  # 156 eval steps correspond to 1 epoch on the whole test set. The
  # configuration below is for training on the dataset we provided in the data
  # folder, which has 128 train and 128 test samples. The 160 train steps
  # correspond to 40 epochs on this tiny train set, and 4 eval steps correspond
  # to 1 epoch on this tiny test set.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=160),
      eval_args=trainer_pb2.EvalArgs(num_steps=4),
      custom_config={'labels_path': labels_path})

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label_xf', model_type='tf_lite')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.55}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-3})))
          ])
      ])

  # We evaluate using the materialized examples that are output by Transform
  # because:
  # 1. the decoding_png function currently performed within Transform is not
  #    compatible with TFLite.
  # 2. MLKit requires deserialized (float32) tensor image inputs.
  # Note that for deployment, the same logic that is performed within Transform
  # must be reproduced client-side.
  evaluator = Evaluator(
      examples=transform.outputs['transformed_examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir_lite)))

  components = [
      example_gen, statistics_gen, schema_gen, example_validator, transform,
      trainer, model_resolver, evaluator, pusher
  ]

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python cifar_pipeline_native_keras.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)

  # NOTE: the original final line had extraction residue fused after the
  # closing parentheses, which broke the syntax; the residue is removed here.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir_lite=_serving_model_dir_lite,
          metadata_path=_metadata_path,
          labels_path=_labels_path,
          beam_pipeline_args=_beam_pipeline_args))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from typing import List, Text, Tuple

import absl
import flatbuffers
import tensorflow as tf
import tensorflow_transform as tft
from tflite_support import metadata as _metadata
from tflite_support import metadata_schema_py_generated as _metadata_fb

from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.components.trainer.rewriting import converters
from tfx.components.trainer.rewriting import rewriter
from tfx.components.trainer.rewriting import rewriter_factory
from tfx.dsl.io import fileio
# When training on the whole dataset use the following constants instead.
# This setting should give ~91% accuracy on the whole test set.
# _TRAIN_DATA_SIZE = 50000
# _EVAL_DATA_SIZE = 10000
# _TRAIN_BATCH_SIZE = 64
# _EVAL_BATCH_SIZE = 64
# _CLASSIFIER_LEARNING_RATE = 3e-4
# _FINETUNE_LEARNING_RATE = 5e-5
# _CLASSIFIER_EPOCHS = 12

# Sizes and hyper-parameters tuned for the tiny sample dataset shipped with
# this example (128 train / 128 test samples).
_TRAIN_DATA_SIZE = 128
_EVAL_DATA_SIZE = 128
_TRAIN_BATCH_SIZE = 32
_EVAL_BATCH_SIZE = 32
_CLASSIFIER_LEARNING_RATE = 1e-3
_FINETUNE_LEARNING_RATE = 7e-6
_CLASSIFIER_EPOCHS = 30

# Feature keys of the raw input examples.
_IMAGE_KEY = 'image'
_LABEL_KEY = 'label'

# File name of the TFLite model under the serving model dir (see run_fn).
_TFLITE_MODEL_NAME = 'tflite'
def _transformed_name(key):
return key + '_xf'
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  reader = tf.data.TFRecordDataset(filenames, compression_type='GZIP')
  return reader
def _get_serve_image_fn(model):
  """Returns a function that feeds the input tensor into the model.

  Args:
    model: The Keras model to wrap for serving.

  Returns:
    A tf.function that runs the model on an image tensor.
  """

  @tf.function
  def serve_image_fn(image_tensor):
    """Returns the output to be used in the serving signature.

    Args:
      image_tensor: A tensor representing an input image. The image should
        have 3 channels.

    Returns:
      The model's prediction on the input image tensor.
    """
    return model(image_tensor)

  return serve_image_fn
def _image_augmentation(image_features):
  """Applies random augmentation to a batch of images.

  Args:
    image_features: A batch of image features.

  Returns:
    The augmented image features.
  """
  num_images = tf.shape(image_features)[0]
  # Random flip, pad to 250x250, then randomly crop back to 224x224.
  images = tf.image.random_flip_left_right(image_features)
  images = tf.image.resize_with_crop_or_pad(images, 250, 250)
  images = tf.image.random_crop(images, (num_images, 224, 224, 3))
  return images
def _data_augmentation(feature_dict):
  """Performs data augmentation on a batch of features.

  Args:
    feature_dict: A dict containing features of samples.

  Returns:
    The feature dict with the image feature replaced by its augmented version.
  """
  image_key = _transformed_name(_IMAGE_KEY)
  feature_dict[image_key] = _image_augmentation(feature_dict[image_key])
  return feature_dict
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              is_train: bool = False,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    tf_transform_output: A TFTransformOutput.
    is_train: Whether the input dataset is the train split.
    batch_size: Number of consecutive elements of the returned dataset to
      combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuples where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  feature_spec = tf_transform_output.transformed_feature_spec().copy()
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=feature_spec,
      reader=_gzip_reader_fn,
      label_key=_transformed_name(_LABEL_KEY))

  # Augmentation must happen on-the-fly during training; doing it once in
  # Transform would bake a single fixed augmentation into the whole dataset,
  # defeating its purpose.
  if is_train:
    dataset = dataset.map(lambda x, y: (_data_augmentation(x), y))

  return dataset
def _freeze_model_by_percentage(model: tf.keras.Model, percentage: float):
  """Freezes the bottom `percentage` fraction of a model's layers.

  Args:
    model: The Keras model to be partially frozen.
    percentage: Fraction of layers (from the bottom) to freeze, in [0, 1].

  Raises:
    ValueError: If `percentage` is out of range or the model is not trainable.
  """
  if percentage < 0 or percentage > 1:
    raise ValueError('Freeze percentage should between 0.0 and 1.0')
  if not model.trainable:
    raise ValueError(
        'The model is not trainable, please set model.trainable to True')

  num_layers_to_freeze = int(len(model.layers) * percentage)
  for idx, layer in enumerate(model.layers):
    # Layers below the cutoff are frozen; the rest stay trainable.
    layer.trainable = idx >= num_layers_to_freeze
def _build_keras_model() -> Tuple[tf.keras.Model, tf.keras.Model]:
  """Creates an image classification model with a MobileNet backbone.

  Note: the original return annotation was `tf.keras.Model`, but the function
  returns a 2-tuple; the annotation is corrected here.

  Returns:
    A (model, base_model) tuple: the full image classification Keras model and
    the MobileNet backbone it is built on.
  """
  # MobileNet with weights pre-trained on ImageNet and its top (ImageNet)
  # classification layer removed; we add our own CIFAR10 classification layer
  # below. Average pooling at the last convolution layer yields a 1D vector
  # for classification, consistent with the original MobileNet setup.
  base_model = tf.keras.applications.MobileNet(
      input_shape=(224, 224, 3),
      include_top=False,
      weights='imagenet',
      pooling='avg')
  base_model.input_spec = None

  # A Dropout layer on top of the backbone guards against overfitting; the
  # Dense layer classifies the 10 CIFAR10 classes.
  model = tf.keras.Sequential([
      tf.keras.layers.InputLayer(
          input_shape=(224, 224, 3), name=_transformed_name(_IMAGE_KEY)),
      base_model,
      tf.keras.layers.Dropout(0.1),
      tf.keras.layers.Dense(10, activation='softmax')
  ])

  # Freeze the whole MobileNet backbone so only the new classifier head is
  # trained first; run_fn later unfreezes part of the backbone for fine-tuning.
  _freeze_model_by_percentage(base_model, 1.0)

  model.compile(
      loss='sparse_categorical_crossentropy',
      optimizer=tf.keras.optimizers.RMSprop(lr=_CLASSIFIER_LEARNING_RATE),
      metrics=['sparse_categorical_accuracy'])
  model.summary(print_fn=absl.logging.info)
  return model, base_model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: Map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  # tf.io.decode_png cannot be applied to a whole batch at once, so decode
  # each serialized image individually with tf.map_fn.
  decoded_images = tf.map_fn(
      lambda x: tf.io.decode_png(x[0], channels=3),
      inputs[_IMAGE_KEY],
      dtype=tf.uint8)
  resized_images = tf.image.resize(decoded_images, [224, 224])
  preprocessed_images = tf.keras.applications.mobilenet.preprocess_input(
      resized_images)

  # TODO(b/157064428): Support label transformation for Keras.
  # Do not apply label transformation as it will result in wrong evaluation.
  return {
      _transformed_name(_IMAGE_KEY): preprocessed_images,
      _transformed_name(_LABEL_KEY): inputs[_LABEL_KEY],
  }
def _write_metadata(model_path: Text, label_map_path: Text, mean: List[float],
                    std: List[float]):
  """Attaches normalization options and a label map as TFLite model metadata.

  Args:
    model_path: The path of the TFLite model.
    label_map_path: The path of the label map file.
    mean: The mean value used to normalize the input image tensor.
    std: The standard deviation used to normalize the input image tensor.
  """
  # Top-level model metadata flatbuffer.
  model_meta = _metadata_fb.ModelMetadataT()

  # Input metadata: record the normalization constants the model expects.
  normalization = _metadata_fb.ProcessUnitT()
  normalization.optionsType = (
      _metadata_fb.ProcessUnitOptions.NormalizationOptions)
  normalization.options = _metadata_fb.NormalizationOptionsT()
  normalization.options.mean = mean
  normalization.options.std = std
  input_meta = _metadata_fb.TensorMetadataT()
  input_meta.processUnits = [normalization]

  # Output metadata: attach the label file so output indices map to labels.
  label_file = _metadata_fb.AssociatedFileT()
  label_file.name = os.path.basename(label_map_path)
  label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
  output_meta = _metadata_fb.TensorMetadataT()
  output_meta.associatedFiles = [label_file]

  # Wire input/output metadata into a subgraph, and the subgraph into the
  # model metadata.
  subgraph = _metadata_fb.SubGraphMetadataT()
  subgraph.inputTensorMetadata = [input_meta]
  subgraph.outputTensorMetadata = [output_meta]
  model_meta.subgraphMetadata = [subgraph]

  # Serialize the model metadata with a flatbuffer builder.
  builder = flatbuffers.Builder(0)
  builder.Finish(
      model_meta.Pack(builder),
      _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
  metadata_buf = builder.Output()

  # Populate the metadata buffer and the label file into the model file.
  populator = _metadata.MetadataPopulator.with_model_file(model_path)
  populator.load_metadata_buffer(metadata_buf)
  populator.load_associated_files([label_map_path])
  populator.populate()
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Trains the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.

  Raises:
    ValueError: If the fixed classifier-epoch budget exceeds the total number
      of epochs implied by `fn_args.train_steps`.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = _input_fn(
      fn_args.train_files,
      tf_transform_output,
      is_train=True,
      batch_size=_TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(
      fn_args.eval_files,
      tf_transform_output,
      is_train=False,
      batch_size=_EVAL_BATCH_SIZE)

  model, base_model = _build_keras_model()

  absl.logging.info('Tensorboard logging to {}'.format(fn_args.model_run_dir))
  # Write TensorBoard logs alongside the model-run artifacts.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  # Our training regime has two phases: we first freeze the backbone and train
  # the newly added classifier only, then unfreeze part of the backbone and
  # fine-tune it jointly with the classifier.
  steps_per_epoch = int(_TRAIN_DATA_SIZE / _TRAIN_BATCH_SIZE)
  total_epochs = int(fn_args.train_steps / steps_per_epoch)
  if _CLASSIFIER_EPOCHS > total_epochs:
    raise ValueError('Classifier epochs is greater than the total epochs')

  absl.logging.info('Start training the top classifier')
  model.fit(
      train_dataset,
      epochs=_CLASSIFIER_EPOCHS,
      steps_per_epoch=steps_per_epoch,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  absl.logging.info('Start fine-tuning the model')
  # Unfreeze the top MobileNet layers and do joint fine-tuning.
  _freeze_model_by_percentage(base_model, 0.9)

  # We need to recompile the model because layer properties have changed.
  model.compile(
      loss='sparse_categorical_crossentropy',
      optimizer=tf.keras.optimizers.RMSprop(lr=_FINETUNE_LEARNING_RATE),
      metrics=['sparse_categorical_accuracy'])
  model.summary(print_fn=absl.logging.info)

  model.fit(
      train_dataset,
      initial_epoch=_CLASSIFIER_EPOCHS,
      epochs=total_epochs,
      steps_per_epoch=steps_per_epoch,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Prepare the TFLite model used for serving in MLKit.
  signatures = {
      'serving_default':
          _get_serve_image_fn(model).get_concrete_function(
              tf.TensorSpec(
                  shape=[None, 224, 224, 3],
                  dtype=tf.float32,
                  name=_transformed_name(_IMAGE_KEY)))
  }

  temp_saving_model_dir = os.path.join(fn_args.serving_model_dir, 'temp')
  model.save(temp_saving_model_dir, save_format='tf', signatures=signatures)

  tfrw = rewriter_factory.create_rewriter(
      rewriter_factory.TFLITE_REWRITER, name='tflite_rewriter')
  converters.rewrite_saved_model(temp_saving_model_dir,
                                 fn_args.serving_model_dir, tfrw,
                                 rewriter.ModelType.TFLITE_MODEL)

  # Add necessary TFLite metadata to the model in order to use it within MLKit.
  # TODO(dzats@): Handle label map file path more properly, currently
  # hard-coded.
  tflite_model_path = os.path.join(fn_args.serving_model_dir,
                                   _TFLITE_MODEL_NAME)
  # TODO(dzats@): Extend the TFLite rewriter to be able to add TFLite metadata
  # to the model.
  _write_metadata(
      model_path=tflite_model_path,
      label_map_path=fn_args.custom_config['labels_path'],
      mean=[127.5],
      std=[127.5])

  # NOTE: the original final line had extraction residue fused after this
  # call, which broke the syntax; the residue is removed here.
  fileio.rmtree(temp_saving_model_dir)
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import List, Text
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
from tfx.orchestration.airflow.airflow_dag_runner import AirflowPipelineConfig
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'taxi_solution'

# This example assumes that the taxi data is stored under the Airflow home and
# the taxi utility module under its dags folder. Feel free to customize this
# as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'airflow')
_data_root = os.path.join(_taxi_root, 'data', 'taxi_data')

# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'dags', 'taxi_utils_solution.py')

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)

# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(_taxi_root, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')

# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]

# Airflow-specific configs; these will be passed directly to airflow.
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline (used to namespace its outputs).
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV taxi data.
    module_file: Python module with the user-provided Transform/Trainer code.
    serving_model_dir: Directory the Pusher exports blessed models to.
    metadata_path: Path of the SQLite ML-Metadata database.
    beam_pipeline_args: Command-line args for Beam-powered components.

  Returns:
    A TFX `pipeline.Pipeline` wiring all components together.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  infer_schema = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  validate_stats = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=infer_schema.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=infer_schema.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=infer_schema.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='tips')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='BinaryAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.6}),
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  model_analyzer = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      # Change threshold will be ignored if there is no baseline (first run).
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=model_analyzer.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          infer_schema,
          validate_stats,
          transform,
          trainer,
          model_resolver,
          model_analyzer,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# 'DAG' below needs to be kept as a module-level name for Airflow to detect
# the dag; AirflowDagRunner.run() returns the DAG object it builds.
DAG = AirflowDagRunner(AirflowPipelineConfig(_airflow_config)).run(
    _create_pipeline(
        pipeline_name=_pipeline_name,
        pipeline_root=_pipeline_root,
        data_root=_data_root,
        module_file=_module_file,
        serving_model_dir=_serving_model_dir,
        metadata_path=_metadata_path,
        beam_pipeline_args=_beam_pipeline_args))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import absl
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.executor import TrainerFnArgs
# Categorical features are assumed to each have a maximum value in the dataset.
# NOTE(review): only 3 max values are listed for the 7 categorical keys below;
# the zip() in _build_keras_model truncates to 3 — confirm this is intended.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  reader = tf.data.TFRecordDataset(filenames, compression_type='GZIP')
  return reader
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Already-dense inputs pass through untouched.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  # Pick the dtype-appropriate "empty" padding value.
  default_value = '' if x.dtype == tf.string else 0
  return tf.squeeze(
      tf.sparse.to_dense(
          tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
          default_value),
      axis=1)
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and applies TFT."""
  # Attach the transform layer to the model so it is tracked and exported
  # together with the SavedModel.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not provided at serving time.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # Copy so popping/mutation elsewhere cannot affect the cached spec.
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=transformed_feature_spec,
      reader=_gzip_reader_fn,
      label_key=_transformed_name(_LABEL_KEY))
  return dataset
def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying taxi data.

  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).
      Defaults to [100, 70, 50, 25] when None or empty.

  Returns:
    A keras Model.
  """
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  # NOTE(review): zip() truncates to the shorter sequence, so only the first
  # len(_MAX_CATEGORICAL_FEATURE_VALUES) categorical keys get columns here —
  # confirm that is intended.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  indicator_column = [
      tf.feature_column.indicator_column(categorical_column)
      for categorical_column in categorical_columns
  ]
  model = _wide_and_deep_classifier(
      # TODO(b/139668410) replace with premade wide_and_deep keras model
      wide_columns=indicator_column,
      deep_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25])
  return model
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):
  """Build a simple keras wide and deep model.

  Args:
    wide_columns: Feature columns wrapped in indicator_column for wide (linear)
      part of the model.
    deep_columns: Feature columns for deep part of the model.
    dnn_hidden_units: [int], the layer sizes of the hidden DNN.

  Returns:
    A Wide and Deep Keras model
  """
  # Following values are hard coded for simplicity in this example,
  # However preferably they should be passed in as hparams.

  # Keras needs the feature definitions at compile time.
  # TODO(b/139081439): Automate generation of input layers from FeatureColumn.
  input_layers = {
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
      for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  }
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_VOCAB_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_BUCKET_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)
  })

  # TODO(b/161952382): Replace with Keras premade models and
  # Keras preprocessing layers.
  deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
  for numnodes in dnn_hidden_units:
    deep = tf.keras.layers.Dense(numnodes)(deep)
  wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)

  output = tf.keras.layers.Dense(
      1, activation='sigmoid')(
          tf.keras.layers.concatenate([deep, wide]))

  model = tf.keras.Model(input_layers, output)
  model.compile(
      loss='binary_crossentropy',
      # `lr` is a deprecated alias in tf.keras; `learning_rate` is the
      # supported keyword and has identical behavior.
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      metrics=[tf.keras.metrics.BinaryAccuracy()])
  model.summary(print_fn=absl.logging.info)
  return model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]),
        _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    # Categorical features are passed through with missing values filled.
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_transformed_name(_LABEL_KEY)] = tf.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
# TFX Trainer will call this function.
def run_fn(fn_args: TrainerFnArgs):
  """Train the model based on given args.

  Trains a wide-and-deep Keras model on the transformed examples and exports
  a SavedModel with a tf.Example-parsing serving signature.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # Number of nodes in the first layer of the DNN
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7

  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  # Batch size of 40 is hard-coded for this example.
  train_dataset = _input_fn(fn_args.train_files, tf_transform_output, 40)
  eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output, 40)

  # If no GPUs are found, CPU is used.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = _build_keras_model(
        # Construct layers sizes with exponential decay
        hidden_units=[
            max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
            for i in range(num_dnn_layers)
        ])

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""Utils to query README.ml-pipelines-sdk.md TFX pipeline's ml-metadata store in README.ml-pipelines-sdk.md notebook."""
import os
import time
import papermill as pm
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
import utils
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2
class TFXArtifactTypes(object):
  """Constants for different TFX artifact type names."""
  EXAMPLES = 'Examples'
  SCHEMA = 'Schema'
  EXAMPLE_STATS = 'ExampleStatistics'
  EXAMPLE_VALIDATION = 'ExampleAnomalies'
  # NOTE(review): the value below is the transform-graph type name rather
  # than a "transformed examples" type — confirm this mismatch is intended.
  TRANSFORMED_EXAMPLES = 'TransformGraph'
  MODEL = 'Model'
  MODEL_EVAL = 'ModelEvaluation'
class TFXExecutionTypes(object):
  """Constants for different TFX execution type names.

  Values are the fully-qualified component class names as recorded in the
  ML-Metadata store.
  """
  EXAMPLE_GEN = 'tfx.components.example_gen.csv_example_gen.component.CsvExampleGen'
  STATISTICS_GEN = 'tfx.components.statistics_gen.component.StatisticsGen'
  SCHEMA_GEN = 'tfx.components.schema_gen.component.SchemaGen'
  EXAMPLE_VALIDATION = 'tfx.components.example_validator.component.ExampleValidator'
  TRANSFORM = 'tfx.components.transform.component.Transform'
  TRAINER = 'tfx.components.trainer.component.Trainer'
  EVALUATOR = 'tfx.components.evaluator.component.Evaluator'
class TFXReadonlyMetadataStore(utils.ReadonlyMetadataStore):
  """A TFX ml-metadata store that provides read-only methods for notebooks."""

  @staticmethod
  def from_sqlite_db(filename_uri):
    """Returns a `TFXReadonlyMetadataStore` based off a SQLITE db uri.

    Args:
      filename_uri: A `str` indicating the path to the SQLITE db.

    Returns:
      A `TFXReadonlyMetadataStore` based off a SQLITE db uri.
    """
    c = metadata_store_pb2.ConnectionConfig()
    c.sqlite.filename_uri = filename_uri
    return TFXReadonlyMetadataStore(metadata_store.MetadataStore(c))

  def display_tfma_analysis(self, model_id, slicing_column=None):
    """Displays TFMA metrics for `model_id` sliced by `slicing_column`.

    Args:
      model_id: A `int` indicating the id of a `TFXArtifactTypes.MODEL`
        artifact.
      slicing_column: (Optional) A `str` indicating the slicing column for the
        TFMA metrics.

    Returns:
      A SlicingMetricsViewer object if in Jupyter notebook; None if in Colab.
    """
    tfma_artifact = self.get_dest_artifact_of_type(model_id,
                                                   TFXArtifactTypes.MODEL_EVAL)
    # Silently returns None when the model has no evaluation artifact.
    if tfma_artifact:
      return tfma.view.render_slicing_metrics(
          tfma.load_eval_result(tfma_artifact.uri),
          slicing_column=slicing_column)

  def compare_tfma_analysis(self, model_id, other_model_id):
    """Compares TFMA metrics for `model_id` and `other_model_id`.

    Args:
      model_id: A `int` indicating the id of a `TFXArtifactTypes.MODEL`
        artifact.
      other_model_id: A `int` indicating the id of another
        `TFXArtifactTypes.MODEL` artifact.

    Returns:
      A TimeSeriesViewer object if in Jupyter notebook; None if in Colab.
    """
    tfma_artifact, other_tfma_artifact = (self.get_dest_artifact_of_type(
        model_id, TFXArtifactTypes.MODEL_EVAL),
                                          self.get_dest_artifact_of_type(
                                              other_model_id,
                                              TFXArtifactTypes.MODEL_EVAL))
    # Comparison requires an evaluation artifact for both models.
    if tfma_artifact and other_tfma_artifact:
      eval_results = tfma.make_eval_results([
          tfma.load_eval_result(tfma_artifact.uri),
          tfma.load_eval_result(other_tfma_artifact.uri)
      ], tfma.constants.MODEL_CENTRIC_MODE)
      return tfma.view.render_time_series(eval_results,
                                          tfma.slicer.slicer.SingleSliceSpec())

  def display_stats_for_examples(self, examples_id, split='train'):
    """Displays stats for `examples_id`.

    Args:
      examples_id: A `int` indicating the id of a `TFXArtifactTypes.EXAMPLES`
        artifact.
      split: A `string` specifying the split name, by default 'train' is used.
    """
    stats_artifact = self.get_dest_artifact_of_type(
        examples_id, TFXArtifactTypes.EXAMPLE_STATS)
    if stats_artifact:
      tfdv.visualize_statistics(
          tfdv.load_statistics(
              os.path.join(stats_artifact.uri, split, 'stats_tfrecord')))

  def compare_stats_for_examples(self,
                                 examples_id,
                                 other_examples_id,
                                 name='',
                                 other_name=''):
    """Compares stats for `examples_id` and `other_examples_id`.

    Args:
      examples_id: A `int` indicating the id of one `TFXArtifactTypes.EXAMPLES`
        artifact.
      other_examples_id: A `int` indicating the id of another
        `TFXArtifactTypes.EXAMPLES` artifact.
      name: (Optional) A `str` indicating the label to use for stats of
        `examples_id`.
      other_name: (Optional) A `str` indicating the label to use for stats of
        `other_examples_id`.
    """
    stats_artifact, other_stats_artifact = (self.get_dest_artifact_of_type(
        examples_id, TFXArtifactTypes.EXAMPLE_STATS),
                                            self.get_dest_artifact_of_type(
                                                other_examples_id,
                                                TFXArtifactTypes.EXAMPLE_STATS))
    if stats_artifact and other_stats_artifact:
      # NOTE(review): stats are loaded directly from the artifact uri here,
      # while display_stats_for_examples appends '<split>/stats_tfrecord' —
      # confirm both path layouts are valid for these artifacts.
      tfdv.visualize_statistics(
          tfdv.load_statistics(stats_artifact.uri),
          rhs_statistics=tfdv.load_statistics(other_stats_artifact.uri),
          lhs_name=name,
          rhs_name=other_name)

  def display_examples_stats_for_model(self, model_id):
    """Displays stats for examples used to train `model_id`."""
    examples_artifact = self.get_source_artifact_of_type(
        model_id, TFXArtifactTypes.EXAMPLES)
    if examples_artifact:
      self.display_stats_for_examples(examples_artifact.id)

  def compare_examples_stats_for_models(self, model_id, other_model_id):
    """Compares stats for examples to train `model_id` & `other_model_id`."""
    examples_artifact, other_examples_artifact = (
        self.get_source_artifact_of_type(model_id, TFXArtifactTypes.EXAMPLES),
        self.get_source_artifact_of_type(other_model_id,
                                         TFXArtifactTypes.EXAMPLES))
    if examples_artifact and other_examples_artifact:
      self.compare_stats_for_examples(
          examples_artifact.id,
          other_examples_artifact.id,
          name='model_' + str(model_id),
          other_name='model_' + str(other_model_id))

  def display_tensorboard(self, model_id, *other_model_ids):
    """Returns a TensorBoard link for `model_id` and `other_model_ids`.

    Spawns TensorBoard via a papermill-executed notebook and scrapes its
    startup log for the URL.

    Args:
      model_id: A `int` indicating the id of a `TFXArtifactTypes.MODEL`
        artifact.
      *other_model_ids: (Optional) A list of `int` indicating the ids of other
        `TFXArtifactTypes.MODEL` artifacts to also include in the Tensorboard
        invocation for comparison.
    """
    model_ids = [model_id] + list(other_model_ids)
    model_artifacts = self.metadata_store.get_artifacts_by_id(model_ids)
    model_ids_str = '-'.join([str(m) for m in model_ids])
    log_file = os.path.join(
        os.environ['HOME'],
        'tensorboard_model_{}_log.txt'.format(model_ids_str),
    )
    output_notebook_path = os.path.join(
        os.environ['HOME'],
        'spawn_tensorboard_{}_output.ipynb'.format(model_ids_str),
    )
    # One run-group per model so TensorBoard labels the curves.
    tensorboard_logdir = ','.join(
        ['model_{}:{}'.format(m.id, m.uri) for m in model_artifacts])
    pm.execute_notebook(
        'spawn_tensorboard.ipynb',
        output_notebook_path,
        parameters=dict(tb_logdir=tensorboard_logdir, tb_run_log=log_file),
        progress_bar=False)
    time.sleep(5)  # Give it some time for log_filename to be flushed.
    with open(log_file) as f:
      for l in f:
        if 'TensorBoard' in l:
          # "TensorBoard 1.12.2 at http://... (Press CTRL+C to quit)"
          # The URL is the 4th whitespace-separated token of that line.
          return l.split(' ')[3]
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import Evaluator
from tfx.components import ImportExampleGen
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.experimental.data_view import binder_component
from tfx.components.experimental.data_view import provider_component
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'tf_ranking_antique'
# This example assumes that the training data is stored in
# ~/tf_ranking_antique/data
# and the module file is in ~/tf_ranking_antique. Feel free to customize this
# as needed.
_ranking_root = os.path.join(os.environ['HOME'], 'tf_ranking_antique')
_data_root = os.path.join(_ranking_root, 'data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_ranking_root, 'taxi_utils_native_keras.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(
    _ranking_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all the example code and
# metadata library is relative to $HOME, but you can store these files anywhere
# on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Creates pipeline.

  Builds the TF-Ranking Antique pipeline: proto-format ExampleGen, DataView
  provider/binder for ELWC decoding, stats/schema, non-materializing
  Transform (vocabulary only), Trainer, NDCG-thresholded Evaluator and
  Pusher.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the serialized ELWC input records.
    module_file: Python module with make_decoder/preprocessing_fn/run_fn.
    serving_model_dir: Directory the Pusher exports blessed models to.
    metadata_path: Path of the SQLite ML-Metadata database.
    beam_pipeline_args: Command-line args for Beam-powered components.

  Returns:
    A TFX `pipeline.Pipeline`.
  """
  pipeline_root = os.path.join(pipeline_root, 'pipelines', pipeline_name)
  examples = external_input(data_root)
  example_gen = ImportExampleGen(
      input=examples,
      # IMPORTANT: must set FORMAT_PROTO
      payload_format=example_gen_pb2.FORMAT_PROTO)
  data_view_provider = provider_component.TfGraphDataViewProvider(
      module_file=module_file,
      create_decoder_func='make_decoder')
  data_view_binder = binder_component.DataViewBinder(
      example_gen.outputs['examples'],
      data_view_provider.outputs['data_view'])
  statistics_gen = StatisticsGen(
      examples=data_view_binder.outputs['output_examples'])
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  transform = Transform(
      examples=data_view_binder.outputs['output_examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file,
      # important: must disable Transform materialization.
      materialize=False)
  trainer = Trainer(
      examples=data_view_binder.outputs['output_examples'],
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      transform_graph=transform.outputs['transform_graph'],
      module_file=module_file,
      train_args=trainer_pb2.TrainArgs(num_steps=1000),
      schema=schema_gen.outputs['schema'],
      eval_args=trainer_pb2.EvalArgs(num_steps=10))
  eval_config = tfma.EvalConfig(
      model_specs=[
          tfma.ModelSpec(
              signature_name='',
              label_key='relevance',
              padding_options=tfma.config.PaddingOptions(
                  label_float_padding=-1.0, prediction_float_padding=-1.0))
      ],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['query_tokens']),
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              per_slice_thresholds={
                  'metric/ndcg_10':
                      tfma.config.PerSliceMetricThresholds(thresholds=[
                          tfma.PerSliceMetricThreshold(
                              # The overall slice.
                              slicing_specs=[tfma.SlicingSpec()],
                              threshold=tfma.MetricThreshold(
                                  value_threshold=tfma.GenericValueThreshold(
                                      lower_bound={'value': 0.6})))
                      ])
              })
      ])
  evaluator = Evaluator(
      examples=data_view_binder.outputs['output_examples'],
      model=trainer.outputs['model'],
      eval_config=eval_config,
      schema=schema_gen.outputs['schema'])
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, data_view_provider, data_view_binder,
          statistics_gen,
          schema_gen,
          transform,
          trainer,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $ python ranking_pipeline.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Runs the pipeline locally on Beam's direct runner.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          metadata_path=_metadata_path,
          serving_model_dir=_serving_model_dir,
          beam_pipeline_args=_beam_pipeline_args))
"""Module file."""
import tensorflow as tf
import tensorflow_ranking as tfr
import tensorflow_transform as tft
from tfx.examples.ranking import features
from tfx.examples.ranking import struct2tensor_parsing_utils
from tfx_bsl.public import tfxio
def make_decoder():
  """Creates a data decoder that decodes ELWC records to tensors.

  A DataView (see "TfGraphDataViewProvider" component in the pipeline)
  will refer to this decoder. And any components that consume the data
  with the DataView applied will use this decoder.

  Returns:
    An ELWC decoder.
  """
  context_features, example_features, label_feature = features.get_features()
  return struct2tensor_parsing_utils.ELWCDecoder(
      name='ELWCDecoder',
      context_features=context_features,
      example_features=example_features,
      size_feature_name=features.LIST_SIZE_FEATURE_NAME,
      label_feature=label_feature)
def preprocessing_fn(inputs):
  """Transform preprocessing_fn.

  Computes a single vocabulary shared between query and document tokens as a
  Transform side output; the input features themselves pass through unchanged.
  """
  all_tokens = tf.concat(
      [
          inputs[features.QUERY_TOKENS].flat_values,
          inputs[features.DOCUMENT_TOKENS].flat_values,
      ],
      axis=0)
  tft.vocabulary(all_tokens, vocab_filename='shared_vocab')
  return inputs
def run_fn(trainer_fn_args):
  """TFX trainer entry point.

  Trains the ranking model and exports a SavedModel whose serving signature
  accepts serialized ELWC records.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)

  # Hyperparameters are hard coded for this example; preferably they would be
  # passed in via hyperparameters.
  hparams = dict(
      batch_size=32,
      embedding_dimension=20,
      learning_rate=0.05,
      dropout_rate=0.8,
      hidden_layer_dims=[64, 32, 16],
      loss='approx_ndcg_loss',
      use_batch_norm=True,
      batch_norm_moment=0.99
  )

  train_dataset = _input_fn(trainer_fn_args.train_files,
                            trainer_fn_args.data_accessor,
                            hparams['batch_size'])
  eval_dataset = _input_fn(trainer_fn_args.eval_files,
                           trainer_fn_args.data_accessor,
                           hparams['batch_size'])

  model = _create_ranking_model(tf_transform_output, hparams)
  model.summary()
  log_dir = trainer_fn_args.model_run_dir
  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=log_dir, update_freq='batch')
  model.fit(
      train_dataset,
      steps_per_epoch=trainer_fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=trainer_fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # TODO(zhuo): Add support for Regress signature.
  @tf.function(input_signature=[tf.TensorSpec([None], tf.string)],
               autograph=False)
  def predict_serving_fn(serialized_elwc_records):
    """Decodes serialized ELWC records and runs the model on them."""
    decoder = make_decoder()
    decoded = decoder.decode_record(serialized_elwc_records)
    # The label is not available (nor needed) at serving time.
    decoded.pop(features.LABEL)
    return {tf.saved_model.PREDICT_OUTPUTS: model(decoded)}

  model.save(
      trainer_fn_args.serving_model_dir,
      save_format='tf',
      signatures={
          'serving_default':
              predict_serving_fn.get_concrete_function(),
      })
def _input_fn(file_patterns,
              data_accessor,
              batch_size) -> tf.data.Dataset:
  """Returns a dataset of decoded tensors."""

  def prepare_label(parsed_ragged_tensors):
    """Pops the label out of the feature dict and densifies it."""
    label = parsed_ragged_tensors.pop(features.LABEL)
    # Convert labels to a dense tensor.
    label = label.to_tensor(default_value=features.LABEL_PADDING_VALUE)
    return parsed_ragged_tensors, label

  # NOTE: this dataset already contains RaggedTensors from the Decoder.
  dataset = data_accessor.tf_dataset_factory(
      file_patterns,
      tfxio.TensorFlowDatasetOptions(batch_size=batch_size),
      schema=None)
  # .repeat() makes the dataset infinite; callers bound it via step counts.
  return dataset.map(prepare_label).repeat()
def _preprocess_keras_inputs(context_keras_inputs, example_keras_inputs,
                             tf_transform_output, hparams):
  """Preprocesses the inputs, including vocab lookup and embedding.

  Args:
    context_keras_inputs: dict of per-query (context) Keras Input tensors.
    example_keras_inputs: dict of per-document (example) Keras Input tensors.
    tf_transform_output: TFTransformOutput holding the shared vocabulary.
    hparams: dict of hyperparameters ('embedding_dimension' is used here).

  Returns:
    A (context_features, example_features, mask) tuple where the features are
    mean-pooled token embeddings and mask marks valid list positions.
  """
  # +1 accounts for the single OOV bucket added on top of the vocabulary.
  lookup_layer = tf.keras.layers.experimental.preprocessing.StringLookup(
      max_tokens=(
          tf_transform_output.vocabulary_size_by_name('shared_vocab') + 1),
      vocabulary=tf_transform_output.vocabulary_file_by_name('shared_vocab'),
      num_oov_indices=1,
      oov_token='[UNK#]',
      mask_token=None)
  embedding_layer = tf.keras.layers.Embedding(
      input_dim=(
          tf_transform_output.vocabulary_size_by_name('shared_vocab') + 1),
      output_dim=hparams['embedding_dimension'],
      embeddings_initializer=None,
      embeddings_constraint=None)

  def embedding(input_tensor):
    # TODO(b/158673891): Support weighted features.
    embedded_tensor = embedding_layer(lookup_layer(input_tensor))
    mean_embedding = tf.reduce_mean(embedded_tensor, axis=-2)
    # mean_embedding could be a dense tensor (context feature) or a ragged
    # tensor (example feature). if it's ragged, we densify it first.
    if isinstance(mean_embedding.type_spec, tf.RaggedTensorSpec):
      return struct2tensor_parsing_utils.make_ragged_densify_layer()(
          mean_embedding)
    return mean_embedding

  preprocessed_context_features, preprocessed_example_features = {}, {}
  context_features, example_features, _ = features.get_features()
  for feature in context_features:
    preprocessed_context_features[feature.name] = embedding(
        context_keras_inputs[feature.name])
  for feature in example_features:
    preprocessed_example_features[feature.name] = embedding(
        example_keras_inputs[feature.name])
  # The per-query list size drives the padding mask below.
  list_size = struct2tensor_parsing_utils.make_ragged_densify_layer()(
      context_keras_inputs[features.LIST_SIZE_FEATURE_NAME])
  list_size = tf.reshape(list_size, [-1])
  mask = tf.sequence_mask(list_size)
  return preprocessed_context_features, preprocessed_example_features, mask
def _create_ranking_model(tf_transform_output, hparams) -> tf.keras.Model:
  """Creates and compiles a Keras DNN ranking model.

  Context and example features are embedded, flattened along the list
  dimension, scored one (query, example) pair at a time by a configurable
  DNN, and the per-pair logits are restored to list shape.

  Args:
    tf_transform_output: A tft.TFTransformOutput providing the shared
      vocabulary used for embedding lookups.
    hparams: A dict of hyperparameters. Keys read here: 'use_batch_norm',
      'batch_norm_moment', 'hidden_layer_dims', 'dropout_rate',
      'learning_rate' and 'loss' (a TF-Ranking loss name).

  Returns:
    A compiled tf.keras.Model producing per-list logits.
  """
  context_feature_specs, example_feature_specs, _ = features.get_features()
  context_keras_inputs, example_keras_inputs = (
      struct2tensor_parsing_utils.create_keras_inputs(
          context_feature_specs, example_feature_specs,
          features.LIST_SIZE_FEATURE_NAME))
  context_features, example_features, mask = _preprocess_keras_inputs(
      context_keras_inputs, example_keras_inputs, tf_transform_output, hparams)

  # Flatten [batch, list_size, ...] example features (tiling the context
  # features) so the scoring DNN sees one row per (query, example) pair.
  (flattened_context_features,
   flattened_example_features) = tfr.keras.layers.FlattenList()(
       context_features, example_features, mask)

  # Concatenate flattened context and example features along `list_size` dim.
  context_input = [
      tf.keras.layers.Flatten()(flattened_context_features[name])
      for name in sorted(flattened_context_features)
  ]
  example_input = [
      tf.keras.layers.Flatten()(flattened_example_features[name])
      for name in sorted(flattened_example_features)
  ]
  input_layer = tf.concat(context_input + example_input, 1)

  dnn = tf.keras.Sequential()
  if hparams['use_batch_norm']:
    dnn.add(
        tf.keras.layers.BatchNormalization(
            momentum=hparams['batch_norm_moment']))
  for layer_size in hparams['hidden_layer_dims']:
    dnn.add(tf.keras.layers.Dense(units=layer_size))
    if hparams['use_batch_norm']:
      dnn.add(tf.keras.layers.BatchNormalization(
          momentum=hparams['batch_norm_moment']))
    dnn.add(tf.keras.layers.Activation(activation=tf.nn.relu))
    dnn.add(tf.keras.layers.Dropout(rate=hparams['dropout_rate']))
  dnn.add(tf.keras.layers.Dense(units=1))

  # Restore the flat per-pair scores back to [batch, list_size] logits.
  logits = tfr.keras.layers.RestoreList()(dnn(input_layer), mask)

  model = tf.keras.Model(
      inputs={
          **context_keras_inputs,
          **example_keras_inputs
      },
      outputs=logits,
      name='dnn_ranking_model')
  model.compile(
      optimizer=tf.keras.optimizers.Adagrad(
          learning_rate=hparams['learning_rate']),
      loss=tfr.keras.losses.get(hparams['loss']),
      metrics=tfr.keras.metrics.default_keras_metrics())
  return model
"""BERT Sentence Pair Classification example on MRPC using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'bert_mrpc'
# This example assumes that MRPC data is stored in ~/bert/mrpc/data and the
# utility function is in ~/bert/mrpc. Feel free to customize as needed.
_bert_mrpc_root = os.path.join(os.environ['HOME'], 'bert', 'mrpc')
_data_root = os.path.join(_bert_mrpc_root, 'data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_bert_mrpc_root, 'bert_mrpc_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_bert_mrpc_root, 'serving_model',
_pipeline_name)
# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
# Pipeline arguments for Beam powered Components.
# TODO(dzats): Once release 0.23 of both tfma and tft addresses the issue with
# multi-worker execution, switch to direct_num_workers=0.
_beam_pipeline_args = ['--direct_num_workers=1']
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the BERT classification pipeline on the MRPC dataset with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline artifacts.
    data_root: Directory containing the train/ and validation/ CSV splits.
    module_file: Path to the module providing preprocessing_fn and run_fn.
    serving_model_dir: Directory the Pusher exports the blessed model to.
    metadata_path: Path to the sqlite ML Metadata database.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX pipeline.Pipeline.
  """
  input_config = example_gen_pb2.Input(splits=[
      example_gen_pb2.Input.Split(name='train', pattern='train/*'),
      example_gen_pb2.Input.Split(name='eval', pattern='validation/*')
  ])

  # Brings data into the pipeline.
  example_gen = CsvExampleGen(input_base=data_root, input_config=input_config)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Uses a user-provided run_fn (via the GenericExecutor) to train the model.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      # Adjust these steps when training on the full dataset.
      train_args=trainer_pb2.TrainArgs(num_steps=1),
      eval_args=trainer_pb2.EvalArgs(num_steps=1))

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          # Adjust the threshold when training on the
                          # full dataset.
                          lower_bound={'value': 0.5}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-2})))
          ])
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  components = [
      example_gen,
      statistics_gen,
      schema_gen,
      example_validator,
      transform,
      trainer,
      model_resolver,
      evaluator,
      pusher,
  ]
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      enable_cache=True,
      beam_pipeline_args=beam_pipeline_args,
  )
# Entry point: build the MRPC pipeline and run it locally with Beam.
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Python source file include mrpc pipeline functions and necessary utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier
from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor
# TF-Hub handle of the cased English BERT-Base model used for both
# preprocessing (vocab) and the trained encoder.
_BERT_LINK = 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2'
_EPOCHS = 1  # Number of training epochs.
_EVAL_BATCH_SIZE = 32
_FEATURE_KEY_A = 'sentence1'  # First sentence of each MRPC pair.
_FEATURE_KEY_B = 'sentence2'  # Second sentence of each MRPC pair.
_LABEL_KEY = 'label'
_MAX_LEN = 128  # Token sequence length after padding/truncation.
_TRAIN_BATCH_SIZE = 32
def _gzip_reader_fn(filenames):
  """Builds a TFRecord dataset reader for gzip-compressed files."""
  # Transform materializes examples as gzip'ed TFRecords, so the reader must
  # be told about the compression explicitly.
  compression = 'GZIP'
  return tf.data.TFRecordDataset(filenames, compression_type=compression)
def _tokenize(sequence_a, sequence_b):
  """Tokenizes the two sentences and inserts appropriate special tokens.

  Args:
    sequence_a: String tensor holding the first sentence of each pair.
    sequence_b: String tensor holding the second sentence of each pair.

  Returns:
    A (word_ids, input_mask, segment_ids) tuple as produced by
    BertPreprocessor.tokenize_sentence_pair, each [batch_size, _MAX_LEN].
  """
  # NOTE(review): a BertPreprocessor (which loads the TF-Hub module) is
  # constructed on every call — confirm this traces only once per Transform
  # graph construction.
  processor = BertPreprocessor(_BERT_LINK)
  return processor.tokenize_sentence_pair(
      tf.reshape(sequence_a, [-1]), tf.reshape(sequence_b, [-1]), _MAX_LEN)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature Tensors.
  """
  input_word_ids, input_mask, segment_ids = _tokenize(inputs[_FEATURE_KEY_A],
                                                      inputs[_FEATURE_KEY_B])
  return {
      # NOTE(review): uses the literal 'label' rather than _LABEL_KEY —
      # consider unifying.
      'label': inputs['label'],
      'input_word_ids': input_word_ids,
      'input_mask': input_mask,
      'segment_ids': segment_ids
  }
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=transformed_feature_spec,
      reader=_gzip_reader_fn,
      label_key=_LABEL_KEY)
  # Prefetch so input production overlaps with model computation.
  return dataset.prefetch(tf.data.experimental.AUTOTUNE)
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example."""
  # Attach the Transform graph to the model so it is exported alongside it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not provided at serving time.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = _input_fn(
      fn_args.train_files, tf_transform_output, batch_size=_TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(
      fn_args.eval_files, tf_transform_output, batch_size=_EVAL_BATCH_SIZE)

  # Build the model under a MirroredStrategy scope so training uses all
  # locally available GPUs (or falls back to CPU).
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    bert_layer = hub.KerasLayer(_BERT_LINK, trainable=True)
    model = build_and_compile_bert_classifier(bert_layer, _MAX_LEN, 2, 2e-5)

  model.fit(
      train_dataset,
      epochs=_EPOCHS,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps)

  # Export with a serving signature that accepts serialized tf.Examples and
  # applies the Transform graph before invoking the model.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""Preprocessing using tensorflow_text BertTokenizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
from tensorflow.python.eager.context import eager_mode # pylint: disable=g-direct-tensorflow-import
_CLS = '[CLS]'
_PAD = '[PAD]'
_SEP = '[SEP]'
class BertPreprocessor(object):
  """BERT tokenizer built on top of tensorflow_text.BertTokenizer."""

  def __init__(self, model_link: Text):
    self._model_link = model_link
    self._model = hub.KerasLayer(model_link)
    self._find_special_tokens()

  def _find_special_tokens(self):
    """Finds the special token ids for [CLS], [PAD] and [SEP].

    Since each BERT model is trained on a different vocabulary, it's important
    to find the special token indices pertaining to that model.

    Since in Transform, tensorflow_hub.KerasLayer loads a symbolic tensor,
    turn on eager mode to get the actual vocab_file location.
    """
    with eager_mode():
      model = hub.KerasLayer(self._model_link)
      vocab = model.resolved_object.vocab_file.asset_path.numpy()
      self._do_lower_case = model.resolved_object.do_lower_case.numpy()
      with tf.io.gfile.GFile(vocab, 'r') as f:
        lines = f.read().split('\n')
      # Token id == line index in the vocab file.
      self._sep_id = lines.index(_SEP)
      self._cls_id = lines.index(_CLS)
      self._pad_id = lines.index(_PAD)

  def tokenize_single_sentence_unpad(self,
                                     sequence: tf.Tensor,
                                     max_len: int = 128,
                                     add_cls: bool = True,
                                     add_sep: bool = True):
    """Tokenizes a sentence with the BERT model vocab file, without padding.

    Adds special tokens according to config.

    Args:
      sequence: Tensor of shape [batch_size, 1].
      max_len: The number of tokens after padding and truncating.
      add_cls: Whether to add CLS token at the front of each sequence.
      add_sep: Whether to add SEP token at the end of each sequence.

    Returns:
      word_ids: Ragged tokenized sequences [batch_size, None].
    """
    vocab_file_path = self._model.resolved_object.vocab_file.asset_path
    tokenizer = text.BertTokenizer(
        vocab_file_path,
        lower_case=self._do_lower_case,
        token_out_type=tf.int64)
    word_ids = tokenizer.tokenize(sequence)
    # Tokenizer default puts tokens into array of size 1; merge_dims flattens
    # it.
    word_ids = word_ids.merge_dims(-2, -1)

    if add_cls:
      cls_token = tf.fill([tf.shape(sequence)[0], 1],
                          tf.constant(self._cls_id, dtype=tf.int64))
      word_ids = tf.concat([cls_token, word_ids], 1)

    if add_sep:
      sep_token = tf.fill([tf.shape(sequence)[0], 1],
                          tf.constant(self._sep_id, dtype=tf.int64))
      # Truncate to leave room for the trailing [SEP] token.
      # NOTE(review): when add_sep is False the sequence is never truncated
      # here; callers rely on a later to_tensor(shape=[None, max_len]) to
      # clip — confirm that is intended.
      word_ids = word_ids[:, :max_len - 1]
      word_ids = tf.concat([word_ids, sep_token], 1)
    return word_ids

  def tokenize_single_sentence_pad(self,
                                   sequence: tf.Tensor,
                                   max_len: int = 128,
                                   add_cls: bool = True,
                                   add_sep: bool = True):
    """Tokenizes a single sentence according to the vocab of the BERT model.

    Adds special tokens according to config.

    Args:
      sequence: Tensor of shape [batch_size, 1].
      max_len: The number of tokens after padding and truncating.
      add_cls: Whether to add CLS token at the front of each sequence.
      add_sep: Whether to add SEP token at the end of each sequence.

    Returns:
      word_ids: Tokenized sequences [batch_size, max_len].
      input_mask: Mask padded tokens [batch_size, max_len].
      segment_ids: Distinguish multiple sequences [batch_size, max_len].
    """
    word_ids = self.tokenize_single_sentence_unpad(sequence, max_len, add_cls,
                                                   add_sep)
    word_ids = word_ids.to_tensor(
        shape=[None, max_len],
        default_value=tf.constant(self._pad_id, dtype=tf.int64))
    input_mask = tf.cast(tf.not_equal(word_ids, self._pad_id), tf.int64)
    # A single sentence always belongs to segment 0.
    segment_ids = tf.fill(tf.shape(input_mask), tf.constant(0, dtype=tf.int64))
    return word_ids, input_mask, segment_ids

  def tokenize_sentence_pair(self, sequence_a: tf.Tensor, sequence_b: tf.Tensor,
                             max_len: int):
    """Tokenizes a sequence pair.

    Tokenizes each sequence with tokenize_single_sentence_unpad, then adds a
    CLS token in front of the first sequence, and SEP tokens between the two
    sequences and at the end of the second sequence.

    Args:
      sequence_a: [batch_size, 1]
      sequence_b: [batch_size, 1]
      max_len: The length of the concatenated tokenized sentences.

    Returns:
      word_ids: Tokenized sequences [batch_size, max_len].
      input_mask: Mask padded tokens [batch_size, max_len].
      segment_ids: Distinguish multiple sequences [batch_size, max_len].
    """
    # TODO(dzats): The issue here is nuanced. Depending on the dataset, one
    # might want to keep the entire first sentence, or the second. Consider
    # alternate truncation strategies.
    sentence_len = max_len // 2
    word_id_a = self.tokenize_single_sentence_unpad(
        sequence_a,
        sentence_len,
        True,
        True,
    )
    word_id_b = self.tokenize_single_sentence_unpad(
        sequence_b,
        sentence_len,
        False,
        True,
    )
    word_ids = tf.concat([word_id_a, word_id_b], 1)
    word_ids = word_ids.to_tensor(
        shape=[None, max_len],
        default_value=tf.constant(self._pad_id, dtype=tf.int64))
    input_mask = tf.cast(tf.not_equal(word_ids, self._pad_id), tf.int64)
    # Build a ragged tensor of zeros with word_id_a's shape; positions beyond
    # the first sentence default to segment 1 when densified.
    segment_ids = tf.cast(word_id_a < 0, tf.int64)
    segment_ids = segment_ids.to_tensor(
        shape=[None, max_len], default_value=tf.constant(1, dtype=tf.int64))
    return word_ids, input_mask, segment_ids
"""Configurable fine-tuning BERT models for various tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text, Optional, List, Union
import tensorflow as tf
import tensorflow.keras as keras
def build_bert_classifier(bert_layer: tf.keras.layers.Layer,
                          max_len: int,
                          num_classes: int,
                          dropout: float = 0.1,
                          activation: Optional[Text] = None):
  """BERT Keras model for classification.

  Connect configurable fully connected layers on top of the BERT
  pooled_output.

  Args:
    bert_layer: A tensorflow_hub.KerasLayer instance of the BERT layer.
    max_len: The maximum length of preprocessed tokens.
    num_classes: Number of unique classes in the labels. Determines the output
      shape of the classification layer.
    dropout: Dropout rate to be used for the classification layer.
    activation: Activation function to use. If you don't specify anything, no
      activation is applied (ie. "linear" activation: a(x) = x).

  Returns:
    A Keras model.
  """
  input_layer_names = ["input_word_ids", "input_mask", "segment_ids"]
  input_layers = [
      keras.layers.Input(shape=(max_len,), dtype=tf.int64, name=name)
      for name in input_layer_names
  ]
  # The hub BERT layer expects int32 inputs.
  converted_layers = [tf.cast(k, tf.int32) for k in input_layers]
  pooled_output, _ = bert_layer(converted_layers)
  output = keras.layers.Dropout(dropout)(pooled_output)
  output = keras.layers.Dense(num_classes, activation=activation)(output)
  model = keras.Model(input_layers, output)
  return model
def compile_bert_classifier(
    model: tf.keras.Model,
    loss: tf.keras.losses.Loss = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True),
    learning_rate: float = 2e-5,
    metrics: Optional[List[Union[Text, tf.keras.metrics.Metric]]] = None):
  """Compile the BERT classifier using suggested parameters.

  Args:
    model: A keras model. Most likely the output of build_bert_classifier.
    loss: A tf.keras loss. The suggested loss function expects integer labels
      (e.g. 0, 1, 2). If the labels are one-hot encoded, consider using
      tf.keras.losses.CategoricalCrossentropy with from_logits set to true.
    learning_rate: Suggested learning rate to be used in
      tf.keras.optimizers.Adam. The three suggested learning_rates for
      fine-tuning are [2e-5, 3e-5, 5e-5].
    metrics: Default None will use ['sparse_categorical_accuracy']. An array
      of strings or tf.keras.metrics.

  Returns:
    None.
  """
  if metrics is None:
    metrics = ["sparse_categorical_accuracy"]
  model.compile(
      optimizer=tf.keras.optimizers.Adam(learning_rate),
      loss=loss,
      metrics=metrics)
def build_and_compile_bert_classifier(
    bert_layer: tf.keras.layers.Layer,
    max_len: int,
    num_classes: int,
    learning_rate: float = 5e-5,
    metrics: Optional[List[Union[Text, tf.keras.metrics.Metric]]] = None):
  """Build and compile a keras BERT classification model.

  Apart from the necessary inputs, use default/suggested parameters in the
  build and compile BERT classifier functions.

  Args:
    bert_layer: A tensorflow_hub.KerasLayer instance of the BERT layer.
    max_len: The maximum length of preprocessed tokens.
    num_classes: Number of unique classes in the labels. Determines the output
      shape of the classification layer.
    learning_rate: Suggested learning rate to be used in
      tf.keras.optimizers.Adam. The three suggested learning_rates for
      fine-tuning are [2e-5, 3e-5, 5e-5].
    metrics: Default None will use ['sparse_categorical_accuracy']. An array
      of strings or tf.keras.metrics.

  Returns:
    A compiled keras BERT classification model.
  """
  if metrics is None:
    metrics = ["sparse_categorical_accuracy"]
  model = build_bert_classifier(bert_layer, max_len, num_classes)
  compile_bert_classifier(model, learning_rate=learning_rate, metrics=metrics)
  return model
"""BERT Single Sentence Classification example on CoLA using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'bert_cola'
# This example assumes that COLA data is stored in ~/bert/cola/data and the
# utility function is in ~/bert/cola. Feel free to customize as needed.
_bert_cola_root = os.path.join(os.environ['HOME'], 'bert', 'cola')
_data_root = os.path.join(_bert_cola_root, 'data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_bert_cola_root, 'bert_cola_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_bert_cola_root, 'serving_model',
_pipeline_name)
# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
# Pipeline arguments for Beam powered Components.
# TODO(dzats): Once release 0.23 of both tfma and tft addresses the issue with
# multi-worker execution, switch to direct_num_workers=0.
_beam_pipeline_args = ['--direct_num_workers=1']
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the BERT classification pipeline on the CoLA dataset with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline artifacts.
    data_root: Directory containing the train/ and validation/ CSV splits.
    module_file: Path to the module providing preprocessing_fn and run_fn.
    serving_model_dir: Directory the Pusher exports the blessed model to.
    metadata_path: Path to the sqlite ML Metadata database.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX pipeline.Pipeline.
  """
  input_config = example_gen_pb2.Input(splits=[
      example_gen_pb2.Input.Split(name='train', pattern='train/*'),
      example_gen_pb2.Input.Split(name='eval', pattern='validation/*')
  ])

  # Brings data into the pipeline.
  example_gen = CsvExampleGen(input_base=data_root, input_config=input_config)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Uses a user-provided run_fn (via the GenericExecutor) to train the model.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      # Adjust these steps when training on the full dataset.
      train_args=trainer_pb2.TrainArgs(num_steps=2),
      eval_args=trainer_pb2.EvalArgs(num_steps=1))

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          # Adjust the threshold when training on the
                          # full dataset.
                          lower_bound={'value': 0.5}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-2})))
          ])
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  components = [
      example_gen,
      statistics_gen,
      schema_gen,
      example_validator,
      transform,
      trainer,
      model_resolver,
      evaluator,
      pusher,
  ]
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      enable_cache=True,
      beam_pipeline_args=beam_pipeline_args,
  )
# Entry point: build the CoLA pipeline and run it locally with Beam.
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Python source file include cola pipeline functions and necessary utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier
from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor
_TRAIN_BATCH_SIZE = 16
_EVAL_BATCH_SIZE = 16
_FEATURE_KEY = 'sentence'  # Input sentence column in the CoLA CSVs.
_LABEL_KEY = 'label'
# TF-Hub handle of the cased English BERT-Base model used for both
# preprocessing (vocab) and the trained encoder.
_BERT_LINK = 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2'
_MAX_LEN = 256  # Token sequence length after padding/truncation.
_EPOCHS = 1  # Number of training epochs.
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  # Transform materializes examples as gzip'ed TFRecords.
  return tf.data.TFRecordDataset(filenames, compression_type='GZIP')
def _tokenize(feature):
  """Tokenizes the sentence tensor and inserts appropriate special tokens.

  Args:
    feature: A string tensor holding the raw sentences.

  Returns:
    A (word_ids, input_mask, segment_ids) tuple as produced by
    BertPreprocessor.tokenize_single_sentence_pad, each [batch_size, _MAX_LEN].
  """
  # NOTE(review): a BertPreprocessor (which loads the TF-Hub module) is
  # constructed on every call — confirm this traces only once per Transform
  # graph construction.
  processor = BertPreprocessor(_BERT_LINK)
  return processor.tokenize_single_sentence_pad(
      tf.reshape(feature, [-1]), max_len=_MAX_LEN)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature Tensors.
  """
  input_word_ids, input_mask, segment_ids = _tokenize(inputs[_FEATURE_KEY])
  return {
      # NOTE(review): output key is the literal 'label' while the input is
      # read via _LABEL_KEY — consider unifying.
      'label': inputs[_LABEL_KEY],
      'input_word_ids': input_word_ids,
      'input_mask': input_mask,
      'segment_ids': segment_ids
  }
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of materialized transformed input
      tfrecord files.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=transformed_feature_spec,
      reader=_gzip_reader_fn,
      label_key=_LABEL_KEY)
  # Prefetch so input production overlaps with model computation.
  return dataset.prefetch(tf.data.experimental.AUTOTUNE)
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example."""
  # Attach the Transform graph to the model so it is exported alongside it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not provided at serving time.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  # Read back the materialized, transformed examples for training/eval.
  train_dataset = _input_fn(
      fn_args.train_files, tf_transform_output, batch_size=_TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(
      fn_args.eval_files, tf_transform_output, batch_size=_EVAL_BATCH_SIZE)

  # Build and compile the model under a distribution strategy scope so that
  # its variables are created as distributed variables.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    bert_layer = hub.KerasLayer(_BERT_LINK, trainable=True)
    model = build_and_compile_bert_classifier(bert_layer, _MAX_LEN, 2)

  model.fit(
      train_dataset,
      epochs=_EPOCHS,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps)

  # Export with a serving signature that accepts serialized tf.Example protos.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""MNIST handwritten digit classification example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ImportExampleGen
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
# Pipeline name; also used to namespace the pipeline root, serving directories
# and the metadata database below.
_pipeline_name = 'mnist_native_keras'

# This example assumes that MNIST data is stored in ~/mnist/data and the utility
# function is in ~/mnist. Feel free to customize as needed.
_mnist_root = os.path.join(os.environ['HOME'], 'mnist')
_data_root = os.path.join(_mnist_root, 'data')

# Python module files to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_mnist_root, 'mnist_utils_native_keras.py')
_module_file_lite = os.path.join(
    _mnist_root, 'mnist_utils_native_keras_lite.py')

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_mnist_root, 'serving_model', _pipeline_name)
_serving_model_dir_lite = os.path.join(
    _mnist_root, 'serving_model_lite', _pipeline_name)

# Directory and data locations. This example assumes all of the images,
# example code, and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')

# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, module_file_lite: Text,
                     serving_model_dir: Text, serving_model_dir_lite: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the handwritten digit classification example using TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline outputs.
    data_root: Directory holding the input MNIST data.
    module_file: Module file with the user functions for the Keras trainer.
    module_file_lite: Module file with the user functions for the TFLite
      trainer.
    serving_model_dir: Push destination for the Keras model.
    serving_model_dir_lite: Push destination for the TFLite model.
    metadata_path: Path to the sqlite ML Metadata database.
    beam_pipeline_args: Arguments for Beam powered components.

  Returns:
    A TFX pipeline object wiring all components together.
  """
  # Brings data into the pipeline.
  example_gen = ImportExampleGen(input_base=data_root)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  def _create_trainer(module_file, instance_name):
    # Both trainers share the same inputs and step counts; only the module
    # file and the instance name differ.
    return Trainer(
        module_file=module_file,
        custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
        examples=transform.outputs['transformed_examples'],
        transform_graph=transform.outputs['transform_graph'],
        schema=schema_gen.outputs['schema'],
        train_args=trainer_pb2.TrainArgs(num_steps=5000),
        eval_args=trainer_pb2.EvalArgs(num_steps=100),
        instance_name=instance_name)

  # Uses user-provided Python function that trains a Keras model.
  trainer = _create_trainer(module_file, 'mnist')

  # Trains the same model as the one above, but converts it into a TFLite one.
  trainer_lite = _create_trainer(module_file_lite, 'mnist_lite')

  # TODO(b/150949276): Add resolver back once it supports two trainers.

  # Uses TFMA to compute evaluation statistics over features of a model and
  # performs quality validation of a candidate model.
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='image_class')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.config.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.8})))
          ])
      ])

  # The TFLite evaluator reuses the same config, differing only in model type.
  eval_config_lite = tfma.EvalConfig()
  eval_config_lite.CopyFrom(eval_config)
  # Informs the evaluator that the model is a TFLite model.
  eval_config_lite.model_specs[0].model_type = 'tf_lite'

  # Uses TFMA to compute the evaluation statistics over features of a model.
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      eval_config=eval_config,
      instance_name='mnist')

  # Uses TFMA to compute the evaluation statistics over features of a TFLite
  # model.
  evaluator_lite = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer_lite.outputs['model'],
      eval_config=eval_config_lite,
      instance_name='mnist_lite')

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)),
      instance_name='mnist')

  # Checks whether the TFLite model passed the validation steps and pushes the
  # model to a file destination if check passed.
  pusher_lite = Pusher(
      model=trainer_lite.outputs['model'],
      model_blessing=evaluator_lite.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir_lite)),
      instance_name='mnist_lite')

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          trainer_lite,
          evaluator,
          evaluator_lite,
          pusher,
          pusher_lite,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python mnist_pipeline_native_keras.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Build the pipeline first, then hand it to the local Beam runner.
  mnist_pipeline = _create_pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      data_root=_data_root,
      module_file=_module_file,
      module_file_lite=_module_file_lite,
      serving_model_dir=_serving_model_dir,
      serving_model_dir_lite=_serving_model_dir_lite,
      metadata_path=_metadata_path,
      beam_pipeline_args=_beam_pipeline_args)
  BeamDagRunner().run(mnist_pipeline)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import absl
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx_bsl.tfxio import dataset_options
# Each MNIST example consists of an image of a handwritten digit and its
# label: the class indicating digits 0 through 9.
IMAGE_KEY = 'image_floats'
LABEL_KEY = 'image_class'
def transformed_name(key):
  """Returns the post-transform feature name for the raw feature `key`."""
  return '{}_xf'.format(key)
def input_fn(file_pattern: List[Text],
             data_accessor: DataAccessor,
             tf_transform_output: tft.TFTransformOutput,
             batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: Number of consecutive elements of the returned dataset to
      combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuples where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  options = dataset_options.TensorFlowDatasetOptions(
      batch_size=batch_size, label_key=transformed_name(LABEL_KEY))
  dataset = data_accessor.tf_dataset_factory(
      file_pattern, options, tf_transform_output.transformed_metadata.schema)
  # Repeat indefinitely; the trainer controls the number of steps.
  return dataset.repeat()
def build_keras_model() -> tf.keras.Model:
  """Creates a DNN Keras model for classifying MNIST data.

  Returns:
    A compiled Keras Model.
  """
  # The model below is built with Sequential API, please refer to
  # https://www.tensorflow.org/guide/keras/overview for all API options.
  model = tf.keras.Sequential()
  # Input is the flattened 28x28 image (784 floats), named after the
  # transformed feature so the transformed dataset feeds it directly.
  model.add(
      tf.keras.layers.InputLayer(
          input_shape=(784,), name=transformed_name(IMAGE_KEY)))
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dropout(0.2))
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dropout(0.2))
  # One output per digit class; softmax yields class probabilities.
  model.add(tf.keras.layers.Dense(10, activation='softmax'))
  model.compile(
      loss='sparse_categorical_crossentropy',
      # `learning_rate` replaces the deprecated `lr` keyword argument.
      optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0015),
      metrics=['sparse_categorical_accuracy'])
  model.summary(print_fn=absl.logging.info)
  return model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: Map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  # The input float values for the image encoding are in the range [-0.5, 0.5],
  # so scale_by_min_max is an identity operation: the range is preserved.
  scaled_image = tft.scale_by_min_max(inputs[IMAGE_KEY], -0.5, 0.5)
  # TODO(b/157064428): Support label transformation for Keras.
  # Do not apply label transformation as it will result in wrong evaluation.
  return {
      transformed_name(IMAGE_KEY): scaled_image,
      transformed_name(LABEL_KEY): inputs[LABEL_KEY],
  }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.examples.mnist import mnist_utils_native_keras_base as base
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example."""
  # Keep a handle to the transform graph on the model so it is saved with it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not an input at serving time.
    feature_spec.pop(base.LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform preprocessing callback; delegates to the shared base module.

  Args:
    inputs: Map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = base.preprocessing_fn(inputs)
  return outputs
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  # Batch size 40 for both training and evaluation.
  train_dataset = base.input_fn(fn_args.train_files, fn_args.data_accessor,
                                tf_transform_output, 40)
  eval_dataset = base.input_fn(fn_args.eval_files, fn_args.data_accessor,
                               tf_transform_output, 40)

  # Build the model under a distribution strategy scope so its variables are
  # created as distributed variables.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = base.build_keras_model()

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Export with a serving signature that accepts serialized tf.Example protos.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(
              model, tf_transform_output).get_concrete_function(
                  tf.TensorSpec(shape=[None], dtype=tf.string, name='examples'))
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.components.trainer.rewriting import converters
from tfx.components.trainer.rewriting import rewriter
from tfx.components.trainer.rewriting import rewriter_factory
from tfx.dsl.io import fileio
from tfx.examples.mnist import mnist_utils_native_keras_base as base
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that feeds the input tensor into the model."""
  # Keep a handle to the transform graph on the model so it is saved with it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(image_tensor):
    """Serving signature: transform the raw image floats, then predict."""
    raw_features = {base.IMAGE_KEY: image_tensor}
    return model(model.tft_layer(raw_features))

  return serve_tf_examples_fn
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Delegates to the shared implementation in mnist_utils_native_keras_base.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  return base.preprocessing_fn(inputs)
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  # Batch size 40 for both training and evaluation.
  train_dataset = base.input_fn(fn_args.train_files, fn_args.data_accessor,
                                tf_transform_output, 40)
  eval_dataset = base.input_fn(fn_args.eval_files, fn_args.data_accessor,
                               tf_transform_output, 40)

  # Build the model under a distribution strategy scope so its variables are
  # created as distributed variables.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = base.build_keras_model()

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Serving signature takes the raw flattened image floats directly (no
  # tf.Example parsing), matching what a TFLite caller provides.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(
              model, tf_transform_output).get_concrete_function(
                  tf.TensorSpec(
                      shape=[None, 784],
                      dtype=tf.float32,
                      name='image_floats'))
  }
  # Save a regular SavedModel to a temporary location first; the TFLite
  # rewriter consumes it and writes the converted model to the final
  # serving directory.
  temp_saving_model_dir = os.path.join(fn_args.serving_model_dir, 'temp')
  model.save(temp_saving_model_dir, save_format='tf', signatures=signatures)

  tfrw = rewriter_factory.create_rewriter(
      rewriter_factory.TFLITE_REWRITER, name='tflite_rewriter')
  converters.rewrite_saved_model(temp_saving_model_dir,
                                 fn_args.serving_model_dir,
                                 tfrw,
                                 rewriter.ModelType.TFLITE_MODEL)

  # The intermediate SavedModel is no longer needed once rewritten.
  fileio.rmtree(temp_saving_model_dir)
"""IMDB Sentiment Analysis example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Pipeline name; also used to namespace the pipeline root, serving directory
# and the metadata database below.
_pipeline_name = 'imdb_native_keras'

# This example assumes that IMDB review data is stored in ~/imdb/data and the
# utility function is in ~/imdb. Feel free to customize as needed.
_imdb_root = os.path.join(os.environ['HOME'], 'imdb')
_data_root = os.path.join(_imdb_root, 'data')

# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_imdb_root, 'imdb_utils_native_keras.py')

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_imdb_root, 'serving_model', _pipeline_name)

# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')

# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the imdb sentiment analysis pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline outputs.
    data_root: Directory containing the input CSV data.
    module_file: Module file with the Transform/Trainer user functions.
    serving_model_dir: Push destination for the blessed model.
    metadata_path: Path to the sqlite ML Metadata database.
    beam_pipeline_args: Arguments for Beam powered components.

  Returns:
    A TFX pipeline object wiring all components together.
  """
  # Split the input CSV into train/eval (9:1) via hash buckets.
  output = example_gen_pb2.Output(
      split_config=example_gen_pb2.SplitConfig(splits=[
          example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=9),
          example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1)
      ]))

  # Brings data into the pipeline.
  example_gen = CsvExampleGen(input_base=data_root, output_config=output)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Uses user-provided Python function that trains a model.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=500),
      eval_args=trainer_pb2.EvalArgs(num_steps=200))

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='BinaryAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          # Increase this threshold when training on complete
                          # dataset.
                          lower_bound={'value': 0.4}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-2})))
          ])
      ])

  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  components = [
      example_gen,
      statistics_gen,
      schema_gen,
      example_validator,
      transform,
      trainer,
      model_resolver,
      evaluator,
      pusher,
  ]

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      enable_cache=True,
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python imdb_pipeline_native_keras.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Build the pipeline first, then hand it to the local Beam runner.
  imdb_pipeline = _create_pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      data_root=_data_root,
      module_file=_module_file,
      serving_model_dir=_serving_model_dir,
      metadata_path=_metadata_path,
      beam_pipeline_args=_beam_pipeline_args)
  BeamDagRunner().run(imdb_pipeline)
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_artifacts_resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Pipeline name; also used to namespace the pipeline root, serving directory
# and the metadata database below.
_pipeline_name = 'chicago_taxi_warmstart'

# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')

# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)

# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')

# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Warm-starts the Trainer from the latest previously-produced model.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline outputs.
    data_root: Directory containing the input CSV data.
    module_file: Module file with the Transform/Trainer user functions.
    serving_model_dir: Push destination for the blessed model.
    metadata_path: Path to the sqlite ML Metadata database.
    beam_pipeline_args: Arguments for Beam powered components.

  Returns:
    A TFX pipeline object wiring all components together.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Get the latest model so that we can warm start from the model.
  latest_model_resolver = ResolverNode(
      instance_name='latest_model_resolver',
      resolver_class=latest_artifacts_resolver.LatestArtifactsResolver,
      latest_model=Channel(type=Model))

  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      # Warm-start from the most recent model, if any.
      base_model=latest_model_resolver.outputs['latest_model'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute an evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])

  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          latest_model_resolver, trainer, model_resolver, evaluator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python taxi_pipeline_warmstart.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Build the pipeline first, then hand it to the local Beam runner.
  taxi_pipeline = _create_pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      data_root=_data_root,
      module_file=_module_file,
      serving_model_dir=_serving_model_dir,
      metadata_path=_metadata_path,
      beam_pipeline_args=_beam_pipeline_args)
  BeamDagRunner().run(taxi_pipeline)
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import List, Text
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
from tfx.orchestration.airflow.airflow_dag_runner import AirflowPipelineConfig
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# TODO(jyzhao): rename to chicago_taxi_airflow.
_pipeline_name = 'chicago_taxi_simple'

# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# Airflow-specific configs; these will be passed directly to airflow.
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Python module file with user code for Transform and Trainer.
    serving_model_dir: Filesystem destination where Pusher exports the model.
    metadata_path: Path of the SQLite ML-metadata database file.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A logical TFX pipeline.Pipeline object wiring all components together.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, model_resolver, evaluator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# The module-level name 'DAG' below needs to be kept for Airflow to detect the
# dag: AirflowDagRunner.run() returns the Airflow DAG object, and the Airflow
# scheduler discovers DAGs by scanning module globals.
DAG = AirflowDagRunner(AirflowPipelineConfig(_airflow_config)).run(
    _create_pipeline(
        pipeline_name=_pipeline_name,
        pipeline_root=_pipeline_root,
        data_root=_data_root,
        module_file=_module_file,
        serving_model_dir=_serving_model_dir,
        metadata_path=_metadata_path,
        beam_pipeline_args=_beam_pipeline_args))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx_bsl.tfxio import dataset_options
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform.
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys for the label and the fare feature used to derive the label.
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
  """Return the post-transform feature name for every key in `keys`."""
  return list(map(_transformed_name, keys))
# Tf.Transform considers these features as "raw".
def _get_raw_feature_spec(schema):
  """Derive the raw (pre-transform) feature spec from a schema proto."""
  feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
  return feature_spec
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Already-dense inputs pass through untouched.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  if x.dtype == tf.string:
    fill_value = ''
  else:
    fill_value = 0
  # Pin the second dimension to exactly 1 so the squeeze below is valid.
  narrowed = tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1])
  dense = tf.sparse.to_dense(narrowed, fill_value)
  return tf.squeeze(dense, axis=1)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature; out-of-vocab values are hashed.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    # Quantile-bucketize continuous location features.
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    # Already-integerized categoricals only need densification.
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first).
    warm_start_from: Optional directory to warm start from.

  Returns:
    A tf.estimator.DNNLinearCombinedClassifier.
  """
  # Dense numeric inputs feed the DNN side of the model.
  dense_columns = []
  for name in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS):
    dense_columns.append(tf.feature_column.numeric_column(name, shape=()))

  # All integerized features feed the linear (wide) side of the model.
  sparse_columns = []
  for name in _transformed_names(_VOCAB_FEATURE_KEYS):
    sparse_columns.append(
        tf.feature_column.categorical_column_with_identity(
            name, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0))
  for name in _transformed_names(_BUCKET_FEATURE_KEYS):
    sparse_columns.append(
        tf.feature_column.categorical_column_with_identity(
            name, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0))
  for name, num_buckets in zip(
      _transformed_names(_CATEGORICAL_FEATURE_KEYS),
      _MAX_CATEGORICAL_FEATURE_VALUES):
    sparse_columns.append(
        tf.feature_column.categorical_column_with_identity(
            name, num_buckets=num_buckets, default_value=0))

  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=sparse_columns,
      dnn_feature_columns=dense_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_output, schema):
  """Build the serving inputs.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is never provided at serving time.
  raw_feature_spec.pop(_LABEL_KEY)
  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()
  # Apply the tf.Transform graph to the parsed raw features.
  transformed_features = tf_transform_output.transform_raw_features(
      serving_input_receiver.features)
  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)
  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  features = tf.io.parse_example(
      serialized=serialized_tf_example, features=raw_feature_spec)
  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  transformed_features = tf_transform_output.transform_raw_features(
      features)
  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}
  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
  features.update(transformed_features)
  return tfma.export.EvalInputReceiver(
      features=features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[_transformed_name(_LABEL_KEY)])
def _input_fn(file_pattern: List[Text],
              data_accessor: DataAccessor,
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # Batching and label extraction are delegated to the dataset factory.
  options = dataset_options.TensorFlowDatasetOptions(
      batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY))
  transformed_schema = tf_transform_output.transformed_metadata.schema
  return data_accessor.tf_dataset_factory(
      file_pattern, options, transformed_schema)
# TFX will call this function.
def trainer_fn(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  # Number of nodes in the first layer of the DNN.
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7
  train_batch_size = 40
  eval_batch_size = 40
  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)
  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      trainer_fn_args.data_accessor,
      tf_transform_output,
      batch_size=train_batch_size)
  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      trainer_fn_args.data_accessor,
      tf_transform_output,
      batch_size=eval_batch_size)
  train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)
  serving_receiver_fn = lambda: _example_serving_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)
  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')
  # Keep multiple checkpoint files for distributed training, note that
  # keep_max_checkpoint should be greater or equal to the number of replicas to
  # avoid race condition.
  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=5)
  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)
  # Warm-start from a previously trained model when one is provided upstream.
  warm_start_from = trainer_fn_args.base_model
  estimator = _build_estimator(
      # Construct layers sizes with exponential decay.
      hidden_units=[
          max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
          for i in range(num_dnn_layers)
      ],
      config=run_config,
      warm_start_from=warm_start_from)
  # Create an input receiver for TFMA processing.
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)
  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
"""Chicago Taxi example demonstrating the usage of RuntimeParameter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import kfp
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.proto import pusher_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'taxi_pipeline_with_parameters'
# Path of pipeline root, should be a GCS path. The KFP run ID placeholder is
# appended so each run writes its artifacts under its own directory.
_pipeline_root = os.path.join('gs://my-bucket', 'tfx_taxi_simple',
                              kfp.dsl.RUN_ID_PLACEHOLDER)
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_parameterized_pipeline(
    pipeline_name: Text, pipeline_root: Text, enable_cache: bool,
    beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Creates a simple TFX pipeline with RuntimeParameter.

  Args:
    pipeline_name: The name of the pipeline.
    pipeline_root: The root of the pipeline output.
    enable_cache: Whether to enable cache in this pipeline.
    beam_pipeline_args: Pipeline arguments for Beam powered Components.

  Returns:
    A logical TFX pipeline.Pipeline object.
  """
  # First, define the pipeline parameters.
  # Path to the CSV data file, under which there should be a data.csv file.
  data_root = data_types.RuntimeParameter(
      name='data-root',
      default='gs://my-bucket/data',
      ptype=Text,
  )
  # Path to the transform module file.
  transform_module_file = data_types.RuntimeParameter(
      name='transform-module',
      default='gs://my-bucket/modules/transform_module.py',
      ptype=Text,
  )
  # Path to the trainer module file.
  trainer_module_file = data_types.RuntimeParameter(
      name='trainer-module',
      default='gs://my-bucket/modules/trainer_module.py',
      ptype=Text,
  )
  # Number of epochs in training.
  train_steps = data_types.RuntimeParameter(
      name='train-steps',
      default=10,
      ptype=int,
  )
  # Number of epochs in evaluation.
  eval_steps = data_types.RuntimeParameter(
      name='eval-steps',
      default=5,
      ptype=int,
  )
  # The input data location is parameterized by data_root.
  example_gen = CsvExampleGen(input_base=data_root)
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # The module file used in Transform and Trainer component is parameterized by
  # transform_module_file.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=transform_module_file)
  # The numbers of steps in train_args are specified as RuntimeParameter with
  # name 'train-steps' and 'eval-steps', respectively.
  trainer = Trainer(
      module_file=trainer_module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args={'num_steps': train_steps},
      eval_args={'num_steps': eval_steps})
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Pushes the blessed model under <pipeline root>/model_serving.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=os.path.join(
                  str(pipeline.ROOT_PARAMETER), 'model_serving'))))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, model_resolver, evaluator, pusher
      ],
      enable_cache=enable_cache,
      beam_pipeline_args=beam_pipeline_args)
if __name__ == '__main__':
  # Use a name other than `pipeline` for the built pipeline: the previous code
  # rebound the imported `tfx.orchestration.pipeline` module name at module
  # scope, which only worked because the RHS is evaluated before the rebinding
  # and is fragile/confusing to maintain.
  taxi_pipeline = _create_parameterized_pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      enable_cache=True,
      beam_pipeline_args=_beam_pipeline_args)

  # This pipeline automatically injects the Kubeflow TFX image if the
  # environment variable 'KUBEFLOW_TFX_IMAGE' is defined. Currently, the tfx
  # cli tool exports the environment variable to pass to the pipelines.
  tfx_image = os.environ.get('KUBEFLOW_TFX_IMAGE', None)
  config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
      kubeflow_metadata_config=kubeflow_dag_runner
      .get_default_kubeflow_metadata_config(),
      tfx_image=tfx_image)
  kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(config=config)
  # Compiles the pipeline into a KFP package (or submits it, per runner config).
  kfp_runner.run(taxi_pipeline)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import absl
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx_bsl.tfxio import dataset_options
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform.
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys for the label and the fare feature used to derive the label.
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
  """Return the '_xf'-suffixed name for each feature key in `keys`."""
  names = []
  for feature_key in keys:
    names.append(_transformed_name(feature_key))
  return names
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Already-dense inputs need no conversion.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  default_value = '' if x.dtype == tf.string else 0
  # Pin the second dimension to exactly 1 so the squeeze below is valid.
  return tf.squeeze(
      tf.sparse.to_dense(
          tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
          default_value),
      axis=1)
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and applies TFT."""

  # Attach the transform layer to the model so it is tracked and exported
  # together with the model's own variables.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    if not model.tft_layer.built:
      # TODO(b/175357313): We need to call the tft_layer with the label so that
      # it will be included in the layer's input_spec. This is needed so that
      # TFMA can call tft_layer with labels. However, the actual call for
      # inference is done without the label.
      parsed_features_with_label = tf.io.parse_example(
          serialized_tf_examples, feature_spec)
      _ = model.tft_layer(parsed_features_with_label)
    # Serving requests never carry the label.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
              data_accessor: DataAccessor,
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
    The dataset repeats indefinitely, so consumers must bound iteration with
    explicit step counts.
  """
  return data_accessor.tf_dataset_factory(
      file_pattern,
      dataset_options.TensorFlowDatasetOptions(
          batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)),
      tf_transform_output.transformed_metadata.schema).repeat()
def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying taxi data.

  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).
      Defaults to [100, 70, 50, 25] when None.

  Returns:
    A keras Model.
  """
  # Dense numeric inputs feed the deep part of the model.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  # All integerized features feed the wide (linear) part of the model.
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  # Wide columns must be wrapped as indicator columns for DenseFeatures.
  indicator_column = [
      tf.feature_column.indicator_column(categorical_column)
      for categorical_column in categorical_columns
  ]
  model = _wide_and_deep_classifier(
      # TODO(b/139668410) replace with premade wide_and_deep keras model
      wide_columns=indicator_column,
      deep_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25])
  return model
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):
  """Build a simple keras wide and deep model.

  Args:
    wide_columns: Feature columns wrapped in indicator_column for wide (linear)
      part of the model.
    deep_columns: Feature columns for deep part of the model.
    dnn_hidden_units: [int], the layer sizes of the hidden DNN.

  Returns:
    A compiled Wide and Deep Keras model producing a scalar sigmoid output.
  """
  # Following values are hard coded for simplicity in this example,
  # however preferably they should be passed in as hparams.

  # Keras needs the feature definitions at compile time.
  # TODO(b/139081439): Automate generation of input layers from FeatureColumn.
  input_layers = {
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
      for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  }
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_VOCAB_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_BUCKET_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)
  })

  # TODO(b/161952382): Replace with Keras premade models and
  # Keras preprocessing layers.
  deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
  for numnodes in dnn_hidden_units:
    deep = tf.keras.layers.Dense(numnodes)(deep)
  wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)

  output = tf.keras.layers.Dense(
      1, activation='sigmoid')(
          tf.keras.layers.concatenate([deep, wide]))
  output = tf.squeeze(output, -1)

  model = tf.keras.Model(input_layers, output)
  model.compile(
      loss='binary_crossentropy',
      # `lr` is a deprecated alias in tf.keras optimizers; use the canonical
      # `learning_rate` keyword (same value, same behavior).
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      metrics=[tf.keras.metrics.BinaryAccuracy()])
  model.summary(print_fn=absl.logging.info)
  return model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature; out-of-vocab values are hashed.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    # Quantile-bucketize continuous location features.
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]),
        _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    # Already-integerized categoricals only need densification.
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_transformed_name(_LABEL_KEY)] = tf.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # Number of nodes in the first layer of the DNN.
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7

  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  # The datasets repeat indefinitely; train_steps/eval_steps bound iteration.
  train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
                            tf_transform_output, 40)
  eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
                           tf_transform_output, 40)

  # Build the model under a MirroredStrategy scope so its variables are
  # replicated across locally available devices.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = _build_keras_model(
        # Construct layers sizes with exponential decay.
        hidden_units=[
            max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
            for i in range(num_dnn_layers)
        ])

  # Write logs to path.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Export with a serving signature that accepts serialized tf.Examples.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""Chicago taxi example pipeline for training and offline inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import BulkInferrer
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import bulk_inferrer_pb2
from tfx.proto import example_gen_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Name used to identify this pipeline in ML Metadata and in output paths.
_pipeline_name = 'chicago_taxi_with_inference'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_training_data_root = os.path.join(_taxi_root, 'data', 'simple')
_inference_data_root = os.path.join(_taxi_root, 'data', 'unlabelled')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text,
                     training_data_root: Text, inference_data_root: Text,
                     module_file: Text, metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    training_data_root: Directory containing the labelled training CSV data.
    inference_data_root: Directory containing the unlabelled inference data.
    module_file: Python module with user-provided Transform/Trainer functions.
    metadata_path: Path to the SQLite ML Metadata database.
    beam_pipeline_args: Arguments forwarded to Beam powered components.

  Returns:
    A TFX pipeline wiring together training, evaluation and batch inference.
  """
  # Brings training data into the pipeline or otherwise joins/converts
  # training data.
  training_example_gen = CsvExampleGen(
      input_base=training_data_root, instance_name='training_example_gen')
  # Computes statistics over data for visualization and example validation.
  # Uses the canonical `examples` kwarg (the `input_data` alias is
  # deprecated) for consistency with the sibling pipelines.
  statistics_gen = StatisticsGen(
      examples=training_example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=training_example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=training_example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Brings inference data into the pipeline.
  inference_example_gen = CsvExampleGen(
      input_base=inference_data_root,
      output_config=example_gen_pb2.Output(
          split_config=example_gen_pb2.SplitConfig(splits=[
              example_gen_pb2.SplitConfig.Split(
                  name='unlabelled', hash_buckets=100)
          ])),
      instance_name='inference_example_gen')
  # Performs offline batch inference over inference examples.
  bulk_inferrer = BulkInferrer(
      examples=inference_example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      # Empty data_spec.example_splits will result in using all splits.
      data_spec=bulk_inferrer_pb2.DataSpec(),
      model_spec=bulk_inferrer_pb2.ModelSpec())
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          training_example_gen, inference_example_gen, statistics_gen,
          schema_gen, example_validator, transform, trainer, model_resolver,
          evaluator, bulk_inferrer
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $ python taxi_pipeline_with_inference.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          training_data_root=_training_data_root,
          inference_data_root=_inference_data_root,
          module_file=_module_file,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Name used to identify this pipeline in ML Metadata and in output paths.
_pipeline_name = 'chicago_taxi_beam'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Python module with user-provided Transform/Trainer functions.
    serving_model_dir: Directory to which the blessed model is pushed.
    metadata_path: Path to the SQLite ML Metadata database.
    beam_pipeline_args: Arguments forwarded to Beam powered components.

  Returns:
    A TFX pipeline object.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $ python taxi_pipeline_beam.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
# Name used to identify this pipeline in ML Metadata and in output paths.
_pipeline_name = 'chicago_taxi_beam'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Python module with user-provided Transform/Trainer functions.
    serving_model_dir: Directory to which the blessed model is pushed.
    metadata_path: Path to the SQLite ML Metadata database.
    beam_pipeline_args: Arguments forwarded to Beam powered components.

  Returns:
    A TFX pipeline object.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  # Pass the path directly via `input_base`; the `external_input` +
  # `input=` form is deprecated and inconsistent with the sibling pipelines.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      # Change threshold will be ignored if there is no baseline (first run).
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $ python taxi_pipeline_local.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  LocalDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Name used to identify this pipeline in ML Metadata and in output paths.
_pipeline_name = 'chicago_taxi_native_keras'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils_native_keras.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Python module with user-provided Transform/Trainer functions.
    serving_model_dir: Directory to which the blessed model is pushed.
    metadata_path: Path to the SQLite ML Metadata database.
    beam_pipeline_args: Arguments forwarded to Beam powered components.

  Returns:
    A TFX pipeline object.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=True)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses a user-provided module (native Keras); GenericExecutor runs the
  # module's run_fn rather than the Estimator-based trainer_fn.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=1000),
      eval_args=trainer_pb2.EvalArgs(num_steps=150))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[
          tfma.ModelSpec(
              signature_name='serving_default', label_key='tips_xf',
              preprocessing_function_names=['tft_layer'])
      ],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='BinaryAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.6}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $ python taxi_pipeline_native_keras.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          metadata_path=_metadata_path,
          serving_model_dir=_serving_model_dir,
          beam_pipeline_args=_beam_pipeline_args))
"""A client for the chicago_taxi demo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import base64
import json
import os
import subprocess
import tempfile
import requests
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
from tfx.utils import io_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
# Timeout (seconds) for REST prediction requests to a local model server.
_LOCAL_INFERENCE_TIMEOUT_SECONDS = 5.0
# Name of the label feature, stripped from the schema before inference.
_LABEL_KEY = 'tips'
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Returns the raw (pre-transform) feature spec for the given schema."""
  feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
  return feature_spec
def _make_proto_coder(schema):
  """Returns a coder that encodes examples as serialized tf.Example protos."""
  spec = _get_raw_feature_spec(schema)
  return tft_coders.ExampleProtoCoder(dataset_schema.from_feature_spec(spec))
def _make_csv_coder(schema, column_names):
  """Returns a coder for tf.transform to read csv files."""
  spec = _get_raw_feature_spec(schema)
  return tft_coders.CsvCoder(column_names,
                             dataset_schema.from_feature_spec(spec))
def _read_schema(path):
  """Reads a schema from the provided location.

  Args:
    path: The location of the file holding a serialized Schema proto.

  Returns:
    The parsed Schema proto.
  """
  schema = schema_pb2.Schema()
  text_format.Parse(file_io.read_file_to_string(path), schema)
  return schema
def _do_local_inference(host, port, serialized_examples):
  """Performs inference on a model hosted by the host:port server.

  Args:
    host: Hostname of the TensorFlow Serving REST endpoint.
    port: Port (string) of the TensorFlow Serving REST endpoint.
    serialized_examples: Serialized tf.Example protos to send for prediction.
  """
  # The encoding follows the guidelines in:
  # https://www.tensorflow.org/tfx/serving/api_rest
  instances = [{
      'b64': base64.b64encode(serialized_example).decode('utf-8')
  } for serialized_example in serialized_examples]
  # Serialize the request body with the json module rather than by string
  # concatenation, so the payload is guaranteed to be well-formed JSON.
  json_request = json.dumps({'instances': instances})
  server_url = 'http://' + host + ':' + port + '/v1/models/chicago_taxi:predict'
  response = requests.post(
      server_url, data=json_request, timeout=_LOCAL_INFERENCE_TIMEOUT_SECONDS)
  response.raise_for_status()
  prediction = response.json()
  print(json.dumps(prediction, indent=4))
def _do_aiplatform_inference(model, version, serialized_examples):
  """Performs inference on the model:version in AI Platform."""
  working_dir = tempfile.mkdtemp()
  instances_file = os.path.join(working_dir, 'test.json')
  # The encoding follows the example in:
  # https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/quests/tpu/invoke_model.py
  json_lines = []
  for example_bytes in serialized_examples:
    encoded = base64.b64encode(example_bytes).decode('utf-8')
    json_lines.append('{ "inputs": { "b64": "%s" } }' % encoded)
  file_io.write_string_to_file(instances_file, '\n'.join(json_lines))
  # Shell out to gcloud for the actual prediction call.
  gcloud_command = [
      'gcloud', 'ai-platform', 'predict', '--model', model, '--version',
      version, '--json-instances', instances_file
  ]
  print(subprocess.check_output(gcloud_command))
def _do_inference(model_handle, examples_file, num_examples, schema):
  """Sends requests to the model and prints the results.

  Args:
    model_handle: handle to the model. This can be either
      "aiplatform:model:version" or "host:port".
    examples_file: path to csv file containing examples, with the first line
      assumed to have the column headers.
    num_examples: number of requests to send to the server.
    schema: a Schema proto describing the input data. Mutated in place: the
      label feature is removed before building the coders.
  """
  # Strip the label feature from the schema; the serving model does not take
  # the label as an input.
  filtered_features = [
      feature for feature in schema.feature if feature.name != _LABEL_KEY
  ]
  del schema.feature[:]
  schema.feature.extend(filtered_features)

  column_names = io_utils.load_csv_column_names(examples_file)
  csv_coder = _make_csv_coder(schema, column_names)
  proto_coder = _make_proto_coder(schema)

  serialized_examples = []
  # Use a context manager so the file handle is released even if decoding
  # raises (the original leaked the handle).
  with open(examples_file, 'r') as input_file:
    input_file.readline()  # skip header line
    for _ in range(num_examples):
      one_line = input_file.readline()
      if not one_line:
        print('End of example file reached')
        break
      one_example = csv_coder.decode(one_line)
      serialized_example = proto_coder.encode(one_example)
      serialized_examples.append(serialized_example)

  parsed_model_handle = model_handle.split(':')
  if parsed_model_handle[0] == 'aiplatform':
    _do_aiplatform_inference(
        model=parsed_model_handle[1],
        version=parsed_model_handle[2],
        serialized_examples=serialized_examples)
  else:
    _do_local_inference(
        host=parsed_model_handle[0],
        port=parsed_model_handle[1],
        serialized_examples=serialized_examples)
def main(_):
  """Parses command-line flags and issues inference requests."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--num_examples',
      type=int,
      default=1,
      help=('Number of examples to send to the server.'))
  arg_parser.add_argument(
      '--server',
      required=True,
      help=('Prediction service host:port or aiplatform:model:version'))
  arg_parser.add_argument(
      '--examples_file',
      required=True,
      help=('Path to csv file containing examples.'))
  arg_parser.add_argument(
      '--schema_file', help='File holding the schema for the input data')
  flags, _ = arg_parser.parse_known_args()
  schema = _read_schema(flags.schema_file)
  _do_inference(flags.server, flags.examples_file, flags.num_examples, schema)
if __name__ == '__main__':
  app.run(main)
"""TFX ExampleValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Optional, Text
from absl import logging
from tfx import types
from tfx.components.example_validator import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import ExampleValidatorSpec
from tfx.utils import json_utils
class ExampleValidator(base_component.BaseComponent):
  """A TFX component to validate input examples.

  The ExampleValidator component uses [Tensorflow Data
  Validation](https://www.tensorflow.org/tfx/data_validation) to
  validate the statistics of some splits on input examples against a schema.

  The ExampleValidator component identifies anomalies in training and serving
  data. The component can be configured to detect different classes of anomalies
  in the data. It can:
    - perform validity checks by comparing data statistics against a schema that
      codifies expectations of the user.

  Schema Based Example Validation
  The ExampleValidator component identifies any anomalies in the example data by
  comparing data statistics computed by the StatisticsGen component against a
  schema. The schema codifies properties which the input data is expected to
  satisfy, and is provided and maintained by the user.

  Please see https://www.tensorflow.org/tfx/data_validation for more details.

  ## Example
  ```
  # Performs anomaly detection based on statistics and data schema.
  validate_stats = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=infer_schema.outputs['schema'])
  ```
  """

  SPEC_CLASS = ExampleValidatorSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               statistics: types.Channel = None,
               schema: types.Channel = None,
               exclude_splits: Optional[List[Text]] = None,
               anomalies: Optional[Text] = None,
               instance_name: Optional[Text] = None):
    """Construct an ExampleValidator component.

    Args:
      statistics: A Channel of type `standard_artifacts.ExampleStatistics`.
      schema: A Channel of type `standard_artifacts.Schema`. _required_
      exclude_splits: Names of splits that the example validator should not
        validate. Default behavior (when exclude_splits is set to None)
        is excluding no splits.
      anomalies: Output channel of type `standard_artifacts.ExampleAnomalies`.
      instance_name: Optional name assigned to this specific instance of
        ExampleValidator. Required only if multiple ExampleValidator components
        are declared in the same pipeline.
    """
    if exclude_splits is None:
      exclude_splits = []
      logging.info('Excluding no splits because exclude_splits is not set.')
    if not anomalies:
      anomalies = types.Channel(type=standard_artifacts.ExampleAnomalies)
    spec = ExampleValidatorSpec(
        statistics=statistics,
        schema=schema,
        exclude_splits=json_utils.dumps(exclude_splits),
        anomalies=anomalies)
    super(ExampleValidator, self).__init__(
        spec=spec, instance_name=instance_name)
"""Generic TFX example_validator executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Text
from absl import logging
import tensorflow_data_validation as tfdv
from tfx import types
from tfx.components.example_validator import labels
from tfx.components.util import value_utils
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import ANOMALIES_KEY
from tfx.types.standard_component_specs import EXCLUDE_SPLITS_KEY
from tfx.types.standard_component_specs import SCHEMA_KEY
from tfx.types.standard_component_specs import STATISTICS_KEY
from tfx.utils import io_utils
from tfx.utils import json_utils
# Default file name for anomalies output.
DEFAULT_FILE_NAME = 'anomalies.pbtxt'


class Executor(base_executor.BaseExecutor):
  """TensorFlow ExampleValidator component executor."""

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """TensorFlow ExampleValidator executor entrypoint.

    This validates statistics against the schema.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - statistics: A list of type `standard_artifacts.ExampleStatistics`
          generated by StatisticsGen.
        - schema: A list of type `standard_artifacts.Schema` which should
          contain a single schema artifact.
      output_dict: Output dict from key to a list of artifacts, including:
        - output: A list of 'standard_artifacts.ExampleAnomalies' of size one.
          It will include a single pbtxt file which contains all anomalies
          found.
      exec_properties: A dict of execution properties.
        - exclude_splits: JSON-serialized list of names of splits that the
          example validator should not validate.

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    # Load and deserialize exclude splits from execution properties.
    exclude_splits = json_utils.loads(
        exec_properties.get(EXCLUDE_SPLITS_KEY, 'null')) or []
    if not isinstance(exclude_splits, list):
      raise ValueError('exclude_splits in execution properties needs to be a '
                       'list. Got %s instead.' % type(exclude_splits))
    # Setup output splits.
    stats_artifact = artifact_utils.get_single_instance(
        input_dict[STATISTICS_KEY])
    stats_split_names = artifact_utils.decode_split_names(
        stats_artifact.split_names)
    split_names = [
        split for split in stats_split_names if split not in exclude_splits
    ]
    anomalies_artifact = artifact_utils.get_single_instance(
        output_dict[ANOMALIES_KEY])
    anomalies_artifact.split_names = artifact_utils.encode_split_names(
        split_names)

    schema = io_utils.SchemaReader().read(
        io_utils.get_only_uri_in_dir(
            artifact_utils.get_single_uri(input_dict[SCHEMA_KEY])))

    # Validate each retained split. `split_names` already has the excluded
    # splits filtered out, so no per-iteration exclusion check is needed.
    for split in split_names:
      logging.info(
          'Validating schema against the computed statistics for '
          'split %s.', split)
      label_inputs = {
          STATISTICS_KEY:
              tfdv.load_statistics(
                  io_utils.get_only_uri_in_dir(
                      os.path.join(stats_artifact.uri, split))),
          SCHEMA_KEY:
              schema
      }
      output_uri = artifact_utils.get_split_uri(
          output_dict[ANOMALIES_KEY], split)
      label_outputs = {labels.SCHEMA_DIFF_PATH: output_uri}
      self._Validate(label_inputs, label_outputs)
      logging.info(
          'Validation complete for split %s. Anomalies written to '
          '%s.', split, output_uri)

  def _Validate(self, inputs: Dict[Text, Any], outputs: Dict[Text,
                                                             Any]) -> None:
    """Validate the inputs and put validate result into outputs.

    This is the implementation part of example validator executor. This is
    intended for using or extending the executor without artifact dependecy.

    Args:
      inputs: A dictionary of labeled input values, including:
        - STATISTICS_KEY: the feature statistics to validate
        - SCHEMA_KEY: the schema to respect
        - (Optional) labels.ENVIRONMENT: if an environment is specified, only
          validate the feature statistics of the fields in that environment.
          Otherwise, validate all fields.
        - (Optional) labels.PREV_SPAN_FEATURE_STATISTICS: the feature
          statistics of a previous span.
        - (Optional) labels.PREV_VERSION_FEATURE_STATISTICS: the feature
          statistics of a previous version.
        - (Optional) labels.FEATURES_NEEDED: the feature needed to be
          validated on.
        - (Optional) labels.VALIDATION_CONFIG: the configuration of this
          validation.
        - (Optional) labels.EXTERNAL_CONFIG_VERSION: the version number of
          external config file.
      outputs: A dictionary of labeled output values, including:
        - labels.SCHEMA_DIFF_PATH: the path to write the schema diff to
    """
    schema = value_utils.GetSoleValue(inputs, SCHEMA_KEY)
    stats = value_utils.GetSoleValue(inputs, STATISTICS_KEY)
    schema_diff_path = value_utils.GetSoleValue(
        outputs, labels.SCHEMA_DIFF_PATH)
    anomalies = tfdv.validate_statistics(stats, schema)
    io_utils.write_pbtxt_file(
        os.path.join(schema_diff_path, DEFAULT_FILE_NAME), anomalies)
"""TFX Transform component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.transform import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.proto import transform_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TransformSpec
from tfx.utils import json_utils
class Transform(base_component.BaseComponent):
  """A TFX component to transform the input examples.

  The Transform component wraps TensorFlow Transform (tf.Transform) to
  preprocess data in a TFX pipeline. This component will load the
  preprocessing_fn from input module file, preprocess both 'train' and 'eval'
  splits of input examples, generate the `tf.Transform` output, and save both
  transform function and transformed examples to orchestrator desired locations.

  ## Providing a preprocessing function
  The TFX executor will use the estimator provided in the `module_file` file
  to train the model.  The Transform executor will look specifically for the
  `preprocessing_fn()` function within that file.

  An example of `preprocessing_fn()` can be found in the [user-supplied
  code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
  of the TFX Chicago Taxi pipeline example.

  ## Example
  ```
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=infer_schema.outputs['schema'],
      module_file=module_file)
  ```

  Please see https://www.tensorflow.org/tfx/transform for more details.
  """

  SPEC_CLASS = TransformSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      examples: types.Channel = None,
      schema: types.Channel = None,
      module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      preprocessing_fn: Optional[Union[Text,
                                       data_types.RuntimeParameter]] = None,
      splits_config: transform_pb2.SplitsConfig = None,
      transform_graph: Optional[types.Channel] = None,
      transformed_examples: Optional[types.Channel] = None,
      analyzer_cache: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None,
      materialize: bool = True,
      disable_analyzer_cache: bool = False,
      force_tf_compat_v1: bool = True,
      custom_config: Optional[Dict[Text, Any]] = None):
    """Construct a Transform component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples` (required).
        This should contain custom splits specified in splits_config. If
        custom split is not provided, this should contain two splits 'train'
        and 'eval'.
      schema: A Channel of type `standard_artifacts.Schema`. This should
        contain a single schema artifact.
      module_file: The file path to a python module file, from which the
        'preprocessing_fn' function will be loaded.
        Exactly one of 'module_file' or 'preprocessing_fn' must be supplied.
        The function needs to have the following signature:
        ```
        def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
          ...
        ```
        where the values of input and returned Dict are either tf.Tensor or
        tf.SparseTensor.
        If additional inputs are needed for preprocessing_fn, they can be passed
        in custom_config:
        ```
        def preprocessing_fn(inputs: Dict[Text, Any], custom_config:
                             Dict[Text, Any]) -> Dict[Text, Any]:
          ...
        ```
      preprocessing_fn: The path to python function that implements a
        'preprocessing_fn'. See 'module_file' for expected signature of the
        function. Exactly one of 'module_file' or 'preprocessing_fn' must be
        supplied.
      splits_config: A transform_pb2.SplitsConfig instance, providing splits
        that should be analyzed and splits that should be transformed. Note
        analyze and transform splits can have overlap. Default behavior (when
        splits_config is not set) is analyze the 'train' split and transform
        all splits. If splits_config is set, analyze cannot be empty.
      transform_graph: Optional output 'TransformPath' channel for output of
        'tf.Transform', which includes an exported Tensorflow graph suitable for
        both training and serving;
      transformed_examples: Optional output 'ExamplesPath' channel for
        materialized transformed examples, which includes transform splits as
        specified in splits_config. If custom split is not provided, this should
        include both 'train' and 'eval' splits.
      analyzer_cache: Optional input 'TransformCache' channel containing
        cached information from previous Transform runs. When provided,
        Transform will try use the cached calculation if possible.
      instance_name: Optional unique instance name. Necessary iff multiple
        transform components are declared in the same pipeline.
      materialize: If True, write transformed examples as an output. If False,
        `transformed_examples` must not be provided.
      disable_analyzer_cache: If False, Transform will use input cache if
        provided and write cache output. If True, `analyzer_cache` must not be
        provided.
      force_tf_compat_v1: (Optional) If True, Transform will use Tensorflow in
        compat.v1 mode irrespective of installed version of Tensorflow. Defaults
        to `True`. Note: The default value will be switched to `False` in a
        future release.
      custom_config: A dict which contains additional parameters that will be
        passed to preprocessing_fn.

    Raises:
      ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
        is supplied.
    """
    if bool(module_file) == bool(preprocessing_fn):
      raise ValueError(
          "Exactly one of 'module_file' or 'preprocessing_fn' must be supplied."
      )

    transform_graph = transform_graph or types.Channel(
        type=standard_artifacts.TransformGraph)

    # Materialization and the transformed_examples channel must agree: create
    # the channel on demand, or reject it when materialization is disabled.
    if materialize and transformed_examples is None:
      transformed_examples = types.Channel(
          type=standard_artifacts.Examples,
          matching_channel_name='examples')
    elif not materialize and transformed_examples is not None:
      raise ValueError(
          'Must not specify transformed_examples when materialize is False.')

    if disable_analyzer_cache:
      updated_analyzer_cache = None
      if analyzer_cache:
        raise ValueError(
            '`analyzer_cache` is set when disable_analyzer_cache is True.')
    else:
      updated_analyzer_cache = types.Channel(
          type=standard_artifacts.TransformCache)

    spec = TransformSpec(
        examples=examples,
        schema=schema,
        module_file=module_file,
        preprocessing_fn=preprocessing_fn,
        # The proto field is integral; booleans are stored as 0/1.
        force_tf_compat_v1=int(force_tf_compat_v1),
        splits_config=splits_config,
        transform_graph=transform_graph,
        transformed_examples=transformed_examples,
        analyzer_cache=analyzer_cache,
        updated_analyzer_cache=updated_analyzer_cache,
        custom_config=json_utils.dumps(custom_config))
    super(Transform, self).__init__(spec=spec, instance_name=instance_name)
"""Invoke transform executor for data transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import absl
from tfx.components.transform import labels
from tfx.components.transform.executor import Executor
from tfx.proto import example_gen_pb2
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import app
# pylint: enable=g-direct-tensorflow-import
def _run_transform(args, beam_pipeline_args):
  """Construct and run transform executor.

  Maps the parsed command-line arguments onto the labeled input/output dicts
  expected by the Transform Executor, then invokes Executor.Transform.

  Args:
    args: Parsed argparse namespace (see main() for the flag definitions).
    beam_pipeline_args: Remaining command-line arguments forwarded to Beam.
  """
  absl.logging.set_verbosity(absl.logging.INFO)
  inputs = {
      labels.ANALYZE_DATA_PATHS_LABEL:
          args.analyze_examples,
      # One file-format entry per analyze path.
      labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: [labels.FORMAT_TFRECORD] *
      len(args.analyze_examples),
      # NOTE(review): unlike ANALYZE_DATA_PATHS_LABEL above, the concatenated
      # path list here is wrapped in an extra list -- presumably the executor
      # expects grouped transform datasets; confirm against Executor.Transform.
      labels.TRANSFORM_DATA_PATHS_LABEL: [
          args.analyze_examples + args.transform_only_examples
      ],
      labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL:
          [labels.FORMAT_TFRECORD] *
          (len(args.analyze_examples) + len(args.transform_only_examples)),
      labels.SCHEMA_PATH_LABEL:
          args.input_schema_path,
      labels.PREPROCESSING_FN:
          args.preprocessing_fn_path,
      # The flag carries the enum *name*; the executor wants the enum value.
      labels.EXAMPLES_DATA_FORMAT_LABEL:
          example_gen_pb2.PayloadFormat.Value(args.example_data_format),
      labels.COMPUTE_STATISTICS_LABEL:
          args.compute_statistics,
      labels.BEAM_PIPELINE_ARGS:
          beam_pipeline_args,
  }
  outputs = {
      labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: args.transform_fn,
      labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: (
          args.transformed_examples),
      labels.PER_SET_STATS_OUTPUT_PATHS_LABEL: (args.per_set_stats_outputs),
      labels.TEMP_OUTPUT_LABEL: args.tmp_location,
  }
  executor = Executor(Executor.Context(beam_pipeline_args=beam_pipeline_args))
  executor.Transform(inputs, outputs, args.status_file)
def _parse_bool(value):
  """Parses a command-line boolean flag value.

  argparse's `type=bool` treats any non-empty string (including 'False') as
  True; this helper interprets the usual spellings correctly.

  Args:
    value: The raw command-line string (or an already-parsed bool default).

  Returns:
    The parsed boolean.

  Raises:
    argparse.ArgumentTypeError: If the value is not a recognized boolean.
  """
  if isinstance(value, bool):
    return value
  lowered = value.lower()
  if lowered in ('true', 't', '1', 'yes', 'y'):
    return True
  if lowered in ('false', 'f', '0', 'no', 'n'):
    return False
  raise argparse.ArgumentTypeError('Expected a boolean, got %r.' % value)


def main(argv):
  """Parses command-line flags and runs the Transform executor."""
  parser = argparse.ArgumentParser()
  # Arguments in inputs
  parser.add_argument(
      '--input_schema_path',
      type=str,
      required=True,
      help='Path to input schema')
  parser.add_argument(
      '--preprocessing_fn_path',
      type=str,
      required=True,
      help='Path to a preprocessing_fn module')
  parser.add_argument(
      '--use_tfdv',
      type=_parse_bool,
      default=True,
      help='Deprecated and ignored. DO NOT SET.')
  parser.add_argument(
      '--compute_statistics',
      type=_parse_bool,
      default=False,
      help='Whether computes statistics')
  parser.add_argument(
      '--analyze_examples',
      nargs='+',
      default=[],
      type=str,
      help='A space-separated list of paths to examples to be analyzed '
      'and transformed')
  parser.add_argument(
      '--transform_only_examples',
      nargs='+',
      default=[],
      type=str,
      help='A space-separated list of paths to examples to be transformed only')
  parser.add_argument(
      '--example_data_format',
      type=str,
      default=example_gen_pb2.PayloadFormat.Name(
          example_gen_pb2.FORMAT_TF_EXAMPLE),
      help='Example data format')
  # Arguments in outputs
  parser.add_argument(
      '--transform_fn',
      type=str,
      required=True,
      help='Path that TFTransformOutput will write to')
  parser.add_argument(
      '--tmp_location',
      type=str,
      required=True,
      help='Path to write temporary files. Executor does not own this '
      'directory. User or caller is responsible for cleanup')
  parser.add_argument(
      '--transformed_examples',
      nargs='+',
      type=str,
      default=[],
      help='A space-separated list of paths to write transformed examples')
  parser.add_argument(
      '--per_set_stats_outputs',
      nargs='+',
      type=str,
      default=[],
      help='Paths to statistics output')
  parser.add_argument(
      '--status_file', type=str, default='', help='Path to write status')
  args, beam_args = parser.parse_known_args(argv)
  _run_transform(args, beam_args)
if __name__ == '__main__':
  app.run(main=main)
"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.pusher import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
from tfx.utils import json_utils
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class Pusher(base_component.BaseComponent):
  """A TFX component to push validated TensorFlow models to a model serving platform.

  The `Pusher` component can be used to push a validated SavedModel from output
  of the [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) to
  [TensorFlow Serving](https://www.tensorflow.org/tfx/serving).  The Pusher
  will check the validation results from the [Evaluator
  component](https://www.tensorflow.org/tfx/guide/evaluator) and [InfraValidator
  component](https://www.tensorflow.org/tfx/guide/infra_validator)
  before deploying the model.  If the model has not been blessed, then the model
  will not be pushed.

  *Note:* The executor for this component can be overriden to enable the model
  to be pushed to other serving platforms than tf.serving.  The [Cloud AI
  Platform custom
  executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/pusher)
  provides an example how to implement this.

  ## Example
  ```
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  ```
  """

  SPEC_CLASS = PusherSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      model: types.Channel = None,
      model_blessing: Optional[types.Channel] = None,
      infra_blessing: Optional[types.Channel] = None,
      push_destination: Optional[Union[pusher_pb2.PushDestination,
                                       Dict[Text, Any]]] = None,
      custom_config: Optional[Dict[Text, Any]] = None,
      custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
      pushed_model: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct a Pusher component.

    Args:
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: An optional Channel of type
        `standard_artifacts.ModelBlessing`, usually produced from an Evaluator
        component.
      infra_blessing: An optional Channel of type
        `standard_artifacts.InfraBlessing`, usually produced from an
        InfraValidator component.
      push_destination: A pusher_pb2.PushDestination instance, providing info
        for tensorflow serving to load models. Optional if executor_class
        doesn't require push_destination. If any field is provided as a
        RuntimeParameter, push_destination should be constructed as a dict with
        the same field names as PushDestination proto message.
      custom_config: A dict which contains the deployment job parameters to be
        passed to cloud-based training platforms. The [Kubeflow example](
        https://github.com/tensorflow/tfx/blob/6ff57e36a7b65818d4598d41e584a42584d361e6/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py#L278-L285)
        contains an example how this can be used by custom executors.
      custom_executor_spec: Optional custom executor spec.
      pushed_model: Optional output `standard_artifacts.PushedModel` channel
        with result of push.
      instance_name: Optional unique instance name. Necessary if multiple Pusher
        components are declared in the same pipeline.

    Raises:
      ValueError: If `push_destination` is missing and no custom executor spec
        that can do without it is supplied.
    """
    pushed_model = pushed_model or types.Channel(
        type=standard_artifacts.PushedModel)
    if push_destination is None and not custom_executor_spec:
      # Fail fast: the default executor cannot push without a destination.
      raise ValueError('push_destination is required unless a '
                       'custom_executor_spec is supplied that does not require '
                       'it.')
    spec = PusherSpec(
        model=model,
        model_blessing=model_blessing,
        infra_blessing=infra_blessing,
        push_destination=push_destination,
        custom_config=json_utils.dumps(custom_config),
        pushed_model=pushed_model)
    super(Pusher, self).__init__(
        spec=spec,
        custom_executor_spec=custom_executor_spec,
        instance_name=instance_name)
"""TFX DataViewBinder component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.experimental.data_view import binder_executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ComponentSpec
class _DataViewBinderComponentSpec(ComponentSpec):
  """ComponentSpec for the DataViewBinder component."""

  # DataViewBinder has no execution properties; it only wires artifacts.
  PARAMETERS = {}
  INPUTS = {
      'input_examples': ChannelParameter(type=standard_artifacts.Examples),
      'data_view': ChannelParameter(type=standard_artifacts.DataView),
  }
  OUTPUTS = {
      'output_examples': ChannelParameter(type=standard_artifacts.Examples),
  }
class DataViewBinder(base_component.BaseComponent):
  """A component that binds a DataView to an Examples artifact.

  It takes as inputs a channel of Examples and a channel of DataView, and
  binds the DataView (i.e. attaching information from the DataView as custom
  properties) to the Examples in the input channel, producing new Examples
  Artifacts that are identical to the input Examples (including the uris),
  except for the additional information attached.

  Example:
  ```
  # We assume Examples are imported by ExampleGen
  example_gen = ...
  # First, create a dataview:
  data_view_provider = TfGraphDataViewProvider(
      module_file=module_file,
      create_decoder_func='create_decoder')
  # Then, bind the DataView to Examples:
  data_view_binder = DataViewBinder(
      input_examples=example_gen.outputs['examples'],
      data_view=data_view_provider.outputs['data_view'],
      )
  # Downstream component can then consume the output of the DataViewBinder:
  stats_gen = StatisticsGen(
      examples=data_view_binder.outputs['output_examples'], ...)
  ```
  """

  SPEC_CLASS = _DataViewBinderComponentSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(
      binder_executor.DataViewBinderExecutor)

  def __init__(self,
               input_examples: types.Channel,
               data_view: types.Channel,
               output_examples: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a DataViewBinder component.

    Args:
      input_examples: A Channel of type `standard_artifacts.Examples` whose
        artifacts the DataView should be bound to.
      data_view: A Channel of type `standard_artifacts.DataView` providing the
        DataView to bind.
      output_examples: Optional output Channel of type
        `standard_artifacts.Examples`; created on demand if not provided.
      instance_name: Optional unique instance name.
    """
    if not output_examples:
      output_examples = types.Channel(type=standard_artifacts.Examples)
    spec = _DataViewBinderComponentSpec(
        input_examples=input_examples,
        data_view=data_view,
        output_examples=output_examples)
    super().__init__(spec=spec, instance_name=instance_name)
"""TFX DataViewProvider component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.experimental.data_view import provider_executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ComponentSpec
from tfx.types.component_spec import ExecutionParameter
class _TfGraphDataViewProviderSpec(ComponentSpec):
  """DataViewProvider component spec."""

  # Execution properties locating the decoder-factory function; the component
  # consumes no upstream artifacts and emits a single DataView artifact.
  PARAMETERS = {
      'module_file': ExecutionParameter(type=(str, Text), optional=True),
      'create_decoder_func': ExecutionParameter(type=(str, Text))
  }
  INPUTS = {}
  OUTPUTS = {
      'data_view': ChannelParameter(type=standard_artifacts.DataView),
  }
class TfGraphDataViewProvider(base_component.BaseComponent):
  """A component providing a tfx_bsl.coders.TfGraphRecordDecoder as a DataView.

  User needs to define a function that creates such a TfGraphRecordDecoder.
  This component, when running, calls that function and writes the result
  decoder (in the form of a TF SavedModel) as its output artifact.

  Example:
  ```
  # Import a decoder that can be created by a function 'create_decoder()' in
  # module_file:
  data_view_provider = TfGraphDataViewProvider(
      module_file=module_file,
      create_decoder_func='create_decoder')
  ```
  """

  SPEC_CLASS = _TfGraphDataViewProviderSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(
      provider_executor.TfGraphDataViewProviderExecutor)

  def __init__(self,
               create_decoder_func: Text,
               module_file: Optional[Text] = None,
               data_view: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct a TfGraphDataViewProvider component.

    Args:
      create_decoder_func: If `module_file` is not None, this should be the name
        of the function in `module_file` that this component need to use to
        create the TfGraphRecordDecoder. Otherwise it should be the path
        (dot-delimited, e.g. "some_package.some_module.some_func") to such
        a function. The function must have the following signature:

        def create_decoder_func() -> tfx_bsl.coder.TfGraphRecordDecoder:
          ...
      module_file: The file path to a python module file, from which the
        function named after `create_decoder_func` will be loaded. If not
        provided, `create_decoder_func` is expected to be a path to a function.
      data_view: Output 'DataView' channel, in which the decoder will be
        saved.
      instance_name: Optional unique instance name. Necessary iff multiple
        transform components are declared in the same pipeline.
    """
    if data_view is None:
      data_view = types.Channel(type=standard_artifacts.DataView)
    spec = _TfGraphDataViewProviderSpec(
        module_file=module_file,
        create_decoder_func=create_decoder_func,
        data_view=data_view)
    super().__init__(spec=spec, instance_name=instance_name)
"""TFX ModelValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from tfx import types
from tfx.components.model_validator import driver
from tfx.components.model_validator import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import ModelValidatorSpec
from tfx.utils import deprecation_utils
class ModelValidator(base_component.BaseComponent):
  """DEPRECATED: Please use `Evaluator` instead.

  The model validator component can be used to check model metrics threshold
  and validate current model against a previously validated model. If there
  isn't a prior validated model, model validator will just make sure the
  threshold passed. Otherwise, ModelValidator compares a newly trained model
  against a known good model, specifically the last model "blessed" by this
  component. A model is "blessed" if the exported model's metrics are within
  predefined thresholds around the prior model's metrics.

  *Note:* This component includes a driver to resolve last blessed model.

  ## Possible causes why model validation fails
  Model validation can fail for many reasons, but these are the most common:

  - problems with training data. For example, negative examples are dropped or
    features are missing.
  - problems with the test or evaluation data. For example, skew exists between
    the training and evaluation data.
  - changes in data distribution. This indicates the user behavior may have
    changed over time.
  - problems with the trainer. For example, the trainer was stopped before
    model is converged or the model is unstable.

  ## Example
  ```
  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'])
  ```
  """

  SPEC_CLASS = ModelValidatorSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
  # Custom driver that resolves the previously blessed model from MLMD.
  DRIVER_CLASS = driver.Driver

  @deprecation_utils.deprecated(
      None, 'ModelValidator is deprecated, use Evaluator instead.')
  def __init__(self,
               examples: types.Channel,
               model: types.Channel,
               blessing: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct a ModelValidator component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, usually
        produced by an
        [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen) component.
        _required_
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a [Trainer](https://www.tensorflow.org/tfx/guide/trainer) component.
        _required_
      blessing: Output channel of type `standard_artifacts.ModelBlessing`
        that contains the validation result.
      instance_name: Optional name assigned to this specific instance of
        ModelValidator. Required only if multiple ModelValidator components are
        declared in the same pipeline.
    """
    # Create the default output channel when the caller did not supply one.
    blessing = blessing or types.Channel(type=standard_artifacts.ModelBlessing)
    spec = ModelValidatorSpec(examples=examples, model=model, blessing=blessing)
    super(ModelValidator, self).__init__(spec=spec, instance_name=instance_name)
"""Generic TFX model validator custom driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Tuple
import absl
from tfx.dsl.components.base import base_driver
from tfx.orchestration import data_types
class Driver(base_driver.BaseDriver):
  """Custom driver for model validator.

  Resolves the latest blessed model from ML Metadata so the executor can
  compare the current model against it.
  """

  def _fetch_last_blessed_model(
      self,
      pipeline_name: Text,
      component_id: Text,
  ) -> Tuple[Optional[Text], Optional[int]]:
    """Fetch last blessed model in metadata based on span.

    Args:
      pipeline_name: Name of the pipeline whose blessings are considered.
      component_id: Id of the component that produced the blessings.

    Returns:
      A (model uri, model id) tuple for the most recently blessed model, or
      (None, None) if no prior blessing exists.
    """
    previous_blessed_models = []
    for a in self._metadata_handler.get_artifacts_by_type('ModelBlessing'):
      # TODO(ccy): get pipeline name from MLMD context.
      # Older artifacts store pipeline_name as a regular property; newer ones
      # store it as a custom property.
      if 'pipeline_name' in a.properties:
        p = a.properties['pipeline_name'].string_value
      else:
        p = a.custom_properties['pipeline_name'].string_value
      if (p == pipeline_name and
          a.custom_properties['blessed'].int_value == 1 and
          a.custom_properties['component_id'].string_value == component_id):
        previous_blessed_models.append(a)
    if previous_blessed_models:
      # TODO(b/138845899): consider use span instead of id.
      # Highest artifact id is treated as the most recent blessing.
      last_blessed_model = max(
          previous_blessed_models, key=lambda artifact: artifact.id)
      return (
          last_blessed_model.custom_properties['current_model'].string_value,
          last_blessed_model.custom_properties['current_model_id'].int_value)
    else:
      return None, None

  # pyformat: disable
  def resolve_exec_properties(
      self, exec_properties: Dict[Text, Any],
      pipeline_info: data_types.PipelineInfo,
      component_info: data_types.ComponentInfo) -> Dict[Text, Any]:
    # pyformat: enable
    """Overrides BaseDriver.resolve_exec_properties()."""
    # Inject the last blessed model (uri and id) so the executor can compare
    # the current model against it.
    (exec_properties['blessed_model'],
     exec_properties['blessed_model_id']) = self._fetch_last_blessed_model(
         pipeline_info.pipeline_name, component_info.component_id)
    exec_properties['current_component_id'] = component_info.component_id
    absl.logging.info('Resolved last blessed model {}'.format(
        exec_properties['blessed_model']))
    return exec_properties
"""Generic TFX model validator executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Text
import absl
import apache_beam as beam
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.model_validator import constants
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
class Executor(base_executor.BaseExecutor):
  """DEPRECATED: Please use `Evaluator` instead.

  The model validator helps prevent bad models from being pushed to production.
  It does this by validating exported models against known good models (e.g. the
  current production model), and marking the exported model as good ("blessing
  it") only if the exported model's metrics are within predefined thresholds
  around the good model's metrics.

  The model validator will validate tf.serving format exported models produced
  by the Trainer component. The validator evaluates the models on examples
  created by the ExampleGen component. The validator will also automatically
  read data written by the Pusher component regarding the latest pushed models
  by using ml.metadata to query the previously pushed artifacts.

  To include ModelValidator in a TFX pipeline, configure your pipeline similar
  to
  https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L110.
  """

  # TODO(jyzhao): customized threshold support.
  def _pass_threshold(self, eval_result: tfma.EvalResult) -> bool:
    """Check threshold."""
    # Threshold checking is not implemented yet; every model passes.
    return True

  # TODO(jyzhao): customized validation support.
  def _compare_eval_result(self, current_model_eval_result: tfma.EvalResult,
                           blessed_model_eval_result: tfma.EvalResult) -> bool:
    """Compare accuracy of all metrics and return true if current is better or equal."""
    for current_metric, blessed_metric in zip(
        current_model_eval_result.slicing_metrics,
        blessed_model_eval_result.slicing_metrics):
      # slicing_metric is a tuple, index 0 is slice, index 1 is its value.
      if current_metric[0] != blessed_metric[0]:
        raise RuntimeError('EvalResult not match {} vs {}.'.format(
            current_metric[0], blessed_metric[0]))
      # TODO(b/140455644): TFMA introduced breaking change post 0.14 release.
      # Remove this forward compatibility change after 0.15 release.
      current_model_metrics = current_metric[1]
      blessed_model_metrics = blessed_metric[1]
      try:
        current_model_accuracy = current_model_metrics['accuracy']
        blessed_model_accuracy = blessed_model_metrics['accuracy']
      except KeyError:
        # Post-0.14 TFMA nests metrics under output name and sub key ('').
        current_model_accuracy = current_model_metrics['']['']['accuracy']
        blessed_model_accuracy = blessed_model_metrics['']['']['accuracy']
      if (current_model_accuracy['doubleValue'] <
          blessed_model_accuracy['doubleValue']):
        absl.logging.info(
            'Current model accuracy is worse than blessed model: {}'.format(
                current_metric[0]))
        return False
    return True

  def _generate_blessing_result(self, eval_examples_uri: Text,
                                slice_spec: List[tfma.slicer.SingleSliceSpec],
                                current_model_dir: Text,
                                blessed_model_dir: Text) -> bool:
    """Evaluates the current (and blessed) model and decides the blessing.

    Args:
      eval_examples_uri: URI of the split used to evaluate the models.
      slice_spec: TFMA slice specs applied to both evaluations.
      current_model_dir: Directory of the candidate model.
      blessed_model_dir: Directory of the previously blessed model, or None if
        no model has been blessed yet.

    Returns:
      True iff the current model passes the threshold and is at least as good
      as the blessed model (or there is no blessed model yet).
    """
    current_model_eval_result_path = os.path.join(
        self._temp_path, constants.CURRENT_MODEL_EVAL_RESULT_PATH)
    blessed_model_eval_result_path = os.path.join(
        self._temp_path, constants.BLESSED_MODEL_EVAL_RESULT_PATH)

    # Run one Beam pipeline that evaluates the current model and, if present,
    # the blessed model on the same example data.
    with self._make_beam_pipeline() as pipeline:
      eval_data = (
          pipeline | 'ReadData' >> beam.io.ReadFromTFRecord(
              file_pattern=io_utils.all_files_pattern(eval_examples_uri)))

      current_model = tfma.default_eval_shared_model(
          eval_saved_model_path=path_utils.eval_model_path(current_model_dir))
      (eval_data | 'EvalCurrentModel' >> tfma.ExtractEvaluateAndWriteResults(  # pylint: disable=expression-not-assigned
          eval_shared_model=current_model,
          slice_spec=slice_spec,
          output_path=current_model_eval_result_path))

      if blessed_model_dir is not None:
        blessed_model = tfma.default_eval_shared_model(
            eval_saved_model_path=path_utils.eval_model_path(blessed_model_dir))
        (eval_data | 'EvalBlessedModel' >> tfma.ExtractEvaluateAndWriteResults(  # pylint: disable=expression-not-assigned
            eval_shared_model=blessed_model,
            slice_spec=slice_spec,
            output_path=blessed_model_eval_result_path))

    absl.logging.info('all files in current_model_eval_result_path: [%s]',
                      str(fileio.listdir(current_model_eval_result_path)))
    current_model_eval_result = tfma.load_eval_result(
        output_path=current_model_eval_result_path)

    if not self._pass_threshold(current_model_eval_result):
      absl.logging.info('Current model does not pass threshold.')
      return False
    absl.logging.info('Current model passes threshold.')

    if blessed_model_dir is None:
      absl.logging.info('No blessed model yet.')
      return True

    absl.logging.info('all files in blessed_model_eval_result: [%s]',
                      str(fileio.listdir(blessed_model_eval_result_path)))
    blessed_model_eval_result = tfma.load_eval_result(
        output_path=blessed_model_eval_result_path)

    if (self._compare_eval_result(current_model_eval_result,
                                  blessed_model_eval_result)):
      absl.logging.info('Current model better than blessed model.')
      return True
    else:
      absl.logging.info('Current model worse than blessed model.')
      return False

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Validate current model against last blessed model.

    Args:
      input_dict: Input dict from input key to a list of Artifacts.
        - examples: examples for eval the model.
        - model: current model for validation.
      output_dict: Output dict from output key to a list of Artifacts.
        - blessing: model blessing result.
      exec_properties: A dict of execution properties.
        - blessed_model: last blessed model for validation.
        - blessed_model_id: last blessed model id.

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    self._temp_path = self._get_tmp_dir()
    absl.logging.info('Using temp path {} for tft.beam'.format(self._temp_path))

    eval_examples_uri = artifact_utils.get_split_uri(
        input_dict[constants.EXAMPLES_KEY], 'eval')
    blessing = artifact_utils.get_single_instance(
        output_dict[constants.BLESSING_KEY])

    # Current model to be validated.
    current_model = artifact_utils.get_single_instance(
        input_dict[constants.MODEL_KEY])
    absl.logging.info('Using {} as current model.'.format(current_model.uri))
    blessing.set_string_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_URI_KEY, current_model.uri)
    blessing.set_int_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY, current_model.id)

    # Denote model component_name.
    component_id = exec_properties['current_component_id']
    blessing.set_string_custom_property('component_id', component_id)

    # Previous blessed model to be validated against (resolved by the driver).
    blessed_model_dir = exec_properties['blessed_model']
    blessed_model_id = exec_properties['blessed_model_id']
    absl.logging.info('Using {} as blessed model.'.format(blessed_model_dir))
    if blessed_model_dir:
      blessing.set_string_custom_property(
          constants.ARTIFACT_PROPERTY_BLESSED_MODEL_URI_KEY, blessed_model_dir)
      blessing.set_int_custom_property(
          constants.ARTIFACT_PROPERTY_BLESSED_MODEL_ID_KEY, blessed_model_id)

    absl.logging.info('Validating model.')
    # TODO(b/125853306): support customized slice spec.
    blessed = self._generate_blessing_result(
        eval_examples_uri=eval_examples_uri,
        slice_spec=[tfma.slicer.SingleSliceSpec()],
        current_model_dir=current_model.uri,
        blessed_model_dir=blessed_model_dir)

    # Record the verdict both as a marker file and a custom property.
    if blessed:
      io_utils.write_string_file(
          os.path.join(blessing.uri, constants.BLESSED_FILE_NAME), '')
      blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
                                       constants.BLESSED_VALUE)
    else:
      io_utils.write_string_file(
          os.path.join(blessing.uri, constants.NOT_BLESSED_FILE_NAME), '')
      blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
                                       constants.NOT_BLESSED_VALUE)
    absl.logging.info('Blessing result {} written to {}.'.format(
        blessed, blessing.uri))

    io_utils.delete_dir(self._temp_path)
    absl.logging.info('Cleaned up temp path {} on executor success.'.format(
        self._temp_path))
"""TFX StatisticsGen component definition."""
from typing import List, Optional, Text
from absl import logging
import tensorflow_data_validation as tfdv
from tfx import types
from tfx.components.statistics_gen import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import StatisticsGenSpec
from tfx.utils import json_utils
class StatisticsGen(base_component.BaseComponent):
  """Official TFX StatisticsGen component.

  The StatisticsGen component generates features statistics and random samples
  over training data, which can be used for visualization and validation.
  StatisticsGen uses Apache Beam and approximate algorithms to scale to large
  datasets.

  Please see https://www.tensorflow.org/tfx/data_validation for more details.

  ## Example
  ```
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  ```
  """

  SPEC_CLASS = StatisticsGenSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               examples: Optional[types.Channel] = None,
               schema: Optional[types.Channel] = None,
               stats_options: Optional[tfdv.StatsOptions] = None,
               exclude_splits: Optional[List[Text]] = None,
               output: Optional[types.Channel] = None,
               input_data: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct a StatisticsGen component.

    Args:
      examples: A Channel of `ExamplesPath` type, likely generated by the
        [ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
        This needs to contain two splits labeled `train` and `eval`. _required_
      schema: A `Schema` channel to use for automatically configuring the value
        of stats options passed to TFDV.
      stats_options: The StatsOptions instance to configure optional TFDV
        behavior. When stats_options.schema is set, it will be used instead of
        the `schema` channel input. Due to the requirement that stats_options be
        serialized, the slicer functions and custom stats generators are dropped
        and are therefore not usable.
      exclude_splits: Names of splits where statistics and sample should not
        be generated. Default behavior (when exclude_splits is set to None)
        is excluding no splits.
      output: `ExampleStatisticsPath` channel for statistics of each split
        provided in the input examples.
      input_data: Backwards compatibility alias for the `examples` argument.
      instance_name: Optional name assigned to this specific instance of
        StatisticsGen. Required only if multiple StatisticsGen components are
        declared in the same pipeline.
    """
    if input_data:
      logging.warning(
          'The "input_data" argument to the StatisticsGen component has '
          'been renamed to "examples" and is deprecated. Please update your '
          'usage as support for this argument will be removed soon.')
      examples = input_data
    if exclude_splits is None:
      exclude_splits = []
      logging.info('Excluding no splits because exclude_splits is not set.')
    if not output:
      output = types.Channel(type=standard_artifacts.ExampleStatistics)
    # TODO(b/150802589): Move jsonable interface to tfx_bsl and use json_utils.
    stats_options_json = stats_options.to_json() if stats_options else None
    spec = StatisticsGenSpec(
        examples=examples,
        schema=schema,
        stats_options_json=stats_options_json,
        exclude_splits=json_utils.dumps(exclude_splits),
        statistics=output)
    super(StatisticsGen, self).__init__(spec=spec, instance_name=instance_name)
"""TFX ExampleValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.schema_gen import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import SchemaGenSpec
from tfx.utils import json_utils
class SchemaGen(base_component.BaseComponent):
  """A TFX SchemaGen component to generate a schema from the training data.

  The SchemaGen component uses [TensorFlow Data
  Validation](https://www.tensorflow.org/tfx/data_validation) to
  generate a schema from input statistics. The following TFX libraries use the
  schema:
    - TensorFlow Data Validation
    - TensorFlow Transform
    - TensorFlow Model Analysis

  In a typical TFX pipeline, the SchemaGen component generates a schema which
  is consumed by the other pipeline components.

  Please see https://www.tensorflow.org/tfx/data_validation for more details.

  ## Example
  ```
  # Generates schema based on statistics files.
  infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  ```
  """

  # TODO(b/123941608): Update pydoc about how to use a user provided schema
  SPEC_CLASS = SchemaGenSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      statistics: Optional[types.Channel] = None,
      infer_feature_shape: Optional[Union[bool,
                                          data_types.RuntimeParameter]] = True,
      exclude_splits: Optional[List[Text]] = None,
      schema: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Constructs a SchemaGen component.

    Args:
      statistics: A Channel of `ExampleStatistics` type (required if spec is not
        passed). This should contain at least a `train` split. Other splits are
        currently ignored. _required_
      infer_feature_shape: Boolean (or RuntimeParameter) value indicating
        whether or not to infer the shape of features. If the feature shape is
        not inferred, downstream Tensorflow Transform component using the schema
        will parse input as tf.SparseTensor. Default to True if not set.
      exclude_splits: Names of splits that will not be taken into consideration
        when auto-generating a schema. Default behavior (when exclude_splits is
        set to None) is excluding no splits.
      schema: Output `Schema` channel for schema result.
      instance_name: Optional name assigned to this specific instance of
        SchemaGen. Required only if multiple SchemaGen components are declared
        in the same pipeline.
    """
    if exclude_splits is None:
      exclude_splits = []
      logging.info('Excluding no splits because exclude_splits is not set.')
    schema = schema or types.Channel(type=standard_artifacts.Schema)
    if isinstance(infer_feature_shape, bool):
      # The execution property is stored as an int; RuntimeParameter values
      # pass through untouched.
      infer_feature_shape = int(infer_feature_shape)
    spec = SchemaGenSpec(
        statistics=statistics,
        infer_feature_shape=infer_feature_shape,
        exclude_splits=json_utils.dumps(exclude_splits),
        schema=schema)
    super(SchemaGen, self).__init__(spec=spec, instance_name=instance_name)
"""Generic TFX schema_gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Text
from absl import logging
import tensorflow_data_validation as tfdv
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
# Default file name for the generated schema file (text-format proto).
_DEFAULT_FILE_NAME = 'schema.pbtxt'
class Executor(base_executor.BaseExecutor):
  """Generic TFX schema_gen executor."""

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """TensorFlow SchemaGen executor entrypoint.

    This infers the schema using tensorflow_data_validation on the precomputed
    stats of 'train' split.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - 'statistics': A list of 'ExampleStatistics' type which must contain
          split 'train'.
      output_dict: Output dict from key to a list of artifacts, including:
        - schema: A list of 'Schema' artifact of size one.
      exec_properties: A dict of execution properties, includes:
        - infer_feature_shape: Whether or not to infer the shape of the feature.
        - exclude_splits: Names of splits that will not be taken into
          consideration when auto-generating a schema.

    Raises:
      ValueError: If the deserialized exclude_splits property is not a list.

    Returns:
      None
    """
    # TODO(zhitaoli): Move constants between this file and component.py to a
    # constants.py.
    infer_feature_shape = bool(
        exec_properties.get(standard_component_specs.INFER_FEATURE_SHAPE_KEY,
                            True))
    # Load and deserialize exclude splits from execution properties.
    exclude_splits = json_utils.loads(
        exec_properties.get(standard_component_specs.EXCLUDE_SPLITS_KEY,
                            'null')) or []
    if not isinstance(exclude_splits, list):
      raise ValueError('exclude_splits in execution properties needs to be a '
                       'list. Got %s instead.' % type(exclude_splits))
    # Only one schema is generated for all splits.
    schema = None
    stats_artifact = artifact_utils.get_single_instance(
        input_dict[standard_component_specs.STATISTICS_KEY])
    for split in artifact_utils.decode_split_names(stats_artifact.split_names):
      if split in exclude_splits:
        continue
      logging.info('Processing schema from statistics for split %s.', split)
      stats_uri = io_utils.get_only_uri_in_dir(
          os.path.join(stats_artifact.uri, split))
      if not schema:
        # First (non-excluded) split: infer a fresh schema.
        schema = tfdv.infer_schema(
            tfdv.load_statistics(stats_uri), infer_feature_shape)
      else:
        # Subsequent splits: merge their statistics into the running schema.
        schema = tfdv.update_schema(schema, tfdv.load_statistics(stats_uri),
                                    infer_feature_shape)
    output_uri = os.path.join(
        artifact_utils.get_single_uri(
            output_dict[standard_component_specs.SCHEMA_KEY]),
        _DEFAULT_FILE_NAME)
    io_utils.write_pbtxt_file(output_uri, schema)
    logging.info('Schema written to %s.', output_uri)
"""Utility functions for building requests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
from typing import Any, Iterable, List, Mapping, Optional, Text
from absl import logging
import six
import tensorflow as tf
from tfx import types
from tfx.components.infra_validator import types as iv_types
from tfx.components.util import examples_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.utils import path_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow.python.saved_model import loader_impl # pylint: disable=g-direct-tensorflow-import
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import regression_pb2
# TODO(b/140306674): Stop using the internal TF API
# Name of the oneof field in RequestSpec that selects TF Serving requests.
_TENSORFLOW_SERVING = 'tensorflow_serving'
# Number of examples to read when RequestSpec does not specify one.
_DEFAULT_NUM_EXAMPLES = 1
# Column under which TFXIO exposes the raw serialized records.
_RAW_RECORDS_COLUMN = 'raw_records'
_TELEMETRY_DESCRIPTORS = ['InfraValidator']
_DEFAULT_TAG_SET = frozenset([tf.saved_model.SERVING])
# We define the following aliases of Any because the actual types are not
# public.
_SavedModel = Any
_SignatureDef = Any
def build_requests(  # pylint: disable=invalid-name
    model_name: Text,
    model: types.Artifact,
    examples: types.Artifact,
    request_spec: infra_validator_pb2.RequestSpec
) -> List[iv_types.Request]:
  """Build model server requests.

  The Examples artifact is used as a data source to build requests. The caller
  should guarantee that the logical format of the Examples artifact is
  compatible with the request type being built.

  Args:
    model_name: A model name that model server recognizes.
    model: A model artifact for model signature analysis.
    examples: An `Examples` artifact for request data source.
    request_spec: A `RequestSpec` config.

  Returns:
    A list of request protos.

  Raises:
    NotImplementedError: If the RequestSpec kind is not supported.
  """
  kind = request_spec.WhichOneof('kind')
  if kind != _TENSORFLOW_SERVING:
    raise NotImplementedError('Unsupported RequestSpec kind {!r}'.format(kind))

  serving_spec = request_spec.tensorflow_serving
  request_builder = _TFServingRpcRequestBuilder(
      model_name=model_name,
      signatures=_parse_saved_model_signatures(
          model_path=path_utils.serving_model_path(model.uri),
          tag_set=serving_spec.tag_set,
          signature_names=serving_spec.signature_names))
  request_builder.ReadExamplesArtifact(
      examples,
      split_name=request_spec.split_name or None,
      num_examples=request_spec.num_examples or _DEFAULT_NUM_EXAMPLES)
  return request_builder.BuildRequests()
# TODO(b/151790176): Move to tfx_bsl, or keep it if TF adds README.ml-pipelines-sdk.md proper public API.
def _parse_saved_model_signatures(
    model_path: Text,
    tag_set: Iterable[Text],
    signature_names: Iterable[Text]) -> Mapping[Text, _SignatureDef]:
  """Parse SignatureDefs of given signature names from SavedModel.

  Among one or more MetaGraphDefs in the SavedModel, the first one that has all
  the `tag_set` elements is chosen. The selected MetaGraphDef should have
  signatures for all the given signature names.

  Args:
    model_path: A path to the SavedModel directory.
    tag_set: A set of tags MetaGraphDef should have; falls back to
      {tf.saved_model.SERVING} when empty.
    signature_names: Signature names to retrieve; falls back to the default
      serving signature key when empty.

  Returns:
    A mapping from signature name to SignatureDef.

  Raises:
    ValueError: If a requested signature is missing from the MetaGraphDef.
  """
  if not tag_set:
    tag_set = {tf.saved_model.SERVING}
    logging.info('tag_set is not given. Using %r instead.', tag_set)
  if not signature_names:
    signature_names = [tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    logging.info('signature_names are not given. Using %r instead.',
                 signature_names)

  meta_graph_def = loader_impl.SavedModelLoader(
      model_path).get_meta_graph_def_from_tags(tag_set)
  signatures = {}
  for name in signature_names:
    if name not in meta_graph_def.signature_def:
      raise ValueError('SignatureDef of name {} could not be found in '
                       'MetaGraphDef'.format(name))
    signatures[name] = meta_graph_def.signature_def[name]
  return signatures
class _BaseRequestBuilder(six.with_metaclass(abc.ABCMeta, object)):
  """Base class for all RequestBuilders.

  Subclasses read raw records from an `Examples` artifact and convert them to
  concrete model server request protos in `BuildRequests`.
  """

  def __init__(self):
    # Raw serialized records read from the Examples artifact.
    self._records = []  # type: List[bytes]
    # Payload format of the source artifact; resolved in ReadExamplesArtifact.
    self._payload_format = example_gen_pb2.PayloadFormat.FORMAT_UNSPECIFIED

  # TODO(jjong): The method strongly assumes that the output of ExampleGen is
  # a gzipped TFRecords of tf.Example. We need a better abstraction (e.g. TFXIO)
  # to accept arbitrary file format and convert it to appropriate request types.
  def ReadExamplesArtifact(self, examples: types.Artifact, num_examples: int,
                           split_name: Optional[Text] = None):
    """Read records from Examples artifact.

    Currently it assumes Examples artifact contains serialized tf.Example in
    gzipped TFRecord files.

    Args:
      examples: `Examples` artifact.
      num_examples: Number of examples to read. If the specified value is larger
        than the actual number of examples, all examples would be read.
      split_name: Name of the split to read from the Examples artifact.
        Defaults to the first available split.

    Raises:
      RuntimeError: If read twice.
      ValueError: If `num_examples` < 1, the artifact has no splits, the
        requested split does not exist, or no example files are found.
    """
    if self._records:
      raise RuntimeError('Cannot read records twice.')

    if num_examples < 1:
      raise ValueError('num_examples < 1 (got {})'.format(num_examples))

    available_splits = artifact_utils.decode_split_names(examples.split_names)
    if not available_splits:
      raise ValueError('No split_name is available in given Examples artifact.')
    if split_name is None:
      split_name = available_splits[0]
    if split_name not in available_splits:
      raise ValueError(
          'No split_name {}; available split names: {}'.format(
              split_name, ', '.join(available_splits)))

    # ExampleGen generates artifacts under each split_name directory.
    glob_pattern = os.path.join(examples.uri, split_name, '*')
    # Read records as raw bytes; parsing (if any) is deferred to subclasses.
    tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
        examples=[examples],
        telemetry_descriptors=_TELEMETRY_DESCRIPTORS,
        schema=None,
        read_as_raw_records=True,
        raw_record_column_name=_RAW_RECORDS_COLUMN)
    filenames = fileio.glob(glob_pattern)
    if not filenames:
      raise ValueError('Unable to find examples matching {}.'.format(
          glob_pattern))

    self._payload_format = examples_utils.get_payload_format(examples)
    tfxio = tfxio_factory(filenames)

    self._ReadFromDataset(
        tfxio.TensorFlowDataset(
            dataset_options.TensorFlowDatasetOptions(batch_size=num_examples)))

  def _ReadFromDataset(self, dataset: tf.data.Dataset):
    """Extracts raw record bytes from a single batch of `dataset`."""
    # One batch (sized `num_examples` by the caller) is all that is needed.
    dataset = dataset.take(1)
    if tf.executing_eagerly():
      for d in dataset:
        self._records.extend(d[_RAW_RECORDS_COLUMN].numpy())
    else:
      it = tf.compat.v1.data.make_one_shot_iterator(dataset)
      next_el = it.get_next()
      # Fix: `tf.Session` is unavailable when TF2 behavior is enabled; use the
      # compat.v1 alias, consistent with `make_one_shot_iterator` above.
      with tf.compat.v1.Session() as sess:
        while True:
          try:
            d = sess.run(next_el)
            self._records.extend(d[_RAW_RECORDS_COLUMN])
          except tf.errors.OutOfRangeError:
            break

  @abc.abstractmethod
  def BuildRequests(self) -> List[iv_types.Request]:
    """Transform read records (bytes) to the request type."""
class _TFServingRpcRequestBuilder(_BaseRequestBuilder):
  """RequestBuilder for TF Serving RPC requests.

  There are three kinds of request the builder can make:
  - ClassificationRequest
  - RegressionRequest
  - PredictRequest

  Types of request to build is determined by inspecting SavedModel and getting
  SignatureDef from it. What user can configure is the signature names to use.

  To build a ClassificationRequest or a RegressionRequest, logical format of
  the record should be TF_EXAMPLE.

  To build a PredictRequest, its corresponding SignatureDef should have a
  single input argument that accepts serialized record inputs. Its logical
  format does not matter as long as user has a correct parsing logic.
  """

  def __init__(self,
               model_name: Text,
               signatures: Mapping[Text, _SignatureDef]):
    super(_TFServingRpcRequestBuilder, self).__init__()
    self._model_name = model_name
    self._signatures = signatures
    # Cache for records parsed as tf.train.Example (see `examples` property).
    self._examples = []

  @property
  def examples(self) -> List[tf.train.Example]:
    """Read records parsed as `tf.train.Example`s, parsed once then cached.

    Raises:
      ValueError: If the payload format of the read records is not
        FORMAT_TF_EXAMPLE.
    """
    if not self._examples:
      if (self._payload_format !=
          example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE):
        raise ValueError(
            'Data payload format should be FORMAT_TF_EXAMPLE. Got: {}'.format(
                example_gen_pb2.PayloadFormat.Name(self._payload_format)))
      for record in self._records:
        example = tf.train.Example()
        example.ParseFromString(record)
        self._examples.append(example)
    return self._examples

  def BuildRequests(self) -> List[iv_types.TensorFlowServingRequest]:
    """Builds one request per read record for each configured signature.

    The request type is chosen from each SignatureDef's method name.

    Returns:
      A list of TF Serving request protos.

    Raises:
      ValueError: If a SignatureDef has an unknown method name.
    """
    assert self._records, 'Records are empty.'
    result = []
    for signature_name, signature_def in self._signatures.items():
      if signature_def.method_name == tf.saved_model.PREDICT_METHOD_NAME:
        result.extend(
            self._BuildPredictRequests(
                signature_name, self._GetSerializedInputKey(signature_def)))
      elif signature_def.method_name == tf.saved_model.CLASSIFY_METHOD_NAME:
        result.extend(self._BuildClassificationRequests(signature_name))
      elif signature_def.method_name == tf.saved_model.REGRESS_METHOD_NAME:
        result.extend(self._BuildRegressionRequests(signature_name))
      else:
        raise ValueError('Unknown method name {}'.format(
            signature_def.method_name))
    return result

  def _GetSerializedInputKey(self, signature_def: _SignatureDef):
    """Gets key for SignatureDef input that consumes serialized record.

    To build a PredictRequest, SignatureDef inputs should have a single input
    argument that accepts serialized record inputs. The input TensorSpec
    should have dtype=DT_STRING and shape=TensorShape([None]).

    Args:
      signature_def: A SignatureDef proto message.

    Returns:
      An input key for the serialized input.

    Raises:
      ValueError: If no such single string-vector input exists.
    """
    signature_input_keys = list(signature_def.inputs.keys())
    if len(signature_input_keys) == 1:
      input_key = signature_input_keys[0]
      input_spec = signature_def.inputs[input_key]
      if (input_spec.dtype == tf.dtypes.string.as_datatype_enum
          and input_spec.tensor_shape == tf.TensorShape([None]).as_proto()):
        return input_key
    # TODO(b/151697719): General Predict method signature support.
    raise ValueError(
        'Unable to find valid input key from SignatureDef. In order to make '
        'PredictRequest, model should define signature that accepts serialized '
        'record inputs, i.e. signature with single input whose dtype=DT_STRING '
        'and shape=TensorShape([None]).')

  def _BuildClassificationRequests(self, signature_name: Text):
    """Yields a ClassificationRequest per parsed example."""
    for example in self.examples:
      request = classification_pb2.ClassificationRequest()
      request.model_spec.name = self._model_name
      request.model_spec.signature_name = signature_name
      request.input.example_list.examples.append(example)
      yield request

  def _BuildRegressionRequests(self, signature_name: Text):
    """Yields a RegressionRequest per parsed example."""
    for example in self.examples:
      request = regression_pb2.RegressionRequest()
      request.model_spec.name = self._model_name
      request.model_spec.signature_name = signature_name
      request.input.example_list.examples.append(example)
      yield request

  def _BuildPredictRequests(self, signature_name: Text,
                            serialized_input_key: Text):
    """Yields a PredictRequest per raw record (no Example parsing needed)."""
    for record in self._records:
      request = predict_pb2.PredictRequest()
      request.model_spec.name = self._model_name
      request.model_spec.signature_name = signature_name
      request.inputs[serialized_input_key].CopyFrom(
          tf.make_tensor_proto([record]))
      yield request
"""TFX InfraValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from tfx import types
from tfx.components.infra_validator import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import executor_spec
from tfx.proto import infra_validator_pb2
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
class InfraValidator(base_component.BaseComponent):
  """A TFX component to validate the model against the serving infrastructure.

  Infra validation loads the model into the exact serving binary used in
  production and, optionally, issues test requests built from an Examples
  artifact against the launched model server.

  ## Examples

  Full example using a TensorFlowServing binary running on local docker:

  ```
  infra_validator = InfraValidator(
      model=trainer.outputs['model'],
      examples=test_example_gen.outputs['examples'],
      serving_spec=ServingSpec(
          tensorflow_serving=TensorFlowServing(  # Using TF Serving.
              tags=['latest']
          ),
          local_docker=LocalDockerConfig(),  # Running on local docker.
      ),
      validation_spec=ValidationSpec(
          max_loading_time_seconds=60,
          num_tries=5,
      ),
      request_spec=RequestSpec(
          tensorflow_serving=TensorFlowServingRequestSpec(),
          num_examples=1,
      )
  )
  ```

  Minimal example when running on Kubernetes:

  ```
  infra_validator = InfraValidator(
      model=trainer.outputs['model'],
      examples=test_example_gen.outputs['examples'],
      serving_spec=ServingSpec(
          tensorflow_serving=TensorFlowServing(
              tags=['latest']
          ),
          kubernetes=KubernetesConfig(),  # Running on Kubernetes.
      ),
  )
  ```
  """

  SPEC_CLASS = standard_component_specs.InfraValidatorSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
  DRIVER_CLASS = base_driver.BaseDriver

  def __init__(
      self,
      model: types.Channel,
      serving_spec: infra_validator_pb2.ServingSpec,
      examples: Optional[types.Channel] = None,
      blessing: Optional[types.Channel] = None,
      request_spec: Optional[infra_validator_pb2.RequestSpec] = None,
      validation_spec: Optional[infra_validator_pb2.ValidationSpec] = None,
      instance_name: Optional[Text] = None):
    """Constructs an InfraValidator component.

    Args:
      model: A `Channel` of `ModelExportPath` type, usually produced by the
        [Trainer](https://www.tensorflow.org/tfx/guide/trainer) component.
        _required_
      serving_spec: A `ServingSpec` configuration about serving binary and
        test platform config to launch model server for validation. _required_
      examples: A `Channel` of `ExamplesPath` type, usually produced by the
        [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
        component. If not specified, InfraValidator does not issue requests
        for validation.
      blessing: Output `Channel` of `InfraBlessingPath` that contains the
        validation result.
      request_spec: Optional `RequestSpec` configuration about making requests
        from the `examples` input. If not specified, InfraValidator does not
        issue requests for validation.
      validation_spec: Optional `ValidationSpec` configuration.
      instance_name: Optional name assigned to this specific instance of
        InfraValidator. Required only if multiple InfraValidator components
        are declared in the same pipeline.
    """
    # Supply a default blessing output channel when the caller omits one.
    if not blessing:
      blessing = types.Channel(type=standard_artifacts.InfraBlessing)
    super(InfraValidator, self).__init__(
        spec=standard_component_specs.InfraValidatorSpec(
            model=model,
            examples=examples,
            blessing=blessing,
            serving_spec=serving_spec,
            validation_spec=validation_spec,
            request_spec=request_spec,
        ),
        instance_name=instance_name)
"""Modules for organizing various model server binaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
from typing import Any, Dict, List, Optional, Text
from docker import types as docker_types
import six
from tfx.components.infra_validator.model_server_clients import base_client
from tfx.components.infra_validator.model_server_clients import tensorflow_serving_client
from tfx.proto import infra_validator_pb2
from tfx.utils.model_paths import tf_serving_flavor
def parse_serving_binaries(  # pylint: disable=invalid-name
    serving_spec: infra_validator_pb2.ServingSpec) -> List['ServingBinary']:
  """Parses `ServingBinary`s from a `ServingSpec`.

  One `TensorFlowServing` instance is produced per configured tag and per
  configured digest.

  Args:
    serving_spec: A `ServingSpec` proto with the `serving_binary` oneof set.

  Returns:
    A list of `ServingBinary` instances.

  Raises:
    ValueError: If the `serving_binary` oneof is unset or unrecognized.
  """
  which = serving_spec.WhichOneof('serving_binary')
  if which != 'tensorflow_serving':
    raise ValueError('Invalid serving_binary {}'.format(which))
  config = serving_spec.tensorflow_serving
  # Treat an empty image name as unset so the default image is used.
  image_name = config.image_name or None
  binaries = []
  binaries.extend(
      TensorFlowServing(image_name=image_name,
                        model_name=serving_spec.model_name,
                        tag=tag)
      for tag in config.tags)
  binaries.extend(
      TensorFlowServing(image_name=image_name,
                        model_name=serving_spec.model_name,
                        digest=digest)
      for digest in config.digests)
  return binaries
class ServingBinary(six.with_metaclass(abc.ABCMeta, object)):
  """Base class for serving binaries.

  A serving binary describes a concrete model server (e.g. TensorFlow
  Serving): its container image, port, runtime configuration, and how to
  build a client for it.
  """

  # NOTE: `abc.abstractproperty` is deprecated since Python 3.3; the
  # documented replacement is stacking @property over @abc.abstractmethod.
  @property
  @abc.abstractmethod
  def container_port(self) -> int:
    """Container port of the model server.

    Only applies to docker compatible serving binaries.
    """
    raise NotImplementedError('{} is not docker compatible.'.format(
        type(self).__name__))

  @property
  @abc.abstractmethod
  def image(self) -> Text:
    """Container image of the model server.

    Only applies to docker compatible serving binaries.
    """
    raise NotImplementedError('{} is not docker compatible.'.format(
        type(self).__name__))

  @abc.abstractmethod
  def MakeEnvVars(self, *args: Any) -> Dict[Text, Text]:
    """Construct environment variables to be used in container image.

    Only applies to docker compatible serving binaries.

    Args:
      *args: List of unresolved variables to configure environment variables.

    Returns:
      A dictionary of environment variables inside container.
    """
    raise NotImplementedError('{} is not docker compatible.'.format(
        type(self).__name__))

  @abc.abstractmethod
  def MakeDockerRunParams(self, *args: Any) -> Dict[Text, Any]:
    """Make parameters for docker `client.containers.run`.

    Only applies to docker compatible serving binaries.

    Args:
      *args: List of unresolved variables to configure docker run parameters.

    Returns:
      A dictionary of docker run parameters. (Values are not limited to
      strings: booleans, lists, and Mount objects appear as well, hence
      `Dict[Text, Any]`.)
    """
    raise NotImplementedError('{} is not docker compatible.'.format(
        type(self).__name__))

  @abc.abstractmethod
  def MakeClient(self, endpoint: Text) -> base_client.BaseModelServerClient:
    """Create a model server client of this serving binary."""
    raise NotImplementedError('{} does not implement MakeClient.'.format(
        type(self).__name__))
class TensorFlowServing(ServingBinary):
  """TensorFlow Serving binary."""

  # Parameters applied to every `docker run` invocation of the server.
  _BASE_DOCKER_RUN_PARAMS = {
      # Enable auto-removal of the container on docker daemon after container
      # process exits.
      'auto_remove': True,
      # Run container in the background instead of streaming its output.
      'detach': True,
      # Publish all ports to the host.
      'publish_all_ports': True,
  }
  _DEFAULT_IMAGE_NAME = 'tensorflow/serving'
  _DEFAULT_GRPC_PORT = 8500
  _DEFAULT_MODEL_BASE_PATH = '/model'

  def __init__(
      self,
      model_name: Text,
      image_name: Optional[Text] = None,
      tag: Optional[Text] = None,
      digest: Optional[Text] = None,
  ):
    """Initializes the binary with an image pinned by tag XOR digest."""
    super(TensorFlowServing, self).__init__()
    self._model_name = model_name
    has_tag = tag is not None
    has_digest = digest is not None
    if has_tag == has_digest:
      raise ValueError('Exactly one of `tag` or `digest` should be used.')
    base_image = image_name or self._DEFAULT_IMAGE_NAME
    if has_tag:
      self._image = '{}:{}'.format(base_image, tag)
    else:
      self._image = '{}@{}'.format(base_image, digest)

  @property
  def container_port(self) -> int:
    """gRPC port exposed by the TF Serving container."""
    return self._DEFAULT_GRPC_PORT

  @property
  def image(self) -> Text:
    """Fully qualified container image reference."""
    return self._image

  def MakeEnvVars(
      self, model_path: Optional[Text] = None) -> Dict[Text, Text]:
    """Builds TF Serving environment variables.

    Args:
      model_path: Optional IV-flavored model path; when omitted, the default
        in-container base path is used.

    Returns:
      Environment variables for the container.
    """
    if model_path is None:
      base_path = self._DEFAULT_MODEL_BASE_PATH
    else:
      base_path = tf_serving_flavor.parse_model_base_path(model_path)
    return {
        'MODEL_NAME': self._model_name,
        'MODEL_BASE_PATH': base_path
    }

  def MakeDockerRunParams(
      self,
      model_path: Text,
      needs_mount: bool) -> Dict[Text, Any]:
    """Make parameters for docker `client.containers.run`.

    Args:
      model_path: A path to the model.
      needs_mount: If True, model_path will be mounted to the container.

    Returns:
      A dictionary of docker run parameters.
    """
    params = dict(self._BASE_DOCKER_RUN_PARAMS, image=self._image)
    if not needs_mount:
      # model_path is presumably a remote URI. TF Serving is able to pick up
      # the model remotely using gfile, so setting the environment variables
      # is all that is needed.
      params.update(environment=self.MakeEnvVars(model_path=model_path))
      return params
    # model_path should be a local directory. In order to make TF Serving see
    # the host model path, mount the model path volume into the container.
    assert os.path.isdir(model_path), '{} does not exist'.format(model_path)
    mount_target = tf_serving_flavor.make_model_path(
        model_base_path=self._DEFAULT_MODEL_BASE_PATH,
        model_name=self._model_name,
        version=1)
    params.update(
        environment=self.MakeEnvVars(),
        mounts=[
            docker_types.Mount(
                type='bind',
                target=mount_target,
                source=model_path,
                read_only=True)
        ])
    return params

  def MakeClient(self, endpoint: Text) -> base_client.BaseModelServerClient:
    """Creates a TF Serving gRPC client bound to `endpoint`."""
    return tensorflow_serving_client.TensorFlowServingClient(
        endpoint=endpoint, model_name=self._model_name)
"""TFX InfraValidator executor definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional, Text
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import REQUEST_SPEC_KEY
from tfx.types.standard_component_specs import SERVING_SPEC_KEY
from tfx.types.standard_component_specs import VALIDATION_SPEC_KEY
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
# Defaults applied when the corresponding ValidationSpec fields are unset.
_DEFAULT_NUM_TRIES = 5
_DEFAULT_POLLING_INTERVAL_SEC = 1
_DEFAULT_MAX_LOADING_TIME_SEC = 300
# Model name used when ServingSpec.model_name is unset.
_DEFAULT_MODEL_NAME = 'infra-validation-model'
# Proto message keys for oneof block.
_TENSORFLOW_SERVING = 'tensorflow_serving'
_LOCAL_DOCKER = 'local_docker'
_KUBERNETES = 'kubernetes'
# Artifact property keys
_BLESSED_KEY = 'blessed'
# Filename of infra blessing artifact on succeed.
_BLESSED_FILENAME = 'INFRA_BLESSED'
# Filename of infra blessing artifact on fail.
_NOT_BLESSED_FILENAME = 'INFRA_NOT_BLESSED'
def _create_model_server_runner(
    model_path: Text,
    serving_binary: serving_bins.ServingBinary,
    serving_spec: infra_validator_pb2.ServingSpec):
  """Creates a ModelServerRunner from a model, a ServingBinary and a ServingSpec.

  Args:
    model_path: An IV-flavored model path. (See model_path_utils.py)
    serving_binary: One of ServingBinary instances parsed from the
      `serving_spec`.
    serving_spec: A ServingSpec instance of this infra validation.

  Returns:
    A ModelServerRunner.

  Raises:
    NotImplementedError: If the serving_platform oneof is unrecognized.
  """
  platform = serving_spec.WhichOneof('serving_platform')
  # Dispatch on the serving platform declared in the spec.
  runner_cls_by_platform = {
      'local_docker': local_docker_runner.LocalDockerRunner,
      'kubernetes': kubernetes_runner.KubernetesRunner,
  }
  runner_cls = runner_cls_by_platform.get(platform)
  if runner_cls is None:
    raise NotImplementedError('Invalid serving_platform {}'.format(platform))
  return runner_cls(
      model_path=model_path,
      serving_binary=serving_binary,
      serving_spec=serving_spec)
def _mark_blessed(blessing: types.Artifact) -> None:
  """Records a passing validation: writes the marker file and sets `blessed=1`."""
  logging.info('Model passed infra validation.')
  marker_path = os.path.join(blessing.uri, _BLESSED_FILENAME)
  io_utils.write_string_file(marker_path, '')
  blessing.set_int_custom_property(_BLESSED_KEY, 1)
def _mark_not_blessed(blessing: types.Artifact) -> None:
  """Records a failed validation: writes the marker file and sets `blessed=0`."""
  logging.info('Model failed infra validation.')
  marker_path = os.path.join(blessing.uri, _NOT_BLESSED_FILENAME)
  io_utils.write_string_file(marker_path, '')
  blessing.set_int_custom_property(_BLESSED_KEY, 0)
class Executor(base_executor.BaseExecutor):
  """TFX infra validator executor.

  Launches the model server declared in `ServingSpec` with the model under
  validation, optionally sends requests built from `Examples`, and writes an
  `InfraBlessing` artifact recording whether validation passed.
  """

  def __init__(self,
               context: Optional[base_executor.BaseExecutor.Context] = None):
    super(Executor, self).__init__(context)
    # Cleanup callbacks registered via _AddCleanup; invoked (best-effort) by
    # _Cleanup when validation finishes or is aborted.
    self._cleanups = []

  def _AddCleanup(self, function, *args, **kwargs):
    """Registers a callback to be invoked during _Cleanup."""
    self._cleanups.append(functools.partial(function, *args, **kwargs))

  def _Cleanup(self):
    """Runs all registered cleanup callbacks, logging individual failures."""
    for cleanup in self._cleanups:
      try:
        cleanup()
      # Narrowed from a bare `except:`, which would also swallow SystemExit
      # and KeyboardInterrupt; Exception keeps the best-effort semantics while
      # letting process-exit signals propagate.
      except Exception:  # pylint: disable=broad-except
        logging.warning('Error occurred during cleanup.', exc_info=True)

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Contract for running InfraValidator Executor.

    Args:
      input_dict:
        - `model`: Single `Model` artifact that we're validating.
        - `examples`: `Examples` artifacts to be used for test requests.
      output_dict:
        - `blessing`: Single `InfraBlessing` artifact containing the validated
          result. It is an empty file with the name either of INFRA_BLESSED or
          INFRA_NOT_BLESSED.
      exec_properties:
        - `serving_spec`: Serialized `ServingSpec` configuration.
        - `validation_spec`: Serialized `ValidationSpec` configuration.
        - `request_spec`: Serialized `RequestSpec` configuration.
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    model = artifact_utils.get_single_instance(input_dict[MODEL_KEY])
    blessing = artifact_utils.get_single_instance(output_dict[BLESSING_KEY])

    if input_dict.get(EXAMPLES_KEY):
      examples = artifact_utils.get_single_instance(input_dict[EXAMPLES_KEY])
    else:
      examples = None

    serving_spec = infra_validator_pb2.ServingSpec()
    proto_utils.json_to_proto(exec_properties[SERVING_SPEC_KEY], serving_spec)
    if not serving_spec.model_name:
      serving_spec.model_name = _DEFAULT_MODEL_NAME

    validation_spec = infra_validator_pb2.ValidationSpec()
    if exec_properties.get(VALIDATION_SPEC_KEY):
      proto_utils.json_to_proto(exec_properties[VALIDATION_SPEC_KEY],
                                validation_spec)
    # Fill in defaults for unset ValidationSpec fields.
    if not validation_spec.num_tries:
      validation_spec.num_tries = _DEFAULT_NUM_TRIES
    if not validation_spec.max_loading_time_seconds:
      validation_spec.max_loading_time_seconds = _DEFAULT_MAX_LOADING_TIME_SEC

    if exec_properties.get(REQUEST_SPEC_KEY):
      request_spec = infra_validator_pb2.RequestSpec()
      proto_utils.json_to_proto(exec_properties[REQUEST_SPEC_KEY],
                                request_spec)
    else:
      request_spec = None

    with self._InstallGracefulShutdownHandler():
      self._Do(
          model=model,
          examples=examples,
          blessing=blessing,
          serving_spec=serving_spec,
          validation_spec=validation_spec,
          request_spec=request_spec,
      )

  @contextlib.contextmanager
  def _InstallGracefulShutdownHandler(self):
    # pylint: disable=g-doc-return-or-yield
    """Install graceful shutdown behavior.

    Caveat: InfraValidator currently only recognizes SIGTERM signal as a
    graceful shutdown. Furthermore, SIGTERM can be handled only if the
    executor is running on the MainThread (the thread that runs the python
    interpreter) due to the limitation of Python API.

    When the executor is running on Kubernetes, SIGTERM is a standard way to
    signal the graceful shutdown. Python default behavior for receiving
    SIGTERM is to terminate the process without raising any exception. By
    registering a handler that raises on signal, we can effectively transform
    the signal to an exception, and we can reuse our cleanup code inside
    "except" or "finally" block during the grace period.

    When the executor is run by the local Beam DirectRunner, the executor
    thread is one of the worker threads (not a MainThread) therefore SIGTERM
    cannot be recognized. If either of MainThread or worker thread receives
    SIGTERM, executor will die immediately without grace period.

    Even if the executor fails to shutdown gracefully, external resources that
    are created by model server runner can be cleaned up if the platform
    supports such mechanism (e.g. activeDeadlineSeconds in Kubernetes).
    """

    def _handler(signum, frame):
      del frame  # Unused.
      raise error_types.GracefulShutdown('Got signal {}.'.format(signum))

    try:
      old_handler = signal.signal(signal.SIGTERM, _handler)
    except ValueError:
      # If current thread is not a MainThread, it is not allowed to register
      # the signal handler (ValueError raised).
      logging.info('Unable to register signal handler for non-MainThread '
                   '(name=%s). SIGTERM will not be handled.',
                   threading.current_thread().name)
      old_handler = None

    try:
      yield
    finally:
      self._Cleanup()
      if old_handler:
        signal.signal(signal.SIGTERM, old_handler)

  def _Do(
      self,
      model: types.Artifact,
      examples: Optional[types.Artifact],
      blessing: types.Artifact,
      serving_spec: infra_validator_pb2.ServingSpec,
      validation_spec: infra_validator_pb2.ValidationSpec,
      request_spec: Optional[infra_validator_pb2.RequestSpec],
  ):
    """Runs the validation and marks the blessing artifact accordingly."""
    if examples and request_spec:
      logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
      requests = request_builder.build_requests(
          model_name=serving_spec.model_name,
          model=model,
          examples=examples,
          request_spec=request_spec)
    else:
      logging.info('InfraValidator will be run in LOAD_ONLY mode.')
      requests = []

    model_path = self._PrepareModelPath(model.uri, serving_spec)

    # TODO(jjong): Make logic parallel.
    all_passed = True
    for serving_binary in serving_bins.parse_serving_binaries(serving_spec):
      all_passed &= self._ValidateWithRetry(
          model_path=model_path,
          serving_binary=serving_binary,
          serving_spec=serving_spec,
          validation_spec=validation_spec,
          requests=requests)

    if all_passed:
      _mark_blessed(blessing)
    else:
      _mark_not_blessed(blessing)

  def _PrepareModelPath(
      self, model_uri: Text,
      serving_spec: infra_validator_pb2.ServingSpec) -> Text:
    """Returns a model path conforming to the serving binary's layout.

    For TensorFlow Serving, the model may be copied into a temporary
    directory (cleaned up afterwards) to match TF Serving's expected
    directory structure.
    """
    model_path = path_utils.serving_model_path(model_uri)
    serving_binary = serving_spec.WhichOneof('serving_binary')
    if serving_binary == _TENSORFLOW_SERVING:
      # TensorFlow Serving requires model to be stored in its own directory
      # structure flavor. If current model_path does not conform to the
      # flavor, we need to make a copy to the temporary path.
      try:
        # Check whether current model_path conforms to the tensorflow serving
        # model path flavor. (Parsed without exception)
        tf_serving_flavor.parse_model_path(
            model_path,
            expected_model_name=serving_spec.model_name)
      except ValueError:
        # Copy the model to comply with the tensorflow serving model path
        # flavor.
        temp_model_path = tf_serving_flavor.make_model_path(
            model_base_path=self._get_tmp_dir(),
            model_name=serving_spec.model_name,
            version=int(time.time()))
        io_utils.copy_dir(src=model_path, dst=temp_model_path)
        self._AddCleanup(io_utils.delete_dir, self._context.get_tmp_path())
        return temp_model_path

    return model_path

  def _ValidateWithRetry(
      self, model_path: Text,
      serving_binary: serving_bins.ServingBinary,
      serving_spec: infra_validator_pb2.ServingSpec,
      validation_spec: infra_validator_pb2.ValidationSpec,
      requests: List[iv_types.Request]):
    """Runs _ValidateOnce up to `num_tries` times; True iff any try passes."""
    for i in range(validation_spec.num_tries):
      logging.info('Starting infra validation (attempt %d/%d).', i + 1,
                   validation_spec.num_tries)
      try:
        self._ValidateOnce(
            model_path=model_path,
            serving_binary=serving_binary,
            serving_spec=serving_spec,
            validation_spec=validation_spec,
            requests=requests)
      except error_types.GracefulShutdown:
        # GracefulShutdown means infra validation aborted. No more retry and
        # escalate the error.
        raise
      except Exception as e:  # pylint: disable=broad-except
        # Other exceptions indicates validation failure. Log the error and
        # retry.
        logging.exception('Infra validation (attempt %d/%d) failed.', i + 1,
                          validation_spec.num_tries)
        if isinstance(e, error_types.DeadlineExceeded):
          logging.info('Consider increasing the value of '
                       'ValidationSpec.max_loading_time_seconds.')
      else:
        # If validation has passed without any exception, succeeded.
        return True

    # Every trial has failed. Marking model as not blessed.
    return False

  def _ValidateOnce(
      self, model_path: Text,
      serving_binary: serving_bins.ServingBinary,
      serving_spec: infra_validator_pb2.ServingSpec,
      validation_spec: infra_validator_pb2.ValidationSpec,
      requests: List[iv_types.Request]):
    """Runs a single validation attempt: load the model, then query it."""
    deadline = time.time() + validation_spec.max_loading_time_seconds
    runner = _create_model_server_runner(
        model_path=model_path,
        serving_binary=serving_binary,
        serving_spec=serving_spec)

    try:
      logging.info('Starting %r.', runner)
      runner.Start()

      # Check model is successfully loaded.
      runner.WaitUntilRunning(deadline)
      client = serving_binary.MakeClient(runner.GetEndpoint())
      client.WaitUntilModelLoaded(
          deadline, polling_interval_sec=_DEFAULT_POLLING_INTERVAL_SEC)

      # Check model can be successfully queried.
      if requests:
        client.SendRequests(requests)
    finally:
      logging.info('Stopping %r.', runner)
      runner.Stop()
"""Module for shared interface of every model server clients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import time
from typing import List
from absl import logging
import six
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import types
class BaseModelServerClient(six.with_metaclass(abc.ABCMeta, object)):
  """Common interface for all model server clients."""

  @abc.abstractmethod
  def _GetServingStatus(self) -> types.ModelServingStatus:
    """Checks whether the model is available for query or not.

    Returns:
      A ModelServingStatus.
    """
    pass

  def WaitUntilModelLoaded(self, deadline: float,
                           polling_interval_sec: int) -> None:
    """Polls the server until the model is loaded and available.

    Args:
      deadline: A deadline time in UTC timestamp (in seconds).
      polling_interval_sec: _GetServingStatus() polling interval.

    Raises:
      DeadlineExceeded: When deadline exceeded before model is ready.
      ValidationFailed: If validation failed explicitly.
    """
    while time.time() < deadline:
      status = self._GetServingStatus()
      if status == types.ModelServingStatus.UNAVAILABLE:
        # Server reported a terminal failure; no point in polling further.
        raise error_types.ValidationFailed(
            'Model server failed to load the model.')
      if status != types.ModelServingStatus.NOT_READY:
        logging.info('Model is successfully loaded.')
        return
      # Still loading: log (rate-limited) and poll again after a pause.
      logging.log_every_n_seconds(
          level=logging.INFO,
          n_seconds=10,
          msg='Waiting for model to be loaded...')
      time.sleep(polling_interval_sec)
    raise error_types.DeadlineExceeded(
        'Deadline exceeded while waiting the model to be loaded.')

  @abc.abstractmethod
  def _SendRequest(self, request: types.Request) -> None:
    """Sends a request to the model server.

    Args:
      request: A request proto.
    """
    pass

  def SendRequests(self, requests: List[types.Request]) -> None:
    """Sends requests to the model server.

    Args:
      requests: A list of request protos.

    Raises:
      ValidationFailed: If error occurred while sending requests.
    """
    for request in requests:
      try:
        self._SendRequest(request)
      except Exception as original_error:  # pylint: disable=broad-except
        # Wrap any transport/server error into a validation failure, keeping
        # the original exception chained as the cause.
        six.raise_from(
            error_types.ValidationFailed(
                'Model server failed to respond to the request {}'.format(
                    request)),
            original_error)
"""Module for shared interface of every model server runners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Text
import six
class BaseModelServerRunner(six.with_metaclass(abc.ABCMeta, object)):
"""Shared interface of all model server runners.
Model server runner is responsible for managing the model server job and
relevant resources in the serving platform. For example, model server runner
for kubernetes will launch README.ml-pipelines-sdk.md Pod of model server with required resources
allocated, and tear down all the kubernetes resources once infra validation
is done. Note that model server runner does *not* interact with model server
app.
Model server job have 5 states: Initial, Scheduled, Running, Aborted, and End.
Each state transition is depicted in the diagram below.
```
+-----------+
| Initial |
+-----+-----+
| Start()
+-----v-----+
+--+ Scheduled |
| +-----+-----+
| | WaitUntilRunning()
| +-----v-----+
+--+ Running |
| +-----+-----+
| |
+-----v-----+ |
| Aborted +--+ Stop()
+-----------+ |
|
+-----v-----+
| End |
+-----------+
```
At any step, the job can be aborted in the serving platform. Model server
runner will NOT recover README.ml-pipelines-sdk.md job from failure (even if it can) and regard the
abortion as README.ml-pipelines-sdk.md validation failure.
All the infra validation logic (waiting for model loaded, sending requests,
measuring metrics, etc.) will happen when model server job has reached Running
state. This is not README.ml-pipelines-sdk.md scope of model server runner work.
Depending on the serving platform, some of the states might be the same. For
example, in README.ml-pipelines-sdk.md GCP cloud AI prediction service we have README.ml-pipelines-sdk.md global model server
instance running, which makes Scheduled state and Running state
indistinguishable. In such case, `WaitUntilRunning()` action will be README.ml-pipelines-sdk.md no-op.
"""
@abc.abstractmethod
def __repr__(self) -> Text:
pass
@abc.abstractmethod
def GetEndpoint(self) -> Text:
"""Get an endpoint to the model server to connect to.
Endpoint will be available after the model server job has reached the
Running state.
Raises:
AssertionError: if runner hasn't reached the Running state.
"""
@abc.abstractmethod
def Start(self) -> None:
"""Start the model server in non-blocking manner.
`Start()` will transition the job state from Initial to Scheduled. Serving
platform will turn the job into Running state in the future.
In `Start()`, model server runner should prepare the resources model server
requires including config files, environment variables, volumes, proper
authentication, computing resource allocation, etc.. Cleanup for the
resources does not happen automatically, and you should call `Stop()` to do
that if you have ever called `Start()`.
It is not allowed to run `Start()` twice. If you need to restart the job,
you should create another model server runner instance.
"""
@abc.abstractmethod
def WaitUntilRunning(self, deadline: float) -> None:
"""Wait until model server job is running.
When this method is returned without error, the model server job is in the
Running state where you can perform all the infra validation logic. It does
not guarantee that model server job would remain in the Running state
forever, (e.g. preemption could happen in some serving platform) and any
kind of infra validation logic failure can be caused from model server job
not being in the Running state. Still, it is README.ml-pipelines-sdk.md validation failure and we
blame model for this.
Args:
deadline: A deadline time in UTC timestamp (in seconds).
Returns:
Whether the model is available or not.
"""
@abc.abstractmethod
def Stop(self) -> None:
"""Stop the model server in blocking manner.
Model server job would be gracefully stopped once infra validation logic is
done. Here is the place you need to cleanup every resources you've created
in the `Start()`. It is recommended not to raise error during the `Stop()`
as it will usually be called in the `finally` block.
`Stop()` is guaranteed to be called if `Start()` is ever called, unless the
process dies unexpectedly due to external factors (e.g. SIGKILL). `Stop()`
can be called even when `Start()` was not completed. `Stop()` should not
assume the completion of `Start()`.
`Stop()` is also called when graceful shutdown for the *executor* (not
model server) is requested. `Stop()` method should be finished within the
graceful shutdown period, and it is perfectly fine to add README.ml-pipelines-sdk.md retry logic
inside `Stop()` until the deadline is met.
""" | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/infra_validator/model_server_runners/base_runner.py | 0.921631 | 0.70216 | base_runner.py | pypi |
"""FnArgs for passing information to UDF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Text
import absl
import attr
import pyarrow as pa
import tensorflow as tf
from tfx import types
from tfx.components.util import tfxio_utils
from tfx.proto import trainer_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx.utils import proto_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
# Identifies this component in the TFXIO data-access factories created in
# `get_common_fn_args` below.
_TELEMETRY_DESCRIPTORS = ['Trainer']
class DataAccessor(NamedTuple):
  """Factories for reading the `examples` artifact in different formats.

  Attributes:
    tf_dataset_factory: Builds a `tf.data.Dataset` from a list of file
      patterns, TF dataset options and an optional schema.
    record_batch_factory: Builds an iterator of `pyarrow.RecordBatch` from a
      list of file patterns, record-batch options and an optional schema.
  """
  tf_dataset_factory: Callable[[
      List[Text],
      dataset_options.TensorFlowDatasetOptions,
      Optional[schema_pb2.Schema],
  ], tf.data.Dataset]
  record_batch_factory: Callable[[
      List[Text],
      dataset_options.RecordBatchesOptions,
      Optional[schema_pb2.Schema],
  ], Iterator[pa.RecordBatch]]
@attr.s
class FnArgs:
  """Args to pass to user defined training/tuning function(s).

  NOTE: attrs generates a positional `__init__` from the declaration order
  below, so the field order is part of this class's interface.

  Attributes:
    working_dir: Working dir.
    train_files: A list of patterns for train files.
    eval_files: A list of patterns for eval files.
    train_steps: Number of train steps.
    eval_steps: Number of eval steps.
    schema_path: A single uri for schema file. Will be None if not specified.
    schema_file: Deprecated, use `schema_path` instead.
    transform_graph_path: An optional single uri for transform graph produced
      by TFT. Will be None if not specified.
    transform_output: Deprecated, use `transform_graph_path` instead.
    data_accessor: Contains factories that can create tf.data.Datasets or
      other means to access the train/eval data. They provide a uniform way of
      accessing data, regardless of how the data is stored on disk.
    serving_model_dir: A single uri for the output directory of the serving
      model.
    eval_model_dir: A single uri for the output directory of the eval model.
      Note that this is estimator only, Keras doesn't require it for TFMA.
    model_run_dir: A single uri for the output directory of model training
      related files.
    base_model: An optional base model path that will be used for this
      training.
    hyperparameters: An optional kerastuner.HyperParameters config.
    custom_config: An optional dictionary passed to the component.
  """

  working_dir = attr.ib(type=Text, default=None)
  train_files = attr.ib(type=List[Text], default=None)
  eval_files = attr.ib(type=List[Text], default=None)
  train_steps = attr.ib(type=int, default=None)
  eval_steps = attr.ib(type=int, default=None)
  schema_path = attr.ib(type=Text, default=None)
  schema_file = attr.ib(type=Text, default=None)
  transform_graph_path = attr.ib(type=Text, default=None)
  transform_output = attr.ib(type=Text, default=None)
  data_accessor = attr.ib(type=DataAccessor, default=None)
  serving_model_dir = attr.ib(type=Text, default=None)
  eval_model_dir = attr.ib(type=Text, default=None)
  model_run_dir = attr.ib(type=Text, default=None)
  base_model = attr.ib(type=Text, default=None)
  # NOTE(review): declared as Text but callers appear to assign a parsed
  # hyperparameters dict (see trainer executor) — confirm intended type.
  hyperparameters = attr.ib(type=Text, default=None)
  custom_config = attr.ib(type=Dict[Text, Any], default=None)
def get_common_fn_args(input_dict: Dict[Text, List[types.Artifact]],
                       exec_properties: Dict[Text, Any],
                       working_dir: Optional[Text] = None) -> FnArgs:
  """Get common args of training and tuning.

  Args:
    input_dict: Input dict from input key to a list of ML-Metadata Artifacts.
      Must contain an 'examples' entry; 'transform_graph' and 'schema' entries
      are optional.
    exec_properties: A dict of execution properties. Must contain
      JSON-serialized 'train_args' and 'eval_args'; 'custom_config' is
      optional.
    working_dir: Optional working directory recorded on the returned FnArgs.

  Returns:
    A FnArgs populated with the fields shared by training and tuning
    (file patterns, steps, schema/transform-graph paths, data accessor and
    custom config). Executor-specific fields (e.g. serving_model_dir) are
    left unset.
  """
  if input_dict.get(standard_component_specs.TRANSFORM_GRAPH_KEY):
    transform_graph_path = artifact_utils.get_single_uri(
        input_dict[standard_component_specs.TRANSFORM_GRAPH_KEY])
  else:
    transform_graph_path = None

  if input_dict.get(standard_component_specs.SCHEMA_KEY):
    schema_path = io_utils.get_only_uri_in_dir(
        artifact_utils.get_single_uri(
            input_dict[standard_component_specs.SCHEMA_KEY]))
  else:
    schema_path = None

  train_args = trainer_pb2.TrainArgs()
  eval_args = trainer_pb2.EvalArgs()
  proto_utils.json_to_proto(
      exec_properties[standard_component_specs.TRAIN_ARGS_KEY], train_args)
  proto_utils.json_to_proto(
      exec_properties[standard_component_specs.EVAL_ARGS_KEY], eval_args)

  # Default behavior is train on `train` split (when splits is empty in train
  # args) and evaluate on `eval` split (when splits is empty in eval args).
  if not train_args.splits:
    train_args.splits.append('train')
    absl.logging.info("Train on the 'train' split when train_args.splits is "
                      'not set.')
  if not eval_args.splits:
    eval_args.splits.append('eval')
    absl.logging.info("Evaluate on the 'eval' split when eval_args.splits is "
                      'not set.')

  train_files = []
  for train_split in train_args.splits:
    train_files.extend([
        io_utils.all_files_pattern(uri)
        for uri in artifact_utils.get_split_uris(
            input_dict[standard_component_specs.EXAMPLES_KEY], train_split)
    ])
  eval_files = []
  for eval_split in eval_args.splits:
    eval_files.extend([
        io_utils.all_files_pattern(uri)
        for uri in artifact_utils.get_split_uris(
            input_dict[standard_component_specs.EXAMPLES_KEY], eval_split)
    ])

  data_accessor = DataAccessor(
      tf_dataset_factory=tfxio_utils.get_tf_dataset_factory_from_artifact(
          input_dict[standard_component_specs.EXAMPLES_KEY],
          _TELEMETRY_DESCRIPTORS),
      record_batch_factory=tfxio_utils.get_record_batch_factory_from_artifact(
          input_dict[standard_component_specs.EXAMPLES_KEY],
          _TELEMETRY_DESCRIPTORS))

  # https://github.com/tensorflow/tfx/issues/45: Replace num_steps=0 with
  # num_steps=None. Conversion of the proto to python will set the default
  # value of an int as 0 so modify the value here. Tensorflow will raise an
  # error if num_steps <= 0.
  train_steps = train_args.num_steps or None
  eval_steps = eval_args.num_steps or None

  # Load and deserialize custom config from execution properties.
  # Note that in the component interface the default serialization of custom
  # config is 'null' instead of '{}'. Therefore we need to default the
  # json_utils.loads to 'null' then populate it with an empty dict when
  # needed.
  custom_config = json_utils.loads(
      exec_properties.get(standard_component_specs.CUSTOM_CONFIG_KEY, 'null'))

  return FnArgs(
      working_dir=working_dir,
      train_files=train_files,
      eval_files=eval_files,
      train_steps=train_steps,
      eval_steps=eval_steps,
      schema_path=schema_path,
      transform_graph_path=transform_graph_path,
      data_accessor=data_accessor,
      custom_config=custom_config,
  )
"""TFX Trainer component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.trainer import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.proto import trainer_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TrainerSpec
from tfx.utils import json_utils
# TODO(b/147702778): update when switch generic executor as default.
class Trainer(base_component.BaseComponent):
  """A TFX component to train a TensorFlow model.

  The Trainer component is used to train and eval a model using given inputs
  and a user-supplied estimator.

  ## Providing an estimator
  The TFX executor will use the estimator provided in the `module_file` file
  to train the model.  The Trainer executor will look specifically for the
  `trainer_fn()` function within that file.  Before training, the executor
  will call that function expecting the following returned as a dictionary:

    - estimator: The
      [estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
      to be used by TensorFlow to train the model.
    - train_spec: The
      [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)
      to be used by the "train" part of the TensorFlow `train_and_evaluate()`
      call.
    - eval_spec: The
      [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)
      to be used by the "eval" part of the TensorFlow `train_and_evaluate()`
      call.
    - eval_input_receiver_fn: The
      [configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)
      to be used
      by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)
      component when validating the model.

  An example of `trainer_fn()` can be found in the [user-supplied
  code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))
  of the TFX Chicago Taxi pipeline example.

  *Note:* The default executor for this component trains locally.  This can be
  overridden to enable the model to be trained on other platforms.  The [Cloud
  AI Platform custom
  executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/trainer)
  provides an example how to implement this.

  Please see https://www.tensorflow.org/guide/estimators for more details.

  ## Example 1: Training locally
  ```
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=infer_schema.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(splits=['train'], num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(splits=['eval'], num_steps=5000))
  ```

  ## Example 2: Training through a cloud provider
  ```
  from tfx.extensions.google_cloud_ai_platform.trainer import executor as
  ai_platform_trainer_executor
  # Train using Google Cloud AI Platform.
  trainer = Trainer(
      custom_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_trainer_executor.Executor),
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=infer_schema.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(splits=['train'], num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(splits=['eval'], num_steps=5000))
  ```
  """

  SPEC_CLASS = TrainerSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      examples: Optional[types.Channel] = None,
      transformed_examples: Optional[types.Channel] = None,
      transform_graph: Optional[types.Channel] = None,
      schema: Optional[types.Channel] = None,
      base_model: Optional[types.Channel] = None,
      hyperparameters: Optional[types.Channel] = None,
      module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      run_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      # TODO(b/147702778): deprecate trainer_fn.
      trainer_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      train_args: Optional[Union[trainer_pb2.TrainArgs, Dict[Text,
                                                             Any]]] = None,
      eval_args: Optional[Union[trainer_pb2.EvalArgs, Dict[Text, Any]]] = None,
      custom_config: Optional[Dict[Text, Any]] = None,
      custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
      model: Optional[types.Channel] = None,
      model_run: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct a Trainer component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, serving as
        the source of examples used in training (required). May be raw or
        transformed.
      transformed_examples: Deprecated field. Please set 'examples' instead.
      transform_graph: An optional Channel of type
        `standard_artifacts.TransformGraph`, serving as the input transform
        graph if present.
      schema: An optional Channel of type `standard_artifacts.Schema`, serving
        as the schema of training and eval data. Schema is optional when
        1) transform_graph is provided which contains schema.
        2) user module bypasses the usage of schema, e.g., hardcoded.
      base_model: A Channel of type `Model`, containing model that will be
        used for training. This can be used for warmstart, transfer learning
        or model ensembling.
      hyperparameters: A Channel of type `standard_artifacts.HyperParameters`,
        serving as the hyperparameters for training module. Tuner's output
        best hyperparameters can be feed into this.
      module_file: A path to python module file containing UDF model
        definition.
        For default executor, The module_file must implement a function named
        `trainer_fn` at its top level. The function must have the following
        signature.
          def trainer_fn(trainer.fn_args_utils.FnArgs,
                         tensorflow_metadata.proto.v0.schema_pb2) -> Dict:
            ...
        where the returned Dict has the following key-values.
          'estimator': an instance of tf.estimator.Estimator
          'train_spec': an instance of tf.estimator.TrainSpec
          'eval_spec': an instance of tf.estimator.EvalSpec
          'eval_input_receiver_fn': an instance of
            tfma.export.EvalInputReceiver. Exactly one of 'module_file' or
            'trainer_fn' must be supplied.
        For generic executor, The module_file must implement a function named
        `run_fn` at its top level with function signature:
        `def run_fn(trainer.fn_args_utils.FnArgs)`, and the trained model must
        be saved to FnArgs.serving_model_dir when execute this function.
      run_fn: A python path to UDF model definition function for generic
        trainer. See 'module_file' for details. Exactly one of 'module_file'
        or 'run_fn' must be supplied if Trainer uses GenericExecutor.
      trainer_fn: A python path to UDF model definition function for estimator
        based trainer. See 'module_file' for the required signature of the
        UDF. Exactly one of 'module_file' or 'trainer_fn' must be supplied.
      train_args: A trainer_pb2.TrainArgs instance or a dict, containing args
        used for training. Currently only splits and num_steps are available.
        If it's provided as a dict and any field is a RuntimeParameter, it
        should have the same field names as a TrainArgs proto message. Default
        behavior (when splits is empty) is train on `train` split.
      eval_args: A trainer_pb2.EvalArgs instance or a dict, containing args
        used for evaluation. Currently only splits and num_steps are
        available. If it's provided as a dict and any field is a
        RuntimeParameter, it should have the same field names as a EvalArgs
        proto message. Default behavior (when splits is empty) is evaluate on
        `eval` split.
      custom_config: A dict which contains additional training job parameters
        that will be passed into user module.
      custom_executor_spec: Optional custom executor spec.
      model: Optional `Model` channel for result of exported models.
      model_run: Optional `ModelRun` channel, as the working dir of models,
        can be used to output non-model related output (e.g., TensorBoard
        logs).
      instance_name: Optional unique instance name. Necessary iff multiple
        Trainer components are declared in the same pipeline.

    Raises:
      ValueError:
        - When both or neither of 'module_file' and user function
          (e.g., trainer_fn and run_fn) is supplied.
        - When both or neither of 'examples' and 'transformed_examples'
          is supplied.
        - When 'transformed_examples' is supplied but 'transform_graph'
          is not supplied.
    """
    if [bool(module_file), bool(run_fn), bool(trainer_fn)].count(True) != 1:
      raise ValueError(
          "Exactly one of 'module_file', 'trainer_fn', or 'run_fn' must be "
          "supplied.")

    if bool(examples) == bool(transformed_examples):
      raise ValueError(
          "Exactly one of 'examples' or 'transformed_examples' must be "
          "supplied.")

    if transformed_examples and not transform_graph:
      raise ValueError("If 'transformed_examples' is supplied, "
                       "'transform_graph' must be supplied too.")

    examples = examples or transformed_examples
    model = model or types.Channel(type=standard_artifacts.Model)
    model_run = model_run or types.Channel(type=standard_artifacts.ModelRun)
    spec = TrainerSpec(
        examples=examples,
        transform_graph=transform_graph,
        schema=schema,
        base_model=base_model,
        hyperparameters=hyperparameters,
        train_args=train_args,
        eval_args=eval_args,
        module_file=module_file,
        run_fn=run_fn,
        trainer_fn=trainer_fn,
        custom_config=json_utils.dumps(custom_config),
        model=model,
        model_run=model_run)
    super(Trainer, self).__init__(
        spec=spec,
        custom_executor_spec=custom_executor_spec,
        instance_name=instance_name)
"""TFX local trainer executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from typing import Any, Dict, List, Text
import absl
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.trainer import constants
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import deprecation_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
# Backwards-compatibility alias: old code imported `TrainerFnArgs` from this
# module; new code should use `tfx.components.trainer.fn_args_utils.FnArgs`.
TrainerFnArgs = deprecation_utils.deprecated_alias(  # pylint: disable=invalid-name
    deprecated_name='tfx.components.trainer.executor.TrainerFnArgs',
    name='tfx.components.trainer.fn_args_utils.FnArgs',
    func_or_class=fn_args_utils.FnArgs)
def _all_files_pattern(file_pattern: Text) -> Text:
return os.path.join(file_pattern, '*')
def _is_chief():
  """Returns true if this is run in the master (chief) of training cluster."""
  tf_config = json.loads(os.environ.get(constants.TF_CONFIG_ENV) or '{}')
  # Without a cluster spec we run in non-distributed mode, so the current
  # process always behaves as the chief.
  if not tf_config.get('cluster'):
    return True
  task = tf_config['task']
  # 'master' is a legacy notation of chief node in distributed training flock.
  return task['type'] == 'chief' or (
      task['type'] == 'master' and task['index'] == 0)
class GenericExecutor(base_executor.BaseExecutor):
  """Local generic trainer executor for the TFX Trainer component.

  The Trainer executor supplements TensorFlow training with a component to
  enable warm-start training of any user-specified TF model. The Trainer is
  a library built on top of TensorFlow that is expected to be integrated into
  a custom user-specified binary.

  To include Trainer in a TFX pipeline, configure your pipeline similar to
  https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L104.

  For more details on the Trainer component itself, please refer to
  https://tensorflow.org/tfx/guide/trainer.  For a tutorial on Tensorflow,
  please refer to https://www.tensorflow.org/tutorials.

  How to create a trainer callback function to be used by this Trainer
  executor: A model training can be executed by TFX by first creating a run_fn
  callback method that defines, trains an TF Model and saves it to the
  provided location, This becomes the basis of the Executor for
  GenericTrainer. This Executor will then execute the run_fn with correct
  parameters by resolving the input artifacts, output artifacts and execution
  properties.
  """

  # Name of subdirectory which contains checkpoints from prior runs
  _CHECKPOINT_FILE_NAME = 'checkpoint'

  def _GetFnArgs(self, input_dict: Dict[Text, List[types.Artifact]],
                 output_dict: Dict[Text, List[types.Artifact]],
                 exec_properties: Dict[Text, Any]) -> fn_args_utils.FnArgs:
    """Resolves artifacts and properties into the FnArgs for the user UDF."""
    # TODO(ruoyu): Make this a dict of tag -> uri instead of list.
    if input_dict.get(standard_component_specs.BASE_MODEL_KEY):
      base_model = path_utils.serving_model_path(
          artifact_utils.get_single_uri(
              input_dict[standard_component_specs.BASE_MODEL_KEY]))
    else:
      base_model = None

    if input_dict.get(standard_component_specs.HYPERPARAMETERS_KEY):
      hyperparameters_file = io_utils.get_only_uri_in_dir(
          artifact_utils.get_single_uri(
              input_dict[standard_component_specs.HYPERPARAMETERS_KEY]))
      hyperparameters_config = json.loads(
          file_io.read_file_to_string(hyperparameters_file))
    else:
      hyperparameters_config = None

    output_path = artifact_utils.get_single_uri(
        output_dict[standard_component_specs.MODEL_KEY])
    serving_model_dir = path_utils.serving_model_dir(output_path)
    eval_model_dir = path_utils.eval_model_dir(output_path)

    model_run_dir = artifact_utils.get_single_uri(
        output_dict[standard_component_specs.MODEL_RUN_KEY])

    # TODO(b/126242806) Use PipelineInputs when it is available in third_party.
    result = fn_args_utils.get_common_fn_args(input_dict, exec_properties)
    if result.custom_config and not isinstance(result.custom_config, dict):
      raise ValueError('custom_config in execution properties needs to be a '
                       'dict. Got %s instead.' % type(result.custom_config))
    result.transform_output = result.transform_graph_path
    result.serving_model_dir = serving_model_dir
    result.eval_model_dir = eval_model_dir
    result.model_run_dir = model_run_dir
    result.schema_file = result.schema_path
    result.base_model = base_model
    result.hyperparameters = hyperparameters_config
    return result

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Uses a user-supplied run_fn to train a TensorFlow model locally.

    The Trainer Executor invokes a run_fn callback function provided by
    the user via the module_file parameter. In this function, user defines the
    model and trains it, then saves the model and training related files
    (e.g, Tensorboard logs) to the provided locations.

    Args:
      input_dict: Input dict from input key to a list of ML-Metadata
        Artifacts.
        - examples: Examples used for training, must include 'train' and
          'eval' if custom splits is not specified in train_args and
          eval_args.
        - transform_graph: Optional input transform graph.
        - transform_output: Optional input transform graph, deprecated.
        - schema: Schema of the data.
      output_dict: Output dict from output key to a list of Artifacts.
        - model: Exported model.
        - model_run: Model training related outputs (e.g., Tensorboard logs)
      exec_properties: A dict of execution properties.
        - train_args: JSON string of trainer_pb2.TrainArgs instance, providing
          args for training.
        - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing
          args for eval.
        - module_file: Python module file containing UDF model definition.
        - warm_starting: Whether or not we need to do warm starting.
        - warm_start_from: Optional. If warm_starting is True, this is the
          directory to find previous model to warm start on.
        - custom_config: Optional. JSON-serialized dict of additional
          parameters to pass to trainer function.

    Returns:
      None

    Raises:
      ValueError: When neither or both of 'module_file' and 'run_fn'
        are present in 'exec_properties'.
      RuntimeError: If run_fn failed to generate model in desired location.
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    fn_args = self._GetFnArgs(input_dict, output_dict, exec_properties)
    run_fn = udf_utils.get_fn(exec_properties, 'run_fn')

    # Train the model
    absl.logging.info('Training model.')
    run_fn(fn_args)

    # Note: If trained with multi-node distribution workers, it is the user
    # module's responsibility to export the model only once.
    if not fileio.exists(fn_args.serving_model_dir):
      raise RuntimeError('run_fn failed to generate model.')

    absl.logging.info(
        'Training complete. Model written to %s. ModelRun written to %s',
        fn_args.serving_model_dir, fn_args.model_run_dir)
class Executor(GenericExecutor):
  """Local estimator based trainer executor used by the TFX Trainer component.

  How to create a trainer callback function to be used by this Trainer
  executor: An estimator can be executed by TFX by first creating a trainer_fn
  callback method that returns an estimator and some additional parameters,
  similar to
  https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285.
  This becomes the basis of the new Executor for Trainer. This Executor will
  then train and evaluate this estimator using the
  tf.estimator.train_and_evaluate API to train locally.
  """

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Uses a user-supplied tf.estimator to train a TensorFlow model locally.

    The Trainer Executor invokes a training_fn callback function provided by
    the user via the module_file parameter. With the tf.estimator returned by
    this function, the Trainer Executor then builds a TensorFlow model using
    the user-provided tf.estimator.

    Args:
      input_dict: Input dict from input key to a list of ML-Metadata
        Artifacts.
        - examples: Examples used for training, must include 'train' and
          'eval' if custom splits is not specified in train_args and
          eval_args.
        - transform_graph: Optional input transform graph.
        - schema: Schema of the data.
      output_dict: Output dict from output key to a list of Artifacts.
        - model: Exported model.
        - model_run: Model training related outputs (e.g., Tensorboard logs)
      exec_properties: A dict of execution properties.
        - train_args: JSON string of trainer_pb2.TrainArgs instance, providing
          args for training.
        - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing
          args for eval.
        - module_file: Python module file containing UDF model definition.
        - warm_starting: Whether or not we need to do warm starting.
        - warm_start_from: Optional. If warm_starting is True, this is the
          directory to find previous model to warm start on.
        - custom_config: Optional. JSON-serialized dict of additional
          parameters to pass to trainer function.

    Returns:
      None

    Raises:
      ValueError: When neither or both of 'module_file' and 'trainer_fn'
        are present in 'exec_properties'.
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    fn_args = self._GetFnArgs(input_dict, output_dict, exec_properties)
    trainer_fn = udf_utils.get_fn(exec_properties, 'trainer_fn')

    schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())

    # TODO(b/160795287): Deprecate estimator based executor.
    # Provide user with a modified fn_args, with model_run given as
    # the working directory. Executor will then copy user models to
    # model artifact directory.
    serving_dest = fn_args.serving_model_dir
    eval_dest = fn_args.eval_model_dir

    working_dir = fn_args.model_run_dir
    fn_args.serving_model_dir = path_utils.serving_model_dir(working_dir)
    fn_args.eval_model_dir = path_utils.eval_model_dir(working_dir)

    training_spec = trainer_fn(fn_args, schema)

    # Train the model
    absl.logging.info('Training model.')
    tf.estimator.train_and_evaluate(training_spec['estimator'],
                                    training_spec['train_spec'],
                                    training_spec['eval_spec'])

    absl.logging.info(
        'Training complete. Model written to %s. ModelRun written to %s',
        fn_args.serving_model_dir, fn_args.model_run_dir)

    # Export an eval savedmodel for TFMA. If distributed training, it must only
    # be written by the chief worker, as would be done for serving savedmodel.
    if _is_chief():
      absl.logging.info('Exporting eval_savedmodel for TFMA.')
      tfma.export.export_eval_savedmodel(
          estimator=training_spec['estimator'],
          export_dir_base=fn_args.eval_model_dir,
          eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])

      absl.logging.info('Exported eval_savedmodel to %s.',
                        fn_args.eval_model_dir)

      # TODO(b/160795287): Deprecate estimator based executor.
      # Copy serving and eval model from model_run to model artifact directory.
      serving_source = path_utils.serving_model_path(fn_args.model_run_dir)
      io_utils.copy_dir(serving_source, serving_dest)
      absl.logging.info('Serving model copied to: %s.', serving_dest)

      eval_source = path_utils.eval_model_path(fn_args.model_run_dir)
      io_utils.copy_dir(eval_source, eval_dest)
      absl.logging.info('Eval model copied to: %s.', eval_dest)

    else:
      absl.logging.info(
          'Model export is skipped because this is not the chief worker.')
"""Converters rewrite models using the provided rewriters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from typing import Text
import tensorflow as tf
from tfx.components.trainer.rewriting import rewriter
from tfx.dsl.io import fileio
def _invoke_rewriter(src: Text, dst: Text, rewriter_inst: rewriter.BaseRewriter,
                     src_model_type: rewriter.ModelType,
                     dst_model_type: rewriter.ModelType):
  """Rewrites the model at `src` into `dst` using the given rewriter.

  Args:
    src: Path to the source model.
    dst: Path where the destination model is to be written.
    rewriter_inst: instance of the rewriter to invoke.
    src_model_type: the `rewriter.ModelType` of the source model.
    dst_model_type: the `rewriter.ModelType` of the destination model.

  Raises:
    ValueError: if the source path is the same as the destination path.
  """
  if src == dst:
    raise ValueError('Source path and destination path cannot match.')

  rewriter_inst.perform_rewrite(
      rewriter.ModelDescription(src_model_type, src),
      rewriter.ModelDescription(dst_model_type, dst))
class RewritingExporter(tf.estimator.Exporter):
  """This class invokes the base exporter and a series of rewriters."""

  def __init__(self, base_exporter: tf.estimator.Exporter,
               rewriter_inst: rewriter.BaseRewriter):
    """Initializes the rewriting exporter.

    Args:
      base_exporter: The exporter of the original model.
      rewriter_inst: The rewriter instance to invoke. Must inherit from
        `rewriter.BaseRewriter`.
    """
    self._base_exporter = base_exporter
    self._rewriter_inst = rewriter_inst

  @property
  def name(self):
    """Name of the exporter."""
    return self._base_exporter.name

  def export(self, estimator, export_path, checkpoint_path, eval_result,
             is_the_final_export):
    """Exports the given `Estimator` to a specific format.

    Performs the export as defined by the base_exporter and invokes all of the
    specified rewriters.

    Args:
      estimator: the `Estimator` to export.
      export_path: A string containing a directory where to write the export.
      checkpoint_path: The checkpoint path to export.
      eval_result: The output of `Estimator.evaluate` on this checkpoint.
      is_the_final_export: This boolean is True when this is an export in the
        end of training.  It is False for the intermediate exports during the
        training.  When passing `Exporter` to `tf.estimator.train_and_evaluate`
        `is_the_final_export` is always False if `TrainSpec.max_steps` is
        `None`.

    Returns:
      The string path to the base exported directory or `None` if export is
      skipped.

    Raises:
      RuntimeError: Unable to create a temporary rewrite directory.
    """
    base_path = self._base_exporter.export(estimator, export_path,
                                           checkpoint_path, eval_result,
                                           is_the_final_export)
    if not base_path:
      return None

    # Rewrite into a time-stamped temporary directory, then atomically swap it
    # into place so a partially-rewritten model never sits at `base_path`.
    tmp_rewrite_folder = 'tmp-rewrite-' + str(int(time.time()))
    tmp_rewrite_path = os.path.join(export_path, tmp_rewrite_folder)
    if fileio.exists(tmp_rewrite_path):
      raise RuntimeError('Unable to create a unique temporary rewrite path.')
    fileio.makedirs(tmp_rewrite_path)

    _invoke_rewriter(base_path, tmp_rewrite_path, self._rewriter_inst,
                     rewriter.ModelType.SAVED_MODEL,
                     rewriter.ModelType.ANY_MODEL)

    fileio.rmtree(base_path)
    fileio.rename(tmp_rewrite_path, base_path)

    return base_path
def rewrite_saved_model(
    src: Text,
    dst: Text,
    rewriter_inst: rewriter.BaseRewriter,
    dst_model_type: rewriter.ModelType = rewriter.ModelType.SAVED_MODEL):
  """Rewrites the SavedModel at `src` into `dst` with the given rewriter.

  Args:
    src: Location of the saved_model to rewrite.
    dst: Location of the rewritten saved_model.
    rewriter_inst: The rewriter instance to invoke. Must inherit from
      `rewriter.BaseRewriter`.
    dst_model_type: The `rewriter.ModelType` of the destination model.
  """
  # The source is always a SavedModel; only the destination type varies.
  _invoke_rewriter(src, dst, rewriter_inst,
                   src_model_type=rewriter.ModelType.SAVED_MODEL,
                   dst_model_type=dst_model_type)
"""Rewriter that invokes the TFJS converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
import six
from tensorflowjs.converters import converter
from tfx.components.trainer.rewriting import rewriter
# Command-line flags passed to the tensorflowjs `converter.convert` call in
# `_convert_tfjs_model` below.
CONVERTER_SAVED_MODEL_INPUT_FLAG = '--input_format=tf_saved_model'
CONVERTER_SERVING_TAG_FLAG = '--saved_model_tags=serve'
CONVERTER_DEFAULT_SIGNATURE_FLAG = '--signature_name=serving_default'
def _convert_tfjs_model(saved_model_path: Text, destination_path: Text):
  """Invokes the tensorflowjs converter on a SavedModel.

  Args:
    saved_model_path: Path of the SavedModel to convert.
    destination_path: Path where the converted model is written.
  """
  converter_args = [
      CONVERTER_SAVED_MODEL_INPUT_FLAG,
      CONVERTER_SERVING_TAG_FLAG,
      CONVERTER_DEFAULT_SIGNATURE_FLAG,
      saved_model_path,
      destination_path,
  ]
  converter.convert(converter_args)
class TFJSRewriter(rewriter.BaseRewriter):
  """Rewriter that converts a SavedModel to the TFJS format."""

  def __init__(self, name: Text):
    """Creates an instance of the TFJSRewriter.

    Args:
      name: The name to use when identifying the rewriter.
    """
    self._name = name

  @property
  def name(self) -> Text:
    """The user-specified name of the rewriter."""
    return self._name

  def _pre_rewrite_validate(self, original_model: rewriter.ModelDescription):
    """Checks that the model to be rewritten is a SavedModel.

    Args:
      original_model: A `ModelDescription` object describing the model to be
        rewritten.

    Raises:
      ValueError: If the original model does not have the expected structure.
    """
    if original_model.model_type is not rewriter.ModelType.SAVED_MODEL:
      raise ValueError('TFJSRewriter can only convert SavedModels.')

  def _rewrite(self, original_model: rewriter.ModelDescription,
               rewritten_model: rewriter.ModelDescription):
    """Converts the provided SavedModel into a TFJS model.

    Args:
      original_model: A `ModelDescription` specifying the original model to be
        rewritten.
      rewritten_model: A `ModelDescription` specifying the format and location
        of the rewritten model.

    Raises:
      ValueError: If the model could not be successfully rewritten.
    """
    accepted_types = (rewriter.ModelType.TFJS_MODEL,
                      rewriter.ModelType.ANY_MODEL)
    if rewritten_model.model_type not in accepted_types:
      raise ValueError('TFJSConverter can only convert to the TFJS format.')
    _convert_tfjs_model(
        six.ensure_text(original_model.path),
        six.ensure_text(rewritten_model.path))

  def _post_rewrite_validate(self, rewritten_model: rewriter.ModelDescription):
    """Checks whether the rewritten model is valid.

    Args:
      rewritten_model: A `ModelDescription` specifying the format and location
        of the rewritten model.

    Raises:
      ValueError: If the rewritten model is not valid.
    """
    # TODO(dzats): Implement post-rewrite validation.
    pass
"""Base class that TFX rewriters inherit and invocation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import enum
from typing import Text
import six
# A model is fully described by its format (a `ModelType` value) and the
# filesystem path where it lives.
ModelDescription = collections.namedtuple('ModelDescription',
                                          ['model_type', 'path'])
class ModelType(enum.Enum):
  """Types of models used or created by the rewriter."""
  ANY_MODEL = 1  # Wildcard: matches any model format.
  SAVED_MODEL = 2  # TensorFlow SavedModel.
  TFLITE_MODEL = 3  # TensorFlow Lite model.
  TFJS_MODEL = 4  # TensorFlow.js model.
class BaseRewriter(six.with_metaclass(abc.ABCMeta, object)):
  """Abstract base class from which all rewriters should inherit."""

  @abc.abstractproperty
  def name(self) -> Text:
    """Name of the rewriter.

    Should not be `None` nor empty.
    """
    pass

  @abc.abstractmethod
  def _pre_rewrite_validate(self, original_model: ModelDescription):
    """Validates that the original model has the structure this rewriter needs.

    Args:
      original_model: A `ModelDescription` object describing the original
        model.

    Raises:
      ValueError: If the original model does not have the expected structure.
    """
    pass

  @abc.abstractmethod
  def _rewrite(self, original_model: ModelDescription,
               rewritten_model: ModelDescription):
    """Performs the actual rewrite from `original_model` to `rewritten_model`.

    Args:
      original_model: A `ModelDescription` object describing the original
        model.
      rewritten_model: A `ModelDescription` object describing the location and
        type of the rewritten output.

    Raises:
      ValueError: If the original model was not successfully rewritten.
    """
    pass

  @abc.abstractmethod
  def _post_rewrite_validate(self, rewritten_model: ModelDescription):
    """Validates the rewritten model.

    Args:
      rewritten_model: A `ModelDescription` object describing the location and
        type of the rewritten output.

    Raises:
      ValueError: If the rewritten model is not valid.
    """
    pass

  def perform_rewrite(self, original_model: ModelDescription,
                      rewritten_model: ModelDescription):
    """Runs all validations and performs the rewrite.

    The sequence is: pre-rewrite validation, rewrite, post-rewrite validation.
    Any `ValueError` raised by a step is re-raised with context identifying
    this rewriter and the model involved.

    Args:
      original_model: A `base_rewriter.ModelDescription` object describing the
        original model.
      rewritten_model: A `base_rewriter.ModelDescription` object describing the
        location and type of the rewritten model.

    Raises:
      ValueError: If the model was not successfully rewritten.
    """
    try:
      self._pre_rewrite_validate(original_model)
    except ValueError as e:
      raise ValueError(
          '{} failed to perform pre-rewrite validation. Original '
          'model: {}. Error: {}'.format(self.name, str(original_model),
                                        str(e)))
    try:
      self._rewrite(original_model, rewritten_model)
    except ValueError as e:
      raise ValueError(
          '{} failed to rewrite model. Original model: {}. Error {}'.format(
              self.name, str(original_model), str(e)))
    try:
      self._post_rewrite_validate(rewritten_model)
    except ValueError as e:
      raise ValueError(
          '{} failed to validate rewritten model. Rewritten model: {}. Error {}'
          .format(self.name, str(rewritten_model), str(e)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.