code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
"""Local filesystem-based filesystem plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
from typing import Any, Callable, Iterable, List, Text, Tuple
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
class LocalFilesystem(filesystem.Filesystem):
  """Filesystem plugin backed by the local operating-system filesystem.

  Every operation delegates to the matching `os` / `shutil` / `glob` / builtin
  primitive. `FileNotFoundError` raised by those primitives is translated into
  the framework's `filesystem.NotFoundError` so that callers can handle
  missing paths uniformly across filesystem plugins.
  """

  SUPPORTED_SCHEMES = ['']

  @staticmethod
  def open(name: PathType, mode: Text = 'r') -> Any:
    """Opens `name` with the builtin `open` and returns the file object."""
    try:
      return open(name, mode=mode)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err

  @staticmethod
  def copy(src: PathType, dst: PathType, overwrite: bool = False) -> None:
    """Copies file contents from `src` to `dst`."""
    if os.path.exists(dst) and not overwrite:
      raise OSError(
          ('Destination file %r already exists and argument `overwrite` is '
           'false.') % dst)
    try:
      shutil.copyfile(src, dst)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err

  @staticmethod
  def exists(path: PathType) -> bool:
    """Returns True iff `path` exists."""
    return os.path.exists(path)

  @staticmethod
  def glob(pattern: PathType) -> List[PathType]:
    """Returns the paths matching the glob `pattern`."""
    return glob.glob(pattern)

  @staticmethod
  def isdir(path: PathType) -> bool:
    """Returns True iff `path` is an existing directory."""
    return os.path.isdir(path)

  @staticmethod
  def listdir(path: PathType) -> List[PathType]:
    """Returns the entries of directory `path`."""
    try:
      return os.listdir(path)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err

  @staticmethod
  def makedirs(path: PathType) -> None:
    """Creates `path` and any missing parents; no error if it exists."""
    os.makedirs(path, exist_ok=True)

  @staticmethod
  def mkdir(path: PathType) -> None:
    """Creates the single directory `path`; parents must already exist."""
    try:
      os.mkdir(path)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err

  @staticmethod
  def remove(path: PathType) -> None:
    """Deletes the file at `path`."""
    try:
      os.remove(path)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err

  @staticmethod
  def rename(src: PathType, dst: PathType, overwrite: bool = False) -> None:
    """Renames `src` to `dst`."""
    if os.path.exists(dst) and not overwrite:
      raise OSError(
          ('Destination path %r already exists and argument `overwrite` is '
           'false.') % dst)
    try:
      os.rename(src, dst)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err

  @staticmethod
  def rmtree(path: PathType) -> None:
    """Recursively deletes the directory tree rooted at `path`."""
    try:
      shutil.rmtree(path)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err

  @staticmethod
  def stat(path: PathType) -> Any:
    """Returns `os.stat` information for `path`."""
    try:
      return os.stat(path)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err

  @staticmethod
  def walk(
      top: PathType,
      topdown: bool = True,
      onerror: Callable[..., None] = None
  ) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:
    """Yields `(dirpath, dirnames, filenames)` triples like `os.walk`."""
    try:
      yield from os.walk(top, topdown=topdown, onerror=onerror)
    except FileNotFoundError as err:
      raise filesystem.NotFoundError() from err
# Register this plugin so scheme-less paths are served by the local filesystem.
# (Fix: removed dataset-extraction residue that was fused onto this line and
# made it invalid Python.)
filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY.register(
    LocalFilesystem, priority=10)
"""Tensorflow GFile-based filesystem plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Iterable, List, Text, Tuple
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
# TensorFlow is an optional dependency: when it is not installed this plugin
# is skipped entirely and `TensorflowFilesystem` is left as None.
# (Fix: removed dataset-extraction residue that was fused onto the final line
# and made it invalid Python.)
try:
  import tensorflow as tf  # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
  tf = None

if tf:

  class TensorflowFilesystem(filesystem.Filesystem):
    """Filesystem that delegates to `tensorflow.io.gfile`.

    Supports local paths plus the remote schemes handled by GFile
    (`gs://`, `hdfs://`, `s3://`). `tf.errors.NotFoundError` is translated
    into the framework's `filesystem.NotFoundError`.
    """

    SUPPORTED_SCHEMES = ['', 'gs://', 'hdfs://', 's3://']

    @staticmethod
    def open(name: PathType, mode: Text = 'r') -> Any:
      """Opens `name` as a GFile object."""
      # Because the GFile implementation delays I/O until necessary, we cannot
      # catch `NotFoundError` here.
      return tf.io.gfile.GFile(name, mode=mode)

    @staticmethod
    def copy(src: PathType, dst: PathType, overwrite: bool = False) -> None:
      """Copies `src` to `dst`."""
      try:
        tf.io.gfile.copy(src, dst, overwrite=overwrite)
      except tf.errors.NotFoundError as e:
        raise filesystem.NotFoundError() from e

    @staticmethod
    def exists(path: PathType) -> bool:
      """Returns True iff `path` exists."""
      return tf.io.gfile.exists(path)

    @staticmethod
    def glob(pattern: PathType) -> List[PathType]:
      """Returns paths matching `pattern`; an empty list for missing roots."""
      try:
        return tf.io.gfile.glob(pattern)
      except tf.errors.NotFoundError:
        return []

    @staticmethod
    def isdir(path: PathType) -> bool:
      """Returns True iff `path` is an existing directory."""
      return tf.io.gfile.isdir(path)

    @staticmethod
    def listdir(path: PathType) -> List[PathType]:
      """Returns the entries of directory `path`."""
      try:
        return tf.io.gfile.listdir(path)
      except tf.errors.NotFoundError as e:
        raise filesystem.NotFoundError() from e

    @staticmethod
    def makedirs(path: PathType) -> None:
      """Creates `path` including any missing parent directories."""
      tf.io.gfile.makedirs(path)

    @staticmethod
    def mkdir(path: PathType) -> None:
      """Creates the single directory `path`."""
      try:
        tf.io.gfile.mkdir(path)
      except tf.errors.NotFoundError as e:
        raise filesystem.NotFoundError() from e

    @staticmethod
    def remove(path: PathType) -> None:
      """Deletes the file at `path`."""
      try:
        tf.io.gfile.remove(path)
      except tf.errors.NotFoundError as e:
        raise filesystem.NotFoundError() from e

    @staticmethod
    def rename(src: PathType, dst: PathType, overwrite: bool = False) -> None:
      """Renames `src` to `dst`."""
      try:
        tf.io.gfile.rename(src, dst, overwrite=overwrite)
      except tf.errors.NotFoundError as e:
        raise filesystem.NotFoundError() from e

    @staticmethod
    def rmtree(path: PathType) -> None:
      """Recursively deletes the directory tree rooted at `path`."""
      try:
        tf.io.gfile.rmtree(path)
      except tf.errors.NotFoundError as e:
        raise filesystem.NotFoundError() from e

    @staticmethod
    def stat(path: PathType) -> Any:
      """Returns GFile stat information for `path`."""
      try:
        return tf.io.gfile.stat(path)
      except tf.errors.NotFoundError as e:
        raise filesystem.NotFoundError() from e

    @staticmethod
    def walk(
        top: PathType,
        topdown: bool = True,
        onerror: Callable[..., None] = None
    ) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:
      """Yields `(dirpath, dirnames, filenames)` triples like `os.walk`."""
      try:
        yield from tf.io.gfile.walk(top, topdown=topdown, onerror=onerror)
      except tf.errors.NotFoundError as e:
        raise filesystem.NotFoundError() from e

  # Registered as a low-priority fallback handler for all supported schemes.
  filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY.register(
      TensorflowFilesystem, priority=0, use_as_fallback=True)
else:
  TensorflowFilesystem = None  # pylint: disable=invalid-name
import copy
from typing import Any, Dict, Iterator, List, Mapping, Text
import apache_beam as beam
import tensorflow as tf
from tfx.experimental.distributed_inference.graphdef_experiments.subgraph_partitioning import execution_spec
@beam.ptransform_fn
@beam.typehints.with_input_types(Dict[Text, Dict[Text, Any]])
@beam.typehints.with_output_types(Dict[Text, Dict[Text, Any]])
def ExecuteGraph(  # pylint: disable=invalid-name
    pcoll: beam.pvalue.PCollection, remote_op_name: Text,
    remote_op_name_to_graph_name: Mapping[Text, Text],
    graph_name_to_specs: Mapping[Text, List[execution_spec.ExecutionSpec]],
    graph_to_remote_op_input_name_mapping: Mapping[Text, Mapping[Text,
                                                                 Mapping[Text,
                                                                         Text]]]
) -> beam.pvalue.PCollection:
  """A PTransform that executes a graph.

  Each graph has a list of ExecutionSpecs, in which the order of the list
  represents the order of execution. An ExecutionSpec can either represent
  a subgraph layer or a remote op in a remote op layer. When executing a
  subgraph layer, we can load and execute the subgraph with a beam ParDo.
  When executing a remote op (which represents another graph), we need to
  load the remote graph inputs, call ExecuteGraph to recursively execute that
  graph, and extract the remote graph output. When executing a remote op, we
  call the current graph "parent" and the remote graph "child".

  Here, each Beam element is a dictionary from remote op names to a dictionary
  from tensor names to values, or {remote op name: {tensor name: value}}.
  Note that at any time, PColl only stores input tensor values and computed
  tensor values. The input PColl should have the input tensor names and values
  for the graph ready. As we execute the partitioned subgraphs, we add the
  intermediate output names and values to PColl.

  Args:
    pcoll: A PCollection of inputs to the graph. Each element is a dictionary
      from remote op names to a dictionary from tensor names to values. Here,
      element[remote_op_name] contains graph inputs.
    remote_op_name: The remote op name of the current graph.
    remote_op_name_to_graph_name: A mapping from remote op names to graph names.
    graph_name_to_specs: A mapping from graph names to a list of ExecutionSpecs,
      where the order of the list represents the order of execution.
    graph_to_remote_op_input_name_mapping: A mapping from graph names to remote
      op names to remote graph placeholder names to parent graph input names. We
      don't have this information since it was stored in PyFunc's function.
      {graph name: {remote op name: {placeholder name: input name}}}.

  Returns:
    A PCollection of results of this graph. Each element is a dictionary from
    remote op names to a dictionary from tensor names to values. Here,
    element[remote_op_name] stores graph inputs, intermediate results, and
    graph outputs.
  """
  specs = graph_name_to_specs[remote_op_name_to_graph_name[remote_op_name]]
  for spec in specs:
    # Construct Beam subgraph for a subgraph layer.
    if not spec.is_remote_op:
      step_name = ("SubgraphLayerDoFn[Graph_%s][Outputs_%s]" %
                   (remote_op_name, "_".join(spec.output_names)))
      pcoll = pcoll | step_name >> beam.ParDo(_SubgraphLayerDoFn(), spec,
                                              remote_op_name)
    # Construct Beam subgraph for a remote op.
    else:
      # ExecutionSpec stores one remote op.
      child_remote_op_name = list(spec.output_names)[0]
      step_descriptor = ("[Parent_%s][Child_%s]" %
                         (remote_op_name, child_remote_op_name))
      step_name = "LoadRemoteGraphInputs%s" % step_descriptor
      pcoll = pcoll | step_name >> _LoadRemoteGraphInputs(  # pylint: disable=no-value-for-parameter
          remote_op_name, child_remote_op_name, remote_op_name_to_graph_name,
          graph_to_remote_op_input_name_mapping)
      # A good place to add beam.Reshuffle() to prevent fusion.
      step_name = "ExecuteGraph%s" % step_descriptor
      # Recursive call: the child graph may itself contain remote ops.
      pcoll = pcoll | step_name >> ExecuteGraph(  # pylint: disable=no-value-for-parameter
          child_remote_op_name, remote_op_name_to_graph_name,
          graph_name_to_specs, graph_to_remote_op_input_name_mapping)
      step_name = "ExtractRemoteGraphOutput%s" % step_descriptor
      pcoll = pcoll | step_name >> _ExtractRemoteGraphOutput(  # pylint: disable=no-value-for-parameter
          remote_op_name, child_remote_op_name, remote_op_name_to_graph_name,
          graph_name_to_specs)
  return pcoll
class _SubgraphLayerDoFn(beam.DoFn):
  """DoFn that executes one subgraph layer."""

  def process(
      self,
      # Not using mapping here because it doesn't support item assignment.
      element: Dict[Text, Dict[Text, Any]],
      spec: execution_spec.ExecutionSpec,
      remote_op_name: Text) -> Iterator[Dict[Text, Dict[Text, Any]]]:
    """Executes a subgraph layer.

    To execute a subgraph layer, we need to prepare a feed_dict by extracting
    tensor values from element. Then, we run the subgraph and store its outputs
    to a copy of element.

    Since we import `GraphDef` protos, all the node names now have the prefix
    "import/". Also, TensorFlow feed_dict and outputs accept tensor
    names instead of node names. Hence, a conversion from node_name to
    "import/node_name:0" is necessary. Note that this conversion assumes
    that there is one output per node.

    Args:
      element: A dictionary from remote op names to a dictionary from tensor
        names to values. Element[remote_op_name] stores graph inputs and
        previous specs' outputs.
      spec: An ExecutionSpec for a subgraph layer.
      remote_op_name: The remote op name of the current graph.

    Yields:
      A dictionary from remote op names to a dictionary from tensor names to
      values. The dictionary is a copy of the input element, to which the
      outputs of this subgraph layer have been added.
    """
    # Deep-copy so the incoming Beam element is never mutated in place.
    element = copy.deepcopy(element)
    input_tensor_names = [
        _import_tensor_name(node_name) for node_name in spec.input_names
    ]
    output_tensor_names = [
        _import_tensor_name(node_name) for node_name in spec.output_names
    ]
    feed_dict = {
        tensor_name: element[remote_op_name][tensor_name]
        for tensor_name in input_tensor_names
    }
    outputs = []
    # Import the stored subgraph into a fresh graph/session; the import adds
    # the "import/" prefix expected by the tensor names above.
    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
      tf.import_graph_def(spec.subgraph)
      outputs = sess.run(output_tensor_names, feed_dict=feed_dict)
    for output_tensor_name, output_tensor in zip(output_tensor_names, outputs):
      element[remote_op_name][output_tensor_name] = output_tensor
    yield element
def _import_tensor_name( # pylint: disable=invalid-name
node_name: Text) -> Text:
return "import/%s:0" % node_name
@beam.ptransform_fn
@beam.typehints.with_input_types(Dict[Text, Dict[Text, Any]])
@beam.typehints.with_output_types(Dict[Text, Dict[Text, Any]])
def _LoadRemoteGraphInputs(  # pylint: disable=invalid-name
    pcoll: beam.pvalue.PCollection, parent_remote_op_name: Text,
    child_remote_op_name: Text, remote_op_name_to_graph_name: Mapping[Text,
                                                                      Text],
    graph_to_remote_op_input_name_mapping: Mapping[Text, Mapping[Text,
                                                                 Mapping[Text,
                                                                         Text]]]
) -> beam.pvalue.PCollection:
  """A PTransform that prepares inputs for a remote graph.

  Before executing a remote graph, we need to prepare its inputs. We first
  get the mapping from remote graph placeholder names to parent graph input
  names. Then, in a copy of element, we copy the inputs from the parent
  graph's key to the remote graph's key.

  Args:
    pcoll: A PCollection of child graph inputs not loaded yet. Each element is
      a dictionary from remote op names to a dictionary from tensor names to
      values. Here, element[child_remote_op_name] is empty now.
    parent_remote_op_name: The remote op name of the parent graph.
    child_remote_op_name: The remote op name of the child graph.
    remote_op_name_to_graph_name: A mapping from remote op names to graph names.
    graph_to_remote_op_input_name_mapping: A mapping from graph names to remote
      op names to remote graph placeholder names to parent graph input names.
      {graph name: {remote op name: {placeholder name: input name}}}.

  Returns:
    A PCollection of inputs to the child graph. Each element is a dictionary
    from remote op names to a dictionary from tensor names to values. Here,
    element[child_remote_op_name] stores the inputs of child graph.
  """
  parent_graph_name = remote_op_name_to_graph_name[parent_remote_op_name]
  name_mapping = (
      graph_to_remote_op_input_name_mapping[parent_graph_name]
      [child_remote_op_name])
  mapping = name_mapping.items()
  # Calling _copy_tensor_value multiple times may introduce a burden, since
  # _copy_tensor_value invokes a deepcopy on element.
  for child_graph_placeholder_name, parent_graph_input_name in mapping:
    step_name = ("PrepareInput[Graph_%s][Input_%s]" %
                 (child_remote_op_name, child_graph_placeholder_name))
    pcoll = pcoll | step_name >> beam.Map(
        _copy_tensor_value, parent_remote_op_name,
        _import_tensor_name(parent_graph_input_name), child_remote_op_name,
        _import_tensor_name(child_graph_placeholder_name))
  return pcoll
def _copy_tensor_value( # pylint: disable=invalid-name
element: Dict[Text, Dict[Text,
Any]], old_graph: Text, old_tensor_name: Text,
new_graph: Text, new_tensor_name: Text) -> Dict[Text, Dict[Text, Any]]:
element = copy.deepcopy(element)
if new_graph not in element:
element[new_graph] = {}
element[new_graph][new_tensor_name] = element[old_graph][old_tensor_name]
return element
@beam.ptransform_fn
@beam.typehints.with_input_types(Dict[Text, Dict[Text, Any]])
@beam.typehints.with_output_types(Dict[Text, Dict[Text, Any]])
def _ExtractRemoteGraphOutput(  # pylint: disable=invalid-name
    pcoll: beam.pvalue.PCollection,
    parent_remote_op_name: Text,
    child_remote_op_name: Text,
    remote_op_name_to_graph_name: Mapping[Text, Text],
    graph_name_to_specs: Mapping[Text, List[execution_spec.ExecutionSpec]],
) -> beam.pvalue.PCollection:
  """A PTransform that extracts remote graph output.

  After finish executing a remote graph, we need to collect its output.
  We first find the output name of the remote graph, then we copy the
  output of the remote graph to its parent graph. Finally, we clear the
  intermediate results of the remote graph.

  Note we assumed that each node has only one output, which also applies
  to remote op. This means that a remote graph can only have one output.

  Args:
    pcoll: A PCollection of child graph results. Each element is a dictionary
      from remote op names to a dictionary from tensor names to values. Here,
      element[child_remote_op_name] stores graph inputs, intermediate results,
      and graph output.
    parent_remote_op_name: The remote op name of the parent graph.
    child_remote_op_name: The remote op name of the child graph.
    remote_op_name_to_graph_name: A mapping from remote op names to graph names.
    graph_name_to_specs: A mapping from graph names to a list of ExecutionSpecs.

  Returns:
    A PCollection of child graph output in parent graph. Each element is a
    dictionary from remote op names to a dictionary from tensor names to
    values. Here, element[parent_remote_op_name] contains the output from
    the child graph, and element[child_remote_op_name] is deleted.
  """
  child_graph_name = remote_op_name_to_graph_name[child_remote_op_name]
  child_specs = graph_name_to_specs[child_graph_name]
  # The single output of the last spec is the child graph's output.
  child_output_name = list(child_specs[-1].output_names)[0]
  step_name_extract = ("ExtractOutput[Graph_%s][Output_%s]" %
                       (child_remote_op_name, child_output_name))
  step_name_clear = ("ClearIntermediateOutputs[Graph_%s]" %
                     (child_remote_op_name))
  return (pcoll
          | step_name_extract >> beam.Map(
              _copy_tensor_value, child_remote_op_name,
              _import_tensor_name(child_output_name), parent_remote_op_name,
              _import_tensor_name(child_remote_op_name))
          | step_name_clear >> beam.Map(_clear_outputs_for_finished_graph,
                                        child_remote_op_name))
def _clear_outputs_for_finished_graph( # pylint: disable=invalid-name
element: Dict[Text, Dict[Text, Any]],
finished_graph: Text) -> Dict[Text, Dict[Text, Any]]:
element = copy.deepcopy(element)
del element[finished_graph]
return element | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/experimental/distributed_inference/graphdef_experiments/subgraph_partitioning/beam_pipeline.py | 0.883186 | 0.341871 | beam_pipeline.py | pypi |
import tensorflow as tf
# The examples below build explicit tf.Graph objects and run them in
# tf.compat.v1 sessions, so TF1-style graph mode is required.
tf.compat.v1.disable_eager_execution()  # Disable eager mode
N = 1000  # number of embeddings
NDIMS = 16  # dimensionality of embeddings
def create_session(graph):
  """Creates a session for `graph` with 8 inter-op parallelism threads."""
  session_config = tf.compat.v1.ConfigProto(inter_op_parallelism_threads=8)
  return tf.compat.v1.Session(graph=graph, config=session_config)
# Define remote_op_a's graph: an embedding lookup over a fixed random table.
graph_a = tf.Graph()
with graph_a.as_default():
  table_a = tf.random.uniform(shape=[N, NDIMS], seed=10)  # N x NDIMS table
  ids_a = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_a')
  result_a = tf.nn.embedding_lookup(table_a, ids_a)
def remote_op_a(input_ids):
  """Mimics a remote op by numpy_function."""

  def remote_lookup(input_ids):
    # Runs graph_a in its own session, like a remote service call would.
    with create_session(graph_a) as sess:
      return sess.run(result_a, feed_dict={ids_a: input_ids})

  return tf.compat.v1.numpy_function(
      func=remote_lookup, inp=[input_ids], Tout=tf.float32, name='remote_op_a')
# Define remote_op_b's graph: combines two remote_op_a lookups.
graph_b = tf.Graph()
with graph_b.as_default():
  ids_b2 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_b2')
  ids_b1 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids_b1')
  # Shift the first id by one and wrap into the table range [0, N).
  ids_b1_preprocessed = tf.math.floormod(tf.add(ids_b1, 1), N)
  remote_result_a1 = remote_op_a(ids_b1_preprocessed)
  remote_result_a2 = remote_op_a(ids_b2)
  result_b = tf.math.add(remote_result_a1, remote_result_a2 * 2.5)
def remote_op_b(input_ids1, input_ids2):
  """Mimics another remote op."""

  def remote_lookup(input_ids1, input_ids2):
    # Runs graph_b (which itself calls remote_op_a twice) in its own session.
    with create_session(graph_b) as sess:
      return sess.run(
          result_b, feed_dict={
              ids_b1: input_ids1,
              ids_b2: input_ids2
          })

  return tf.compat.v1.numpy_function(
      func=remote_lookup,
      inp=[input_ids1, input_ids2],
      Tout=tf.float32,
      name='remote_op_b')
# Define main's graph: two rounds of remote calls with arithmetic between.
main_graph = tf.Graph()
with main_graph.as_default():
  ids1 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids1')
  ids2 = tf.compat.v1.placeholder(dtype=tf.int32, name='ids2')
  casted_ids1 = tf.cast(ids1, tf.float32)
  casted_ids2 = tf.cast(ids2, tf.float32)
  # First round of remote calls.
  remote_a0 = remote_op_a(ids1)
  remote_b0 = remote_op_b(ids1, ids2)
  left_upper_concat = tf.concat([remote_a0, remote_b0], 0)
  left_upper_sum = tf.reduce_mean(left_upper_concat)
  right_upper_sum = tf.reduce_mean(remote_b0)
  right_upper_mul = tf.multiply(right_upper_sum, casted_ids2)
  right_upper_add = tf.add(right_upper_mul, left_upper_sum)
  right_upper_round = tf.math.round(right_upper_mul)
  right_upper_floormod = tf.math.floormod(right_upper_round, N)
  left_upper_add = tf.add_n([left_upper_sum, casted_ids1, right_upper_add])
  left_upper_round = tf.math.round(left_upper_add)
  left_upper_floormod = tf.math.floormod(left_upper_round, N)
  # Second round of remote calls, fed by the first round's (wrapped) results.
  remote_a1 = remote_op_a(left_upper_floormod)
  remote_b1 = remote_op_b(left_upper_floormod, right_upper_floormod)
  left_lower_sum = tf.reduce_mean(remote_a1)
  right_lower_sum = tf.reduce_mean(remote_b1)
  right_lower_mul = tf.multiply(casted_ids2, right_lower_sum)
  right_lower_div = tf.divide(right_upper_add, right_lower_mul)
  main_result = tf.add_n([
      left_lower_sum, right_lower_div, right_lower_sum, right_upper_sum,
      tf.cast(left_upper_floormod, tf.float32)
  ])
def save_examples_as_graphdefs(export_dir):
  """Serializes the three example graphs to binary `GraphDef` files."""
  named_graphs = (
      (graph_a, 'graph_a.pb'),
      (graph_b, 'graph_b.pb'),
      (main_graph, 'main_graph.pb'),
  )
  for graph, filename in named_graphs:
    tf.io.write_graph(graph.as_graph_def(), export_dir, filename,
                      as_text=False)
if __name__ == '__main__':
  # Export the example graphs for the partitioning experiments.
  # (Fix: removed dataset-extraction residue fused onto this line.)
  save_examples_as_graphdefs('./complex_graphdefs')
import collections
from typing import Dict, List, Mapping, Set, Text
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.experimental.distributed_inference.graphdef_experiments.subgraph_partitioning import execution_spec
def get_graph_name_to_graph_def(
    graph_name_to_filepath: Mapping[Text, Text]
) -> Dict[Text, tf.compat.v1.GraphDef]:
  """Gets the `GraphDef` protos from files.

  Args:
    graph_name_to_filepath: A mapping from graph names to filepaths. Each
      filepath points to a `GraphDef` proto in binary.

  Returns:
    A mapping from graph names to `GraphDef` protos.
  """
  graph_name_to_graph_def = {
      graph_name: _get_graph_def(filepath)
      for graph_name, filepath in graph_name_to_filepath.items()
  }
  return graph_name_to_graph_def
def _get_graph_def(filepath: Text) -> tf.compat.v1.GraphDef:
  """Parses a binary-serialized `GraphDef` proto from `filepath`."""
  with fileio.open(filepath, 'rb') as stream:
    serialized = stream.read()
  graph_def = tf.compat.v1.GraphDef()
  graph_def.ParseFromString(serialized)
  return graph_def
def partition_all_graphs(
    graph_name_to_graph_def: Mapping[Text, tf.compat.v1.GraphDef],
    graph_name_to_output_names: Mapping[Text, List[Text]]
) -> Dict[Text, List[execution_spec.ExecutionSpec]]:
  """Partitions all the graphs.

  For each graph, the partitioning algorithm takes in the graph's `GraphDef`
  proto and output names, partitions the graph, and returns a list of
  ExecutionSpecs. Later, the beam_pipeline library can take in the
  ExecutionSpecs and execute the partitioned subgraphs.

  Args:
    graph_name_to_graph_def: A mapping from graph names to `GraphDef` protos.
    graph_name_to_output_names: A mapping from graph names to lists of their
      output node names.

  Returns:
    A mapping from graph names to a list of ExecutionSpecs, where the order
    of the list represents the order of execution.
  """
  graph_name_to_specs = {}
  for graph_name in graph_name_to_graph_def:
    specs = _partition_one_graph(graph_name_to_graph_def[graph_name],
                                 graph_name_to_output_names[graph_name])
    graph_name_to_specs[graph_name] = specs
  return graph_name_to_specs
def _partition_one_graph(
    graph_def: tf.compat.v1.GraphDef,
    output_names: List[Text]) -> List[execution_spec.ExecutionSpec]:
  """Partitions one graph.

  Args:
    graph_def: A `GraphDef` proto for that graph.
    output_names: A list of graph's output node names.

  Returns:
    A list of ExecutionSpecs, ordered by execution.
  """
  graph = _get_graph(graph_def)
  node_index = _get_node_name_to_node_def(graph_def)
  remote_op_deps = _get_remote_op_to_immediate_dep(node_index)
  specs = _get_execution_specs(graph_def, output_names, graph, node_index,
                               remote_op_deps)
  _modify_execution_specs_for_input_validity(specs)
  return specs
def _get_graph(graph_def: tf.compat.v1.GraphDef) -> tf.Graph:
  """Imports `graph_def` into a fresh graph and returns that graph."""
  target_graph = tf.Graph()
  with tf.compat.v1.Session(graph=target_graph) as session:
    tf.import_graph_def(graph_def)
  return session.graph
def _get_node_name_to_node_def(
    graph_def: tf.compat.v1.GraphDef) -> Dict[Text, tf.compat.v1.NodeDef]:
  """Builds an index from node name to its `NodeDef` within `graph_def`."""
  name_to_node = {}
  for node in graph_def.node:
    name_to_node[node.name] = node
  return name_to_node
def _get_remote_op_to_immediate_dep(
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef]
) -> Dict[Text, List[Text]]:
  """Gets the execution dependencies between remote ops.

  The remote op immediate dependencies must be executed before executing
  a remote op.

  Args:
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.

  Returns:
    A mapping from a remote op name to a list of remote op immediate
    dependencies' names.
  """
  remote_op_to_immediate_dep = {}
  for node in node_name_to_node_def.values():
    if _is_remote_op(node):
      remote_op_to_immediate_dep[node.name] = _get_remote_op_immediate_dep(
          node.name, node_name_to_node_def)
  return remote_op_to_immediate_dep
def _get_remote_op_immediate_dep(
    remote_op_name: Text,
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef]) -> List[Text]:
  """Finds the remote op immediate dependencies for a remote op.

  Performs a BFS from `remote_op_name` toward its inputs, stopping each
  branch at the first remote op encountered.

  Args:
    remote_op_name: The name of the child remote op.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.

  Returns:
    A list of remote op immediate dependencies' names.
  """
  queue = collections.deque([remote_op_name])
  visited = set([remote_op_name])
  remote_op_immediate_dep = []
  while queue:
    current_node_name = queue.popleft()
    for input_node_name in node_name_to_node_def[current_node_name].input:
      if input_node_name not in visited:
        visited.add(input_node_name)
        input_node = node_name_to_node_def[input_node_name]
        # Stop traversing when reaching a remote op.
        if _is_remote_op(input_node):
          remote_op_immediate_dep.append(input_node_name)
        else:
          queue.append(input_node_name)
  return remote_op_immediate_dep
def _is_placeholder_op(node: tf.compat.v1.NodeDef) -> bool:
  """Returns True iff `node` is a `Placeholder` op."""
  op_type = node.op
  return op_type == 'Placeholder'
def _is_remote_op(node: tf.compat.v1.NodeDef) -> bool:
  """Returns True iff `node` is a remote op (represented as a `PyFunc`)."""
  op_type = node.op
  return op_type == 'PyFunc'
def _get_execution_specs(
    graph_def: tf.compat.v1.GraphDef, graph_output_names: List[Text],
    graph: tf.Graph, node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef],
    remote_op_to_immediate_dep: Mapping[Text, List[Text]]
) -> List[execution_spec.ExecutionSpec]:
  """Generates the ExecutionSpecs for a graph.

  A "layer" contains one or more nodes inside a graph. There are two types of
  layers: subgraph layer and remote op layer. A subgraph layer doesn't
  contain remote ops, whereas a remote op layer only contains remote ops.
  Remote ops inside a remote op layer don't depend on each other's output,
  so the order of execution between those remote ops doesn't matter.

  We first identify the remote op layers of a graph. Then, based on the
  remote op layers, we can derive the subgraph layers. For example, after
  identifying the first remote op layer, we can equate the inputs of the
  remote op layer to the outputs of the previous subgraph layer. We can
  then traverse and construct the previous subgraph layer.

  Each subgraph layer can be captured into one ExecutionSpec, but each
  remote op layer need to be stored into N ExecutionSpecs, where N equals
  to the number of remote ops inside a remote op layer. This happens
  because each remote op essentially represents a graph.

  Args:
    graph_def: A `GraphDef` proto.
    graph_output_names: A list of graph output node names.
    graph: A tf.Graph representing the same graph as graph_def.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.
    remote_op_to_immediate_dep: A mapping from remote op name to a list of
      remote op immediate dependencies' names.

  Returns:
    A list of ExecutionSpecs, where the order of the list represents the
    order of execution.
  """
  execution_specs = []  # type: List[execution_spec.ExecutionSpec]
  previously_visited = set()  # type: Set[Text]
  for remote_op_layer in _RemoteOpLayers(remote_op_to_immediate_dep):
    # Get one subgraph layer
    output_node_names = _get_previous_subgraph_layer_output_node_names(
        remote_op_layer, node_name_to_node_def)
    if output_node_names:
      spec = _get_execution_spec_for_subgraph_layer(graph_def, graph,
                                                    node_name_to_node_def,
                                                    previously_visited,
                                                    output_node_names)
      execution_specs.append(spec)
      previously_visited |= _get_non_input_names(spec.subgraph)
    # Get one remote op layer
    specs = _get_execution_specs_for_remote_op_layer(remote_op_layer,
                                                     node_name_to_node_def)
    execution_specs.extend(specs)
  # Get the last subgraph layer
  output_node_names = set(graph_output_names)
  spec = _get_execution_spec_for_subgraph_layer(graph_def, graph,
                                                node_name_to_node_def,
                                                previously_visited,
                                                output_node_names)
  execution_specs.append(spec)
  return execution_specs
def _get_previous_subgraph_layer_output_node_names(
    remote_op_layer: Set[Text],
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef]) -> Set[Text]:
  """Gets the output node names of the previous subgraph layer.

  Given a remote op layer, we derive the output node names of the previous
  subgraph layer. Layers tend to have the following order: subgraph layer,
  remote op layer, subgraph layer, remote op layer, ...

  Args:
    remote_op_layer: A set of remote op names for a remote op layer.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.

  Returns:
    A set of output node names of the previous subgraph layer.
  """
  previous_subgraph_layer_output_node_names = set()
  for remote_op_name in remote_op_layer:
    for input_node_name in node_name_to_node_def[remote_op_name].input:
      input_node = node_name_to_node_def[input_node_name]
      # Assumption: Graph inputs and previous remote op outputs are always
      # computed and stored.
      if _is_placeholder_op(input_node) or _is_remote_op(input_node):
        continue
      previous_subgraph_layer_output_node_names.add(input_node_name)
  return previous_subgraph_layer_output_node_names
def _get_execution_spec_for_subgraph_layer(
    graph_def: tf.compat.v1.GraphDef, graph: tf.Graph,
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef],
    previously_visited: Set[Text],
    output_node_names: Set[Text]) -> execution_spec.ExecutionSpec:
  """Constructs one subgraph layer.

  As discussed in _get_execution_specs(), a subgraph layer contains one or
  more nodes excluding remote ops. Based on a set of output node names, we
  traverse toward the ancestors (upward) until encountering a "special" node.
  Here, we traverse upward because each node's node_def contains input names
  but not output names.

  A "special" node could be either a placeholder node, a remote op, or a node
  visited by a previous layer. Since it is computed/stored prior to the
  current subgraph layer, we can treat it as an input of the current subgraph
  layer.

  Args:
    graph_def: A `GraphDef` proto for the original graph.
    graph: A tf.Graph instance for the original graph.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.
    previously_visited: A set of node names from previous subgraph layers.
    output_node_names: A set of output node names for the current subgraph.

  Returns:
    An ExecutionSpec representing a subgraph layer.
  """
  subgraph = tf.compat.v1.GraphDef()
  # Copy the graph metadata so the subgraph can be imported on its own.
  subgraph.versions.CopyFrom(graph_def.versions)
  subgraph.library.CopyFrom(graph_def.library)
  # BFS from the layer's outputs toward their ancestors.
  queue = collections.deque(output_node_names)
  visited = set()
  while queue:
    current_node_name = queue.popleft()
    current_node = node_name_to_node_def[current_node_name]
    if current_node_name not in visited:
      visited.add(current_node_name)
      if (_is_remote_op(current_node) or _is_placeholder_op(current_node) or
          current_node_name in previously_visited):
        # These ops must be computed before this subgraph layer. Hence,
        # we treat them as placeholder inputs.
        placeholder_node = _create_placeholder_node_from_existing_node(
            current_node, graph)
        subgraph.node.append(placeholder_node)
      else:
        subgraph.node.append(current_node)
        # Only non-special nodes propagate the upward traversal.
        queue.extend(node_name_to_node_def[current_node_name].input)
  return execution_spec.ExecutionSpec(
      subgraph=subgraph,
      input_names=_get_input_names(subgraph),
      output_names=set(output_node_names),
      is_remote_op=False)
def _create_placeholder_node_from_existing_node(
    node: tf.compat.v1.NodeDef, graph: tf.Graph) -> tf.compat.v1.NodeDef:
  """Creates a placeholder node that stands in for an existing node.

  Some partitioned subgraphs need inputs that were loaded or computed earlier.
  Such input nodes are replaced by placeholder nodes sharing the same name,
  shape, and dtype, so at runtime they can be fed through feed dicts.

  Args:
    node: A `NodeDef` proto for the existing node.
    graph: A tf.Graph instance for the graph that contains the existing node.

  Returns:
    A `NodeDef` proto that stores a placeholder node.
  """
  # The graph was imported with the default 'import/' name scope.
  existing_op = graph.get_operation_by_name('import/%s' % node.name)
  tensor = existing_op.outputs[0]
  # Build the placeholder in a throwaway graph and extract its NodeDef.
  with tf.compat.v1.Session(graph=tf.Graph()) as sess:
    tf.compat.v1.placeholder(
        dtype=tensor.dtype, shape=tensor.shape, name=node.name)
    return sess.graph_def.node[0]
def _get_input_names(subgraph: tf.compat.v1.GraphDef) -> Set[Text]:
  """Returns the names of the placeholder (input) nodes in `subgraph`."""
  return {node.name for node in subgraph.node if _is_placeholder_op(node)}
def _get_non_input_names(subgraph: tf.compat.v1.GraphDef) -> Set[Text]:
  """Returns the names of all non-placeholder nodes in `subgraph`."""
  names = set()
  for node in subgraph.node:
    if not _is_placeholder_op(node):
      names.add(node.name)
  return names
def _get_execution_specs_for_remote_op_layer(
    remote_op_layer: Set[Text],
    node_name_to_node_def: Mapping[Text, tf.compat.v1.NodeDef]
) -> List[execution_spec.ExecutionSpec]:
  """Constructs ExecutionSpecs for a remote op layer.

  As discussed in _get_execution_specs(), a remote op layer contains one or
  more remote ops with no dependencies on each other. Unlike a subgraph layer
  (one ExecutionSpec per layer), each remote op gets its own ExecutionSpec.

  Args:
    remote_op_layer: A set of remote op names for a remote op layer.
    node_name_to_node_def: A mapping from node names to `NodeDef` protos.

  Returns:
    A list of ExecutionSpecs representing a remote op layer.
  """
  return [
      execution_spec.ExecutionSpec(
          subgraph=None,
          input_names=set(node_name_to_node_def[name].input),
          output_names={name},
          is_remote_op=True) for name in remote_op_layer
  ]
def _modify_execution_specs_for_input_validity(
    specs: List[execution_spec.ExecutionSpec]) -> None:
  """Modifies the execution specs so that every input is produced upstream.

  An input of a spec may be a node computed by an earlier subgraph spec that
  is not listed among that spec's outputs. Such names are added to the earlier
  spec's outputs.

  Args:
    specs: A list of ExecutionSpecs, where order of the list represents the
      order of the execution.
  """
  for index, spec in enumerate(specs):
    earlier_subgraph_specs = (
        earlier for earlier in specs[:index] if not earlier.is_remote_op)
    for earlier_spec in earlier_subgraph_specs:
      _add_current_spec_input_to_previous_spec_output(spec, earlier_spec)
def _add_current_spec_input_to_previous_spec_output(
    current_spec: execution_spec.ExecutionSpec,
    previous_spec: execution_spec.ExecutionSpec) -> None:
  """Adds inputs of `current_spec` computed by `previous_spec` to its outputs.

  Args:
    current_spec: The spec whose inputs must be available.
    previous_spec: An earlier subgraph spec that may compute those inputs;
      its `output_names` set is mutated in place.
  """
  # Hoisted out of the loop: the original rebuilt this set for every input
  # name, which made the check O(|inputs| * |subgraph nodes|).
  previous_computed_names = _get_non_input_names(previous_spec.subgraph)
  for input_name in current_spec.input_names:
    if input_name in previous_computed_names:
      # Output names is a set, which doesn't allow duplicates.
      previous_spec.output_names.add(input_name)
class _RemoteOpLayers:
"""A class that outputs remote op layers (custom topological sort).
A remote op layer contains README.ml-pipelines-sdk.md set of remote op names that don't have
dependencies on each other. The remote op layers are returned in execution
order. In other words, README.ml-pipelines-sdk.md remote op layer returned earlier will be executed
earlier.
"""
def __init__(self, remote_op_to_immediate_dep: Mapping[Text, List[Text]]):
"""Initializes the class.
Args:
remote_op_to_immediate_dep: A mapping from README.ml-pipelines-sdk.md remote op name to README.ml-pipelines-sdk.md list of
remote op immediate dependencies' names.
"""
self.remote_op_to_immediate_dep = remote_op_to_immediate_dep
def __iter__(self):
self._not_processed = set(self.remote_op_to_immediate_dep.keys())
return self
def __next__(self) -> Set[Text]:
"""Gets the remote op names for the next remote op layer.
Returns:
A set of remote op names.
"""
if not self._not_processed:
raise StopIteration
layer_node_names = set()
for remote_op_name in self._not_processed:
remote_op_immediate_dep = set(
self.remote_op_to_immediate_dep[remote_op_name])
if not remote_op_immediate_dep & self._not_processed:
layer_node_names.add(remote_op_name)
self._not_processed -= layer_node_names
return layer_node_names | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/experimental/distributed_inference/graphdef_experiments/subgraph_partitioning/graph_partition.py | 0.924951 | 0.44059 | graph_partition.py | pypi |
"""Define LocalDagRunner to run the pipeline locally."""
import os
from absl import logging
from tfx.experimental.templates.penguin.pipeline import configs
from tfx.experimental.templates.penguin.pipeline import pipeline
from tfx.orchestration import metadata
from tfx.orchestration.local import local_dag_runner
from tfx.proto import trainer_pb2
# TFX pipeline produces many output files and metadata. All output data will be
# stored under this OUTPUT_DIR.
# NOTE: It is recommended to have a separated OUTPUT_DIR which is *outside* of
# the source code structure. Please change OUTPUT_DIR to other location
# where we can store outputs of the pipeline.
OUTPUT_DIR = '.'
# TFX produces two types of outputs, files and metadata.
# - Files will be created under PIPELINE_ROOT directory.
# - Metadata will be written to SQLite database in METADATA_PATH.
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, 'tfx_pipeline_output',
                             configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(OUTPUT_DIR, 'tfx_metadata', configs.PIPELINE_NAME,
                             'metadata.db')
# The last component of the pipeline, "Pusher" will produce serving model under
# SERVING_MODEL_DIR.
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, 'serving_model')
# Specifies data file directory. DATA_PATH should be a directory containing CSV
# files for CsvExampleGen in this example. By default, data files are in the
# `data` directory.
# NOTE: If you upload data files to GCS(which is recommended if you use
# Kubeflow), you can use a path starting "gs://YOUR_BUCKET_NAME/path" for
# DATA_PATH. For example,
# DATA_PATH = 'gs://bucket/penguin/csv/'.
# TODO(step 4): Specify the path for your data.
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def run():
  """Defines a pipeline and runs it locally with LocalDagRunner."""
  metadata_config = metadata.sqlite_metadata_connection_config(METADATA_PATH)
  penguin_pipeline = pipeline.create_pipeline(
      pipeline_name=configs.PIPELINE_NAME,
      pipeline_root=PIPELINE_ROOT,
      data_path=DATA_PATH,
      # NOTE: Use `query` instead of `data_path` to use BigQueryExampleGen.
      # query=configs.BIG_QUERY_QUERY,
      preprocessing_fn=configs.PREPROCESSING_FN,
      run_fn=configs.RUN_FN,
      train_args=trainer_pb2.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
      eval_args=trainer_pb2.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
      eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
      serving_model_dir=SERVING_MODEL_DIR,
      # NOTE: Provide GCP configs to use BigQuery with Beam DirectRunner.
      # beam_pipeline_args=configs.
      # BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
      metadata_connection_config=metadata_config)
  local_dag_runner.LocalDagRunner().run(penguin_pipeline)
# Entry point: garbled trailing metadata on the final line has been removed.
if __name__ == '__main__':
  logging.set_verbosity(logging.INFO)
  run()
from typing import List, Text
from absl import logging
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx.components.trainer.executor import TrainerFnArgs
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.experimental.templates.penguin.models import constants
from tfx.experimental.templates.penguin.models import features
from tfx.utils import io_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
def _get_serve_tf_examples_fn(model, schema, tf_transform_output):
  """Returns a function that parses a serialized tf.Example.

  When `tf_transform_output` is None (no Transform component), features are
  parsed straight from the raw schema; otherwise they are parsed with the
  Transform output's raw feature spec and then passed through the TFT layer.
  """
  if tf_transform_output is None:  # Transform component is not used.

    @tf.function
    def serve_tf_examples_fn(serialized_tf_examples):
      """Returns the output to be used in the serving signature."""
      feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
      # The label is never fed at serving time.
      feature_spec.pop(features.LABEL_KEY)
      parsed_features = tf.io.parse_example(serialized_tf_examples,
                                            feature_spec)
      return model(parsed_features)
  else:  # Transform component exists.
    # Attach the TFT layer to the model so it is tracked and exported with it.
    model.tft_layer = tf_transform_output.transform_features_layer()

    @tf.function
    def serve_tf_examples_fn(serialized_tf_examples):
      """Returns the output to be used in the serving signature."""
      feature_spec = tf_transform_output.raw_feature_spec()
      feature_spec.pop(features.LABEL_KEY)
      parsed_features = tf.io.parse_example(serialized_tf_examples,
                                            feature_spec)
      transformed_features = model.tft_layer(parsed_features)
      return model(transformed_features)

  return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
              data_accessor: DataAccessor,
              schema: schema_pb2.Schema,
              label: Text,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    schema: A schema proto of input data.
    label: Name of the label.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  options = dataset_options.TensorFlowDatasetOptions(
      batch_size=batch_size, label_key=label)
  dataset = data_accessor.tf_dataset_factory(file_pattern, options, schema)
  return dataset.repeat()
def _build_keras_model(feature_list: List[Text]) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying penguin data.

  Args:
    feature_list: List of feature names.

  Returns:
    A Keras Model.
  """
  # Built with the Functional API; see
  # https://www.tensorflow.org/guide/keras/overview for all API options.
  inputs = []
  for feature_name in feature_list:
    inputs.append(keras.layers.Input(shape=(1,), name=feature_name))
  hidden = keras.layers.concatenate(inputs)
  for _ in range(constants.NUM_LAYERS):
    hidden = keras.layers.Dense(
        constants.HIDDEN_LAYER_UNITS, activation='relu')(hidden)
  outputs = keras.layers.Dense(
      constants.OUTPUT_LAYER_UNITS, activation='softmax')(hidden)
  model = keras.Model(inputs=inputs, outputs=outputs)
  model.compile(
      optimizer=keras.optimizers.Adam(constants.LEARNING_RATE),
      loss='sparse_categorical_crossentropy',
      metrics=[keras.metrics.SparseCategoricalAccuracy()])
  model.summary(print_fn=logging.info)
  return model
# TFX Trainer will call this function.
# TODO(step 4): Construct, train and save your model in this function.
def run_fn(fn_args: TrainerFnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # NOTE(review): garbled trailing metadata on the final line has been removed.
  if fn_args.transform_output is None:  # Transform is not used.
    tf_transform_output = None
    schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())
    feature_list = features.FEATURE_KEYS
    label_key = features.LABEL_KEY
  else:
    tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
    schema = tf_transform_output.transformed_metadata.schema
    feature_list = [features.transformed_name(f) for f in features.FEATURE_KEYS]
    label_key = features.transformed_name(features.LABEL_KEY)

  mirrored_strategy = tf.distribute.MirroredStrategy()
  # Scale the global batch sizes with the replica count so the per-replica
  # batch size stays constant.
  train_batch_size = (
      constants.TRAIN_BATCH_SIZE * mirrored_strategy.num_replicas_in_sync)
  eval_batch_size = (
      constants.EVAL_BATCH_SIZE * mirrored_strategy.num_replicas_in_sync)

  train_dataset = _input_fn(
      fn_args.train_files,
      fn_args.data_accessor,
      schema,
      label_key,
      batch_size=train_batch_size)
  eval_dataset = _input_fn(
      fn_args.eval_files,
      fn_args.data_accessor,
      schema,
      label_key,
      batch_size=eval_batch_size)

  with mirrored_strategy.scope():
    model = _build_keras_model(feature_list)

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model, schema,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""Define LocalDagRunner to run the pipeline locally."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
from tfx.experimental.templates.taxi.pipeline import configs
from tfx.experimental.templates.taxi.pipeline import pipeline
from tfx.orchestration import metadata
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import trainer_pb2
# TFX pipeline produces many output files and metadata. All output data will be
# stored under this OUTPUT_DIR.
# NOTE: It is recommended to have a separated OUTPUT_DIR which is *outside* of
# the source code structure. Please change OUTPUT_DIR to other location
# where we can store outputs of the pipeline.
OUTPUT_DIR = '.'
# TFX produces two types of outputs, files and metadata.
# - Files will be created under PIPELINE_ROOT directory.
# - Metadata will be written to SQLite database in METADATA_PATH.
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, 'tfx_pipeline_output',
                             configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(OUTPUT_DIR, 'tfx_metadata', configs.PIPELINE_NAME,
                             'metadata.db')
# The last component of the pipeline, "Pusher" will produce serving model under
# SERVING_MODEL_DIR.
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, 'serving_model')
# Specifies data file directory. DATA_PATH should be a directory containing CSV
# files for CsvExampleGen in this example. By default, data files are in the
# `data` directory.
# NOTE: If you upload data files to GCS(which is recommended if you use
# Kubeflow), you can use a path starting "gs://YOUR_BUCKET_NAME/path" for
# DATA_PATH. For example,
# DATA_PATH = 'gs://bucket/chicago_taxi_trips/csv/'.
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def run():
  """Defines a local pipeline and runs it with LocalDagRunner."""
  metadata_config = metadata.sqlite_metadata_connection_config(METADATA_PATH)
  taxi_pipeline = pipeline.create_pipeline(
      pipeline_name=configs.PIPELINE_NAME,
      pipeline_root=PIPELINE_ROOT,
      data_path=DATA_PATH,
      # TODO(step 7): (Optional) Uncomment here to use BigQueryExampleGen.
      # query=configs.BIG_QUERY_QUERY,
      preprocessing_fn=configs.PREPROCESSING_FN,
      run_fn=configs.RUN_FN,
      train_args=trainer_pb2.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
      eval_args=trainer_pb2.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
      eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
      serving_model_dir=SERVING_MODEL_DIR,
      # TODO(step 7): (Optional) Uncomment here to use provide GCP related
      # config for BigQuery with Beam DirectRunner.
      # beam_pipeline_args=configs.
      # BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS,
      metadata_connection_config=metadata_config)
  LocalDagRunner().run(taxi_pipeline)
# Entry point: garbled trailing metadata on the final line has been removed.
if __name__ == '__main__':
  logging.set_verbosity(logging.INFO)
  run()
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_transform as tft
from tfx.experimental.templates.taxi.models import features
def _fill_in_missing(x):
  """Replaces missing values in a SparseTensor and densifies it.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Dense inputs pass through untouched.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  if x.dtype == tf.string:
    default_value = ''
  else:
    default_value = 0
  dense = tf.sparse.to_dense(
      tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
      default_value)
  return tf.squeeze(dense, axis=1)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  # NOTE(review): garbled trailing metadata on the final line has been removed.
  outputs = {}
  for key in features.DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[features.transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in features.VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[features.transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=features.VOCAB_SIZE,
        num_oov_buckets=features.OOV_SIZE)
  for key, num_buckets in zip(features.BUCKET_FEATURE_KEYS,
                              features.BUCKET_FEATURE_BUCKET_COUNT):
    outputs[features.transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]),
        num_buckets)
  for key in features.CATEGORICAL_FEATURE_KEYS:
    outputs[features.transformed_name(key)] = _fill_in_missing(inputs[key])
  # TODO(b/157064428): Support label transformation for Keras.
  # Do not apply label transformation as it will result in wrong evaluation.
  outputs[features.transformed_name(
      features.LABEL_KEY)] = inputs[features.LABEL_KEY]
  return outputs
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow as tf
import tensorflow_transform as tft
from tfx.experimental.templates.taxi.models import features
from tfx.experimental.templates.taxi.models.keras import constants
from tfx_bsl.tfxio import dataset_options
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and applies TFT."""
  # Attach the TFT layer to the model so it is tracked and exported with it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is never fed at serving time.
    feature_spec.pop(features.LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
def _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200):
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  label_key = features.transformed_name(features.LABEL_KEY)
  options = dataset_options.TensorFlowDatasetOptions(
      batch_size=batch_size, label_key=label_key)
  dataset = data_accessor.tf_dataset_factory(
      file_pattern, options, tf_transform_output.transformed_metadata.schema)
  return dataset.repeat()
def _build_keras_model(hidden_units, learning_rate):
  """Creates a DNN Keras model for classifying taxi data.

  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).
    learning_rate: [float], learning rate of the Adam optimizer.

  Returns:
    A keras Model.
  """
  real_valued_columns = []
  for key in features.transformed_names(features.DENSE_FLOAT_FEATURE_KEYS):
    real_valued_columns.append(tf.feature_column.numeric_column(key, shape=()))

  # Categorical columns, in three groups: vocabulary-based, bucketized, and
  # bounded-integer features. Order is preserved from the original layout.
  categorical_columns = []
  for key in features.transformed_names(features.VOCAB_FEATURE_KEYS):
    categorical_columns.append(
        tf.feature_column.categorical_column_with_identity(
            key,
            num_buckets=features.VOCAB_SIZE + features.OOV_SIZE,
            default_value=0))
  for key, num_buckets in zip(
      features.transformed_names(features.BUCKET_FEATURE_KEYS),
      features.BUCKET_FEATURE_BUCKET_COUNT):
    categorical_columns.append(
        tf.feature_column.categorical_column_with_identity(
            key, num_buckets=num_buckets, default_value=0))
  for key, num_buckets in zip(
      features.transformed_names(features.CATEGORICAL_FEATURE_KEYS),
      features.CATEGORICAL_FEATURE_MAX_VALUES):
    categorical_columns.append(
        tf.feature_column.categorical_column_with_identity(
            key, num_buckets=num_buckets, default_value=0))

  indicator_columns = [
      tf.feature_column.indicator_column(column)
      for column in categorical_columns
  ]
  # TODO(b/140320729) Replace with premade wide_and_deep keras model
  return _wide_and_deep_classifier(
      wide_columns=indicator_columns,
      deep_columns=real_valued_columns,
      dnn_hidden_units=hidden_units,
      learning_rate=learning_rate)
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units,
                              learning_rate):
  """Build a simple keras wide and deep model.

  Args:
    wide_columns: Feature columns wrapped in indicator_column for wide (linear)
      part of the model.
    deep_columns: Feature columns for deep part of the model.
    dnn_hidden_units: [int], the layer sizes of the hidden DNN.
    learning_rate: [float], learning rate of the Adam optimizer.

  Returns:
    A Wide and Deep Keras model.
  """
  # Keras needs the feature definitions at compile time.
  # TODO(b/139081439): Automate generation of input layers from FeatureColumn.
  input_layers = {
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
      for colname in features.transformed_names(
          features.DENSE_FLOAT_FEATURE_KEYS)
  }
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in features.transformed_names(features.VOCAB_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in features.transformed_names(features.BUCKET_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32') for
      colname in features.transformed_names(features.CATEGORICAL_FEATURE_KEYS)
  })

  # TODO(b/161952382): Replace with Keras premade models and
  # Keras preprocessing layers.
  deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
  for numnodes in dnn_hidden_units:
    deep = tf.keras.layers.Dense(numnodes)(deep)
  wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)
  output = tf.keras.layers.Dense(
      1, activation='sigmoid')(
          tf.keras.layers.concatenate([deep, wide]))
  # Drop the trailing singleton dimension so predictions have shape [batch].
  output = tf.squeeze(output, -1)

  model = tf.keras.Model(input_layers, output)
  model.compile(
      loss='binary_crossentropy',
      # `lr` is a deprecated alias in tf.keras optimizers; use `learning_rate`.
      optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
      metrics=[tf.keras.metrics.BinaryAccuracy()])
  model.summary(print_fn=logging.info)
  return model
# TFX Trainer will call this function.
def run_fn(fn_args):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # NOTE(review): garbled trailing metadata on the final line has been removed.
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
                            tf_transform_output, constants.TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
                           tf_transform_output, constants.EVAL_BATCH_SIZE)

  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = _build_keras_model(
        hidden_units=constants.HIDDEN_UNITS,
        learning_rate=constants.LEARNING_RATE)

  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx.experimental.templates.taxi.models import features
from tfx.experimental.templates.taxi.models.estimator import constants
from tfx.utils import io_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
def _gzip_reader_fn(filenames):
  """Returns a dataset that reads gzip-compressed TFRecord files."""
  return tf.data.TFRecordDataset(filenames=filenames, compression_type='GZIP')
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Returns the raw (pre-transform) feature spec derived from `schema`."""
  feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
  return feature_spec
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    The DNNLinearCombinedClassifier estimator that will be used for training
    and eval.
  """
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in features.transformed_names(features.DENSE_FLOAT_FEATURE_KEYS)
  ]
  vocab_columns = [
      tf.feature_column.categorical_column_with_identity(
          key,
          num_buckets=features.VOCAB_SIZE + features.OOV_SIZE,
          default_value=0)
      for key in features.transformed_names(features.VOCAB_FEATURE_KEYS)
  ]
  bucket_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=num_buckets, default_value=0)
      for key, num_buckets in zip(
          features.transformed_names(features.BUCKET_FEATURE_KEYS),
          features.BUCKET_FEATURE_BUCKET_COUNT)
  ]
  bounded_int_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=num_buckets, default_value=0)
      for key, num_buckets in zip(
          features.transformed_names(features.CATEGORICAL_FEATURE_KEYS),
          features.CATEGORICAL_FEATURE_MAX_VALUES)
  ]
  categorical_columns = vocab_columns + bucket_columns + bounded_int_columns
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_output, schema):
  """Builds the serving inputs.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is never provided at serving time.
  raw_feature_spec.pop(features.LABEL_KEY)
  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  receiver = raw_input_fn()
  transformed = tf_transform_output.transform_raw_features(receiver.features)
  return tf.estimator.export.ServingInputReceiver(transformed,
                                                  receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)
  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  raw_features = tf.io.parse_example(
      serialized=serialized_tf_example, features=raw_feature_spec)
  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  transformed_features = tf_transform_output.transform_raw_features(
      raw_features)
  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}
  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
  raw_features.update(transformed_features)
  return tfma.export.EvalInputReceiver(
      features=raw_features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[features.transformed_name(
          features.LABEL_KEY)])
def _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200):
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  return data_accessor.tf_dataset_factory(
      file_pattern,
      dataset_options.TensorFlowDatasetOptions(
          batch_size=batch_size,
          label_key=features.transformed_name(features.LABEL_KEY)),
      tf_transform_output.transformed_metadata.schema)
def _create_train_and_eval_spec(trainer_fn_args, schema):
  """Build the estimator, train/eval specs and TFMA eval receiver.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)
  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      trainer_fn_args.data_accessor,
      tf_transform_output,
      batch_size=constants.TRAIN_BATCH_SIZE)
  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      trainer_fn_args.data_accessor,
      tf_transform_output,
      batch_size=constants.EVAL_BATCH_SIZE)
  train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)
  serving_receiver_fn = lambda: _example_serving_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)
  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')
  # Checkpoint every 999 steps and keep only the newest checkpoint to
  # bound disk usage.
  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=1)
  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)
  estimator = _build_estimator(
      hidden_units=constants.HIDDEN_UNITS, config=run_config)
  # Create an input receiver for TFMA processing
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)
  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
# TFX will call this function
def run_fn(fn_args):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())

  train_and_eval_spec = _create_train_and_eval_spec(fn_args, schema)

  # Train the model.
  logging.info('Training model.')
  tf.estimator.train_and_evaluate(train_and_eval_spec['estimator'],
                                  train_and_eval_spec['train_spec'],
                                  train_and_eval_spec['eval_spec'])
  logging.info('Training complete. Model written to %s',
               fn_args.serving_model_dir)

  # Export an eval savedmodel for TFMA.
  # NOTE: When trained in a distributed training cluster, eval_savedmodel must
  # be exported only by the chief worker.
  logging.info('Exporting eval_savedmodel for TFMA.')
  tfma.export.export_eval_savedmodel(
      estimator=train_and_eval_spec['estimator'],
      export_dir_base=fn_args.eval_model_dir,
      eval_input_receiver_fn=train_and_eval_spec['eval_input_receiver_fn'])
  logging.info('Exported eval_savedmodel to %s.', fn_args.eval_model_dir)
from typing import Any, Dict, Text
import apache_beam as beam
from apache_beam.io.gcp import bigquery
import tensorflow as tf
from tfx.utils import telemetry_utils
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.typehints.Dict[Text, Any])
def ReadFromBigQuery(
    pipeline: beam.Pipeline, query: Text) -> beam.pvalue.PCollection:
  """Reads rows from BigQuery as dicts.

  Args:
    pipeline: Beam pipeline.
    query: A BigQuery sql string.

  Returns:
    PCollection of dict.
  """
  # Tag the BigQuery job with TFX telemetry labels for usage attribution.
  read_transform = bigquery.ReadFromBigQuery(
      query=query,
      use_standard_sql=True,
      bigquery_job_labels=telemetry_utils.get_labels_dict())
  return pipeline | 'ReadFromBigQuery' >> read_transform
def row_to_example(  # pylint: disable=invalid-name
    field_to_type: Dict[Text, Text],
    field_name_to_data: Dict[Text, Any]) -> tf.train.Example:
  """Convert a BigQuery result row to a tf.train.Example.

  Args:
    field_to_type: The name of the field to its type from BigQuery.
    field_name_to_data: The data need to be converted from BigQuery that
      contains field name and data.

  Returns:
    A tf.train.Example that converted from the BigQuery row. Note that BOOLEAN
    type in BigQuery result will be converted to int in tf.train.Example.

  Raises:
    RuntimeError: If the data type is not supported to be converted.
      Only INTEGER, BOOLEAN, FLOAT, STRING is supported now.
  """
  feature = {}
  for key, value in field_name_to_data.items():
    data_type = field_to_type[key]

    if value is None:
      # NULL columns become an empty (kind-less) feature.
      feature[key] = tf.train.Feature()
      continue

    # REPEATED columns arrive as lists; scalars are wrapped for uniformity.
    value_list = value if isinstance(value, list) else [value]
    if data_type in ('INTEGER', 'BOOLEAN'):
      feature[key] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=value_list))
    elif data_type == 'FLOAT':
      feature[key] = tf.train.Feature(
          float_list=tf.train.FloatList(value=value_list))
    elif data_type == 'STRING':
      feature[key] = tf.train.Feature(
          bytes_list=tf.train.BytesList(
              value=[tf.compat.as_bytes(elem) for elem in value_list]))
    else:
      # TODO(jyzhao): support more types.
      raise RuntimeError(
          'BigQuery column type {} is not supported.'.format(data_type))
  return tf.train.Example(features=tf.train.Features(feature=feature))
"""TFX BigQueryToElwcExampleGen component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.base import executor_spec
from tfx.components.example_gen import component
from tfx.components.example_gen import utils
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.component import executor
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.proto import elwc_config_pb2
from tfx.proto import example_gen_pb2
class BigQueryToElwcExampleGen(component.QueryBasedExampleGen):
  """Official TFX BigQueryToElwcExampleGen component.

  The BigQueryToElwcExampleGen component takes a query, and generates train
  and eval ExampleListWithContext(ELWC) for downstream components.
  """

  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               query: Optional[Text] = None,
               elwc_config: Optional[elwc_config_pb2.ElwcConfig] = None,
               input_config: Optional[example_gen_pb2.Input] = None,
               output_config: Optional[example_gen_pb2.Output] = None,
               example_artifacts: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a BigQueryElwcExampleGen component.

    Args:
      query: BigQuery sql string, query result will be treated as a single
        split, can be overwritten by input_config.
      elwc_config: The elwc config contains a list of context feature fields.
        The fields are used to build context feature. Examples with the same
        context feature will be converted to an ELWC(ExampleListWithContext)
        instance. For example, when there are two examples with the same
        context field, the two examples will be integrated into an ELWC
        instance.
      input_config: An example_gen_pb2.Input instance with Split.pattern as
        BigQuery sql string. If set, it overwrites the 'query' arg, and allows
        different queries per split. If any field is provided as a
        RuntimeParameter, input_config should be constructed as a dict with
        the same field names as Input proto message.
      output_config: An example_gen_pb2.Output instance, providing output
        configuration. If unset, default splits will be 'train' and 'eval'
        with size 2:1. If any field is provided as a RuntimeParameter,
        output_config should be constructed as a dict with the same field
        names as Output proto message.
      example_artifacts: Optional channel of 'ExamplesPath' for output train
        and eval examples.
      instance_name: Optional unique instance name. Necessary if multiple
        BigQueryExampleGen components are declared in the same pipeline.

    Raises:
      RuntimeError: Only one of query and input_config should be set and
        elwc_config is required.
    """
    if bool(query) == bool(input_config):
      raise RuntimeError('Exactly one of query and input_config should be set.')
    if not elwc_config:
      raise RuntimeError(
          'elwc_config is required for BigQueryToElwcExampleGen.')
    input_config = input_config or utils.make_default_input_config(query)
    # Wrap the ELWC config in the generic CustomConfig proto expected by the
    # base class; the executor unpacks it again.
    packed_custom_config = example_gen_pb2.CustomConfig()
    packed_custom_config.custom_config.Pack(elwc_config)
    super(BigQueryToElwcExampleGen, self).__init__(
        input_config=input_config,
        output_config=output_config,
        output_data_format=example_gen_pb2.FORMAT_PROTO,
        custom_config=packed_custom_config,
        example_artifacts=example_artifacts,
        instance_name=instance_name)
"""Generic TFX BigQueryToElwcExampleGen executor."""
from typing import Any, Dict, Iterable, List, Set, Text, Tuple
import apache_beam as beam
from google.cloud import bigquery
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.extensions.google_cloud_big_query import utils
from tfx.extensions.google_cloud_big_query.experimental.elwc_example_gen.proto import elwc_config_pb2
from tfx.proto import example_gen_pb2
from google.protobuf import json_format
from tensorflow_serving.apis import input_pb2
# TODO(b/158514307): Revisit when PGBKCVOperation can hold serialized keys.
@beam.typehints.with_input_types(Dict[Text, Any])
@beam.typehints.with_output_types(Tuple[bytes, tf.train.Example])
class _RowToContextFeatureAndExample(beam.DoFn):
  """Splits a BigQuery row into a (context feature key, example) pair."""

  def __init__(self, type_map: Dict[Text, Text],
               context_feature_fields: Set[Text]):
    self._type_map = type_map
    self._context_feature_fields = context_feature_fields

  def process(
      self, instance: Dict[Text,
                           Any]) -> Iterable[Tuple[bytes, tf.train.Example]]:
    context_fields = self._context_feature_fields
    context_data = {
        name: value
        for name, value in instance.items()
        if name in context_fields
    }
    example_data = {
        name: value
        for name, value in instance.items()
        if name not in context_fields
    }
    # Serialize deterministically so rows sharing a context map to one key.
    context_key = utils.row_to_example(
        self._type_map, context_data).SerializeToString(deterministic=True)
    yield (context_key, utils.row_to_example(self._type_map, example_data))
def _ConvertContextAndExamplesToElwc(
    context_feature_and_examples: Tuple[bytes, List[tf.train.Example]]
) -> input_pb2.ExampleListWithContext:
  """Packs a serialized context feature and its examples into an ELWC."""
  serialized_context, example_list = context_feature_and_examples
  context = tf.train.Example()
  context.ParseFromString(serialized_context)
  return input_pb2.ExampleListWithContext(
      context=context, examples=example_list)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(input_pb2.ExampleListWithContext)
def _BigQueryToElwc(pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
                    split_pattern: Text) -> beam.pvalue.PCollection:
  """Read from BigQuery and transform to ExampleListWithContext.

  When a field has no value in BigQuery, a feature with no value will be
  generated in the tf.train.Features. This behavior is consistent with
  BigQueryExampleGen.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties.
    split_pattern: Split.pattern in Input config, a BigQuery sql string.

  Returns:
    PCollection of ExampleListWithContext.

  Raises:
    RuntimeError: Context features must be included in the queried result.
  """
  # Unpack the ElwcConfig that the component packed into the generic
  # CustomConfig proto.
  custom_config = example_gen_pb2.CustomConfig()
  json_format.Parse(exec_properties['custom_config'], custom_config)
  elwc_config = elwc_config_pb2.ElwcConfig()
  custom_config.custom_config.Unpack(elwc_config)
  client = bigquery.Client()
  # Dummy query to get the type information for each field.
  query_job = client.query('SELECT * FROM ({}) LIMIT 0'.format(split_pattern))
  results = query_job.result()
  type_map = {}
  context_feature_fields = set(elwc_config.context_feature_fields)
  field_names = set()
  for field in results.schema:
    type_map[field.name] = field.field_type
    field_names.add(field.name)
  # Check whether the query contains necessary context fields.
  if not field_names.issuperset(context_feature_fields):
    raise RuntimeError('Context feature fields are missing from the query.')
  return (
      pipeline
      | 'ReadFromBigQuery' >> utils.ReadFromBigQuery(query=split_pattern)
      | 'RowToContextFeatureAndExample' >> beam.ParDo(
          _RowToContextFeatureAndExample(type_map, context_feature_fields))
      |
      'CombineByContext' >> beam.CombinePerKey(beam.combiners.ToListCombineFn())
      | 'ConvertContextAndExamplesToElwc' >>
      beam.Map(_ConvertContextAndExamplesToElwc))
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
  """Generic TFX BigQueryElwcExampleGen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for BigQuery to ExampleListWithContext."""
    return _BigQueryToElwc
"""TFX BigQueryExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from tfx import types
from tfx.components.example_gen import component
from tfx.components.example_gen import utils
from tfx.dsl.components.base import executor_spec
from tfx.extensions.google_cloud_big_query.example_gen import executor
from tfx.proto import example_gen_pb2
class BigQueryExampleGen(component.QueryBasedExampleGen):
  """Official TFX BigQueryExampleGen component.

  The BigQuery examplegen component takes a query, and generates train
  and eval examples for downstream components.
  """

  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               query: Optional[Text] = None,
               input_config: Optional[example_gen_pb2.Input] = None,
               output_config: Optional[example_gen_pb2.Output] = None,
               example_artifacts: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a BigQueryExampleGen component.

    Args:
      query: BigQuery sql string, query result will be treated as a single
        split, can be overwritten by input_config.
      input_config: An example_gen_pb2.Input instance with Split.pattern as
        BigQuery sql string. If set, it overwrites the 'query' arg, and allows
        different queries per split. If any field is provided as a
        RuntimeParameter, input_config should be constructed as a dict with
        the same field names as Input proto message.
      output_config: An example_gen_pb2.Output instance, providing output
        configuration. If unset, default splits will be 'train' and 'eval'
        with size 2:1. If any field is provided as a RuntimeParameter,
        output_config should be constructed as a dict with the same field
        names as Output proto message.
      example_artifacts: Optional channel of 'ExamplesPath' for output train
        and eval examples.
      instance_name: Optional unique instance name. Necessary if multiple
        BigQueryExampleGen components are declared in the same pipeline.

    Raises:
      RuntimeError: Only one of query and input_config should be set.
    """
    if bool(query) == bool(input_config):
      raise RuntimeError('Exactly one of query and input_config should be set.')
    input_config = input_config or utils.make_default_input_config(query)
    super(BigQueryExampleGen, self).__init__(
        input_config=input_config,
        output_config=output_config,
        example_artifacts=example_artifacts,
        instance_name=instance_name)
"""Generic TFX BigQueryExampleGen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text
import apache_beam as beam
from apache_beam.options import value_provider
from google.cloud import bigquery
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.extensions.google_cloud_big_query import utils
class _BigQueryConverter(object):
  """Help class for bigquery result row to tf example conversion."""

  def __init__(self, query: Text, project_id: Optional[Text] = None):
    """Instantiate a _BigQueryConverter object.

    Args:
      query: the query statement to get the type information.
      project_id: optional. The GCP project ID to run the query job. Default to
        the GCP project ID set by the gcloud environment on the machine.
    """
    client = bigquery.Client(project=project_id)
    # Dummy query to get the type information for each field.
    query_job = client.query('SELECT * FROM ({}) LIMIT 0'.format(query))
    results = query_job.result()
    # Maps each result field name to its BigQuery type.
    self._type_map = {}
    for field in results.schema:
      self._type_map[field.name] = field.field_type

  def RowToExample(self, instance: Dict[Text, Any]) -> tf.train.Example:
    """Convert bigquery result row to tf example."""
    return utils.row_to_example(self._type_map, instance)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _BigQueryToExample(
    pipeline: beam.Pipeline,
    exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Read from BigQuery and transform to TF examples.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties.
    split_pattern: Split.pattern in Input config, a BigQuery sql string.

  Returns:
    PCollection of TF examples.
  """
  beam_pipeline_args = exec_properties['_beam_pipeline_args']
  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      beam_pipeline_args)
  # Try to parse the GCP project ID from the beam pipeline options.
  project = pipeline_options.view_as(
      beam.options.pipeline_options.GoogleCloudOptions).project
  # The project may be a deferred ValueProvider; resolve it before use.
  if isinstance(project, value_provider.ValueProvider):
    project = project.get()
  converter = _BigQueryConverter(split_pattern, project)
  return (pipeline
          | 'QueryTable' >> utils.ReadFromBigQuery(query=split_pattern)
          | 'ToTFExample' >> beam.Map(converter.RowToExample))
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
  """Generic TFX BigQueryExampleGen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for BigQuery to TF examples."""
    return _BigQueryToExample
"""Functions for creating container components from kubeflow components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Dict, Text
from tfx.dsl.component.experimental import container_component
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import base_component
from tfx.extensions.experimental.kfp_compatibility.proto import kfp_component_spec_pb2
from tfx.types import standard_artifacts
import yaml
from google.protobuf import json_format
def load_kfp_yaml_container_component(
    path: Text) -> Callable[..., base_component.BaseComponent]:
  """Creates a container-based component from a Kubeflow component spec.

  See
  https://www.kubeflow.org/docs/pipelines/reference/component-spec/

  Example:
    component = load_kfp_yaml_container_component(
      "kfp_pipelines_root/components/datasets/Chicago_Taxi_Trips/component.yaml"
    )

  Args:
    path: local file path of a Kubeflow Pipelines component YAML file.

  Returns:
    Container component that can be instantiated in a TFX pipeline.
  """
  with open(path) as component_file:
    # NOTE(review): FullLoader can construct arbitrary Python objects from
    # tagged YAML; consider yaml.safe_load if component files are untrusted.
    data = yaml.load(component_file, Loader=yaml.FullLoader)
  # Rewrite bare command/args strings as {constantValue: ...} so the dict can
  # be parsed into the proto3 ComponentSpec message.
  _convert_target_fields_to_kv_pair(data)
  component_spec = json_format.ParseDict(data,
                                         kfp_component_spec_pb2.ComponentSpec())
  container = component_spec.implementation.container
  # Concatenate command and args; TFX containers take a single command list.
  command = (
      list(map(_get_command_line_argument_type, container.command)) +
      list(map(_get_command_line_argument_type, container.args)))
  # TODO(ericlege): Support classname to class translation in inputs.type
  inputs = {
      item.name: standard_artifacts.String for item in component_spec.inputs
  }
  outputs = {
      item.name: standard_artifacts.String for item in component_spec.outputs
  }
  parameters = {}
  return container_component.create_container_component(
      name=component_spec.name,
      image=container.image,
      command=command,
      inputs=inputs,
      outputs=outputs,
      parameters=parameters,
  )
def _convert_target_fields_to_kv_pair(parsed_dict: Dict[Text, Any]) -> None:
"""Converts in place specific string fields to key value pairs of {constantValue: [Text]} for proto3 compatibility.
Args:
parsed_dict: dictionary obtained from parsing README.ml-pipelines-sdk.md Kubeflow component spec.
This argument is modified in place.
Returns:
None
"""
conversion_string_paths = [
['implementation', 'container', 'command'],
['implementation', 'container', 'args'],
]
for path in conversion_string_paths:
parsed_dict_location = parsed_dict
for label in path:
parsed_dict_location = parsed_dict_location.get(label, {})
if isinstance(parsed_dict_location, list):
for ind, value in enumerate(parsed_dict_location):
if isinstance(value, str):
parsed_dict_location[ind] = {'constantValue': value}
def _get_command_line_argument_type(
    command: kfp_component_spec_pb2.StringOrPlaceholder
) -> placeholders.CommandlineArgumentType:
  """Converts a container command to the corresponding placeholder type.

  Args:
    command: StringOrPlaceholder which encodes a container command.

  Returns:
    command to be passed into create_container_component.

  Raises:
    ValueError: If none of the known oneof fields is set on `command`.
  """
  if command.HasField('constantValue'):
    return command.constantValue
  if command.HasField('inputValue'):
    return placeholders.InputValuePlaceholder(command.inputValue)
  if command.HasField('inputPath'):
    return placeholders.InputUriPlaceholder(command.inputPath)
  if command.HasField('outputPath'):
    return placeholders.OutputUriPlaceholder(command.outputPath)
  raise ValueError('Unrecognized command %s' % command)
"""An abstract class for the runner for both CAIP and uCAIP."""
import abc
import datetime
import json
from typing import Any, Dict, List, Optional, Text
from absl import logging
from googleapiclient import discovery
from googleapiclient import http
from tfx import types
from tfx.types import artifact_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
# Default container image being used for CAIP training jobs.
_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
version_utils.get_image_version())
# Entrypoint of cloud AI platform training. The module comes from `tfx`
# package installation into a default location of 'python'.
_CONTAINER_COMMAND = ['python', '-m', 'tfx.scripts.run_executor']
class AbstractJobClient(abc.ABC):
  """Abstract class interacting with CAIP CMLE job or uCAIP CustomJob."""

  def __init__(self):
    self.create_client()
    self._init_var()

  @abc.abstractmethod
  def _init_var(self) -> None:
    """Initializes class variables."""
    pass

  @abc.abstractmethod
  def create_client(self) -> None:
    """Creates the job client.

    Can also be used for recreating the job client (e.g. in the case of
    communication failure).

    Multiple job requests can be done in parallel if needed, by creating an
    instance of the class for each job. Note that one class instance should
    only be used for one job, as each instance stores variables (e.g. job_id)
    specific to each job.
    """
    pass

  @abc.abstractmethod
  def create_training_args(self, input_dict, output_dict, exec_properties,
                           executor_class_path, training_inputs,
                           job_id) -> Dict[Text, Any]:
    """Get training args for runner._launch_aip_training.

    The training args contain the inputs/outputs/exec_properties to the
    tfx.scripts.run_executor module.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Passthrough input dict for
        tfx.components.Trainer.executor.
      executor_class_path: class path for TFX core default trainer.
      training_inputs: Training input argument for AI Platform training job.
      job_id: Job ID for AI Platform Training job. If not supplied,
        system-determined unique ID is given.

    Returns:
      A dict containing the training arguments
    """
    pass

  @abc.abstractmethod
  def _create_job_spec(
      self,
      job_id: Text,
      training_input: Dict[Text, Any],
      job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:
    """Creates the job spec.

    Args:
      job_id: The job ID of the AI Platform training job.
      training_input: Training input argument for AI Platform training job.
      job_labels: The dict of labels that will be attached to this job.

    Returns:
      The job specification.
    """
    pass

  @abc.abstractmethod
  def launch_job(self,
                 job_id: Text,
                 parent: Text,
                 training_input: Dict[Text, Any],
                 job_labels: Optional[Dict[Text, Text]] = None) -> None:
    """Launches a long-running job.

    Args:
      job_id: The job ID of the AI Platform training job.
      parent: The project name in the form of 'projects/{project_id}'
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.
    """
    pass

  @abc.abstractmethod
  def get_job_request(self) -> http.HttpRequest:
    """Gets the job request for the long-running job."""
    pass
class CAIPJobClient(AbstractJobClient):
  """Class for interacting with CAIP CMLE job."""

  def create_client(self) -> None:
    """Creates the discovery job client.

    Multiple job requests can be done in parallel if needed, by creating an
    instance of the class for each job.
    """
    self._client = discovery.build('ml', 'v1')

  def _init_var(self) -> None:
    """Initializes class variables."""
    self._job_id = ''  # Assigned in self.launch_job()
    self._project_id = ''  # Assigned in self.launch_job()

  def create_training_args(self, input_dict: Dict[Text, List[types.Artifact]],
                           output_dict: Dict[Text, List[types.Artifact]],
                           exec_properties: Dict[Text, Any],
                           executor_class_path: Text,
                           training_inputs: Dict[Text, Any],
                           job_id: Optional[Text]) -> Dict[Text, Any]:
    """Get training args for runner._launch_aip_training.

    The training args contain the inputs/outputs/exec_properties to the
    tfx.scripts.run_executor module.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Passthrough input dict for
        tfx.components.Trainer.executor.
      executor_class_path: class path for TFX core default trainer.
      training_inputs: Training input argument for AI Platform training job.
        'pythonModule', 'pythonVersion' and 'runtimeVersion' will be inferred.
        For the full set of parameters, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
      job_id: Job ID for AI Platform Training job. If not supplied,
        system-determined unique ID is given. Refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#resource-job

    Returns:
      A dict containing the training arguments
    """
    # Copy so the caller's dict is not mutated by the edits below.
    training_inputs = training_inputs.copy()

    json_inputs = artifact_utils.jsonify_artifact_dict(input_dict)
    logging.info('json_inputs=\'%s\'.', json_inputs)
    json_outputs = artifact_utils.jsonify_artifact_dict(output_dict)
    logging.info('json_outputs=\'%s\'.', json_outputs)
    json_exec_properties = json.dumps(exec_properties, sort_keys=True)
    logging.info('json_exec_properties=\'%s\'.', json_exec_properties)

    # We use custom containers to launch training on AI Platform, which invokes
    # the specified image using the container's entrypoint. The default
    # entrypoint for TFX containers is to call scripts/run_executor.py. The
    # arguments below are passed to this run_executor entry to run the executor
    # specified in `executor_class_path`.
    container_command = _CONTAINER_COMMAND + [
        '--executor_class_path',
        executor_class_path,
        '--inputs',
        json_inputs,
        '--outputs',
        json_outputs,
        '--exec-properties',
        json_exec_properties,
    ]

    if not training_inputs.get('masterConfig'):
      training_inputs['masterConfig'] = {
          'imageUri': _TFX_IMAGE,
      }

    # Always use our own entrypoint instead of relying on container default.
    if 'containerCommand' in training_inputs['masterConfig']:
      logging.warn('Overriding custom value of containerCommand')
    training_inputs['masterConfig']['containerCommand'] = container_command

    # Pop project_id so AIP doesn't complain about an unexpected parameter.
    # It's been a stowaway in aip_args and has finally reached its destination.
    project = training_inputs.pop('project')

    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
      job_labels = telemetry_utils.get_labels_dict()

    # 'tfx_YYYYmmddHHMMSS' is the default job ID if not explicitly specified.
    job_id = job_id or 'tfx_{}'.format(
        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))

    training_args = {
        'job_id': job_id,
        'project': project,
        'training_input': training_inputs,
        'job_labels': job_labels
    }

    return training_args

  def _create_job_spec(
      self,
      job_id: Text,
      training_input: Dict[Text, Any],
      job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:
    """Creates the job spec.

    Args:
      job_id: The job ID of the AI Platform training job.
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.

    Returns:
      The job specification. See
      https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs
    """
    job_spec = {
        'jobId': job_id,
        'trainingInput': training_input,
        'labels': job_labels,
    }
    return job_spec

  def launch_job(self,
                 job_id: Text,
                 parent: Text,
                 training_input: Dict[Text, Any],
                 job_labels: Optional[Dict[Text, Text]] = None) -> None:
    """Launches a long-running job.

    Args:
      job_id: The job ID of the AI Platform training job.
      parent: The project name in the form of 'projects/{project_id}'
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.
    """
    job_spec = self._create_job_spec(job_id, training_input, job_labels)

    # Submit job to AIP Training
    logging.info('TrainingInput=%s', training_input)
    logging.info('Submitting job=\'%s\', project=\'%s\' to AI Platform.',
                 job_id, parent)
    request = self._client.projects().jobs().create(
        body=job_spec, parent=parent)
    self._job_id = job_id
    self._project_id = parent
    request.execute()

  def get_job_request(self) -> http.HttpRequest:
    """Gets the job request for the long-running job."""
    job_name = '{}/jobs/{}'.format(self._project_id, self._job_id)
    request = self._client.projects().jobs().get(name=job_name)
    return request
def get_job_client(enable_ucaip: bool = False):
  """Returns a client for submitting AI Platform training jobs.

  Args:
    enable_ucaip: If True, request a uCAIP (unified Cloud AI Platform) client;
      currently unsupported.

  Raises:
    NotImplementedError: If `enable_ucaip` is True.
  """
  if enable_ucaip:
    raise NotImplementedError('uCAIP support not yet implemented')
  return CAIPJobClient() | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/extensions/google_cloud_ai_platform/training_clients.py | 0.892674 | 0.330417 | training_clients.py | pypi |
"""Custom executor to push TFX model to AI Platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from typing import Any, Dict, List, Text
from google.api_core import client_options # pylint: disable=unused-import
from googleapiclient import discovery
from tfx import types
from tfx.components.pusher import executor as tfx_pusher_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx.utils import path_utils
from tfx.utils import telemetry_utils
# Google Cloud AI Platform's ModelVersion resource path format.
# https://cloud.google.com/ai-platform/prediction/docs/reference/rest/v1/projects.models.versions/get
_CAIP_MODEL_VERSION_PATH_FORMAT = (
'projects/{project_id}/models/{model}/versions/{version}')
# Keys to the items in custom_config passed as a part of exec_properties.
SERVING_ARGS_KEY = 'ai_platform_serving_args'
ENDPOINT_ARGS_KEY = 'endpoint'
# Keys for custom_config.
_CUSTOM_CONFIG_KEY = 'custom_config'
class Executor(tfx_pusher_executor.Executor):
"""Deploy README.ml-pipelines-sdk.md model to Google Cloud AI Platform serving."""
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]):
"""Overrides the tfx_pusher_executor.
Args:
input_dict: Input dict from input key to README.ml-pipelines-sdk.md list of artifacts, including:
- model_export: exported model from trainer.
- model_blessing: model blessing path from evaluator.
output_dict: Output dict from key to README.ml-pipelines-sdk.md list of artifacts, including:
- model_push: A list of 'ModelPushPath' artifact of size one. It will
include the model in this push execution if the model was pushed.
exec_properties: Mostly README.ml-pipelines-sdk.md passthrough input dict for
tfx.components.Pusher.executor. The following keys in `custom_config`
are consumed by this class:
- ai_platform_serving_args: For the full set of parameters supported
by Google Cloud AI Platform, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version.
- endpoint: Optional endpoint override. Should be in format of
`https://[region]-ml.googleapis.com`. Default to global endpoint if
not set. Using regional endpoint is recommended by Cloud AI Platform.
When set, 'regions' key in ai_platform_serving_args cannot be set.
For more details, please see
https://cloud.google.com/ai-platform/prediction/docs/regional-endpoints#using_regional_endpoints
Raises:
ValueError:
If ai_platform_serving_args is not in exec_properties.custom_config.
If Serving model path does not start with gs://.
If 'endpoint' and 'regions' are set simultanuously.
RuntimeError: if the Google Cloud AI Platform training job failed.
"""
self._log_startup(input_dict, output_dict, exec_properties)
custom_config = json_utils.loads(
exec_properties.get(_CUSTOM_CONFIG_KEY, 'null'))
if custom_config is not None and not isinstance(custom_config, Dict):
raise ValueError('custom_config in execution properties needs to be README.ml-pipelines-sdk.md '
'dict.')
ai_platform_serving_args = custom_config.get(SERVING_ARGS_KEY)
if not ai_platform_serving_args:
raise ValueError(
'\'ai_platform_serving_args\' is missing in \'custom_config\'')
endpoint = custom_config.get(ENDPOINT_ARGS_KEY)
if endpoint and 'regions' in ai_platform_serving_args:
raise ValueError(
'\'endpoint\' and \'ai_platform_serving_args.regions\' cannot be set simultanuously'
)
model_push = artifact_utils.get_single_instance(
output_dict[standard_component_specs.PUSHED_MODEL_KEY])
if not self.CheckBlessing(input_dict):
self._MarkNotPushed(model_push)
return
model_export = artifact_utils.get_single_instance(
input_dict[standard_component_specs.MODEL_KEY])
service_name, api_version = runner.get_service_name_and_api_version(
ai_platform_serving_args)
# Deploy the model.
io_utils.copy_dir(
src=path_utils.serving_model_path(model_export.uri), dst=model_push.uri)
model_path = model_push.uri
# TODO(jjong): Introduce Versioning.
# Note that we're adding "v" prefix as Cloud AI Prediction only allows the
# version name that starts with letters, and contains letters, digits,
# underscore only.
model_version = 'v{}'.format(int(time.time()))
executor_class_path = '%s.%s' % (self.__class__.__module__,
self.__class__.__name__)
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
job_labels = telemetry_utils.get_labels_dict()
endpoint = endpoint or runner.DEFAULT_ENDPOINT
api = discovery.build(
service_name,
api_version,
client_options=client_options.ClientOptions(api_endpoint=endpoint),
)
runner.deploy_model_for_aip_prediction(
api,
model_path,
model_version,
ai_platform_serving_args,
job_labels,
)
self._MarkPushed(
model_push,
pushed_destination=_CAIP_MODEL_VERSION_PATH_FORMAT.format(
project_id=ai_platform_serving_args['project_id'],
model=ai_platform_serving_args['model_name'],
version=model_version),
pushed_version=model_version) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/extensions/google_cloud_ai_platform/pusher/executor.py | 0.828523 | 0.216985 | executor.py | pypi |
"""Helper class to start TFX training jobs on AI Platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Text
import absl
from tfx import types
from tfx.components.trainer import executor as tfx_trainer_executor
from tfx.dsl.components.base import base_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.types import standard_component_specs
from tfx.utils import json_utils
# Keys to the items in custom_config passed as a part of exec_properties.
TRAINING_ARGS_KEY = 'ai_platform_training_args'
JOB_ID_KEY = 'ai_platform_training_job_id'
class GenericExecutor(base_executor.BaseExecutor):
  """Start a trainer job on Google Cloud AI Platform using a generic Trainer."""

  def _GetExecutorClass(self):
    # Subclasses override this to pick the local executor class that the
    # remote AI Platform job should run.
    return tfx_trainer_executor.GenericExecutor

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]):
    """Starts a trainer job on Google Cloud AI Platform.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Mostly a passthrough input dict for
        tfx.components.Trainer.executor. custom_config.ai_platform_training_args
        and custom_config.ai_platform_training_job_id are consumed by this
        class. For the full set of parameters supported by Google Cloud AI
        Platform, refer to
        https://cloud.google.com/ml-engine/docs/tensorflow/training-jobs#configuring_the_job

    Returns:
      None

    Raises:
      ValueError: if ai_platform_training_args is not in
        exec_properties.custom_config.
      RuntimeError: if the Google Cloud AI Platform training job failed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    custom_config = json_utils.loads(
        exec_properties.get(standard_component_specs.CUSTOM_CONFIG_KEY, 'null'))
    # `isinstance` against typing.Dict is deprecated; check the builtin type.
    if custom_config is not None and not isinstance(custom_config, dict):
      raise ValueError('custom_config in execution properties needs to be a '
                       'dict.')
    # Guard against a missing custom_config (None) so the clear ValueError
    # below is raised instead of an AttributeError from None.get(...).
    custom_config = custom_config or {}
    training_inputs = custom_config.get(TRAINING_ARGS_KEY)
    if training_inputs is None:
      err_msg = '\'%s\' not found in custom_config.' % TRAINING_ARGS_KEY
      absl.logging.error(err_msg)
      raise ValueError(err_msg)
    job_id = custom_config.get(JOB_ID_KEY)
    executor_class = self._GetExecutorClass()
    executor_class_path = '%s.%s' % (executor_class.__module__,
                                     executor_class.__name__)
    # Note: exec_properties['custom_config'] here is a dict.
    return runner.start_aip_training(input_dict, output_dict, exec_properties,
                                     executor_class_path, training_inputs,
                                     job_id)
class Executor(GenericExecutor):
  """Start a trainer job on Google Cloud AI Platform using a default Trainer."""

  def _GetExecutorClass(self):
    # Selects the default (estimator-based) local Trainer executor for the
    # remote AI Platform job.
    return tfx_trainer_executor.Executor | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/extensions/google_cloud_ai_platform/trainer/executor.py | 0.897007 | 0.282042 | executor.py | pypi |
"""BulkInferrer component for Cloud AI platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.extensions.google_cloud_ai_platform.bulk_inferrer import executor
from tfx.proto import bulk_inferrer_pb2
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ExecutionParameter
from tfx.utils import json_utils
class CloudAIBulkInferrerComponentSpec(types.ComponentSpec):
  """ComponentSpec for BulkInferrer component of Cloud AI platform."""

  # Execution-time parameters. The proto-typed specs are optional;
  # 'custom_config' is a JSON-serialized dict (see the component constructor).
  PARAMETERS = {
      'data_spec':
          ExecutionParameter(type=bulk_inferrer_pb2.DataSpec, optional=True),
      'output_example_spec':
          ExecutionParameter(
              type=bulk_inferrer_pb2.OutputExampleSpec, optional=True),
      'custom_config':
          ExecutionParameter(type=(str, Text)),
  }
  # Input artifact channels consumed by the executor.
  INPUTS = {
      'examples':
          ChannelParameter(type=standard_artifacts.Examples),
      'model':
          ChannelParameter(type=standard_artifacts.Model),
      'model_blessing':
          ChannelParameter(
              type=standard_artifacts.ModelBlessing, optional=True),
  }
  # Output channels: exactly one of the two is populated, depending on
  # whether 'output_example_spec' was provided (see component __init__).
  OUTPUTS = {
      'inference_result':
          ChannelParameter(
              type=standard_artifacts.InferenceResult, optional=True),
      'output_examples':
          ChannelParameter(type=standard_artifacts.Examples, optional=True),
  }
class CloudAIBulkInferrerComponent(base_component.BaseComponent):
  """A Cloud AI component to do batch inference on a remote hosted model.

  BulkInferrer component will push a model to Google Cloud AI Platform,
  consume examples data, send request to the remote hosted model,
  and produces the inference results to an external location
  as PredictionLog proto. After inference, it will delete the model from
  Google Cloud AI Platform.

  TODO(b/155325467): Creates an end-to-end test for this component.
  """

  SPEC_CLASS = CloudAIBulkInferrerComponentSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      examples: types.Channel = None,
      model: Optional[types.Channel] = None,
      model_blessing: Optional[types.Channel] = None,
      data_spec: Optional[Union[bulk_inferrer_pb2.DataSpec, Dict[Text,
                                                                 Any]]] = None,
      output_example_spec: Optional[Union[bulk_inferrer_pb2.OutputExampleSpec,
                                          Dict[Text, Any]]] = None,
      custom_config: Optional[Dict[Text, Any]] = None,
      inference_result: Optional[types.Channel] = None,
      output_examples: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct an BulkInferrer component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, usually
        produced by an ExampleGen component. _required_
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: A Channel of type `standard_artifacts.ModelBlessing`,
        usually produced by a ModelValidator component.
      data_spec: bulk_inferrer_pb2.DataSpec instance that describes data
        selection. If any field is provided as a RuntimeParameter, data_spec
        should be constructed as a dict with the same field names as DataSpec
        proto message.
      output_example_spec: bulk_inferrer_pb2.OutputExampleSpec instance, specify
        if you want BulkInferrer to output examples instead of inference result.
        If any field is provided as a RuntimeParameter, output_example_spec
        should be constructed as a dict with the same field names as
        OutputExampleSpec proto message.
      custom_config: A dict which contains the deployment job parameters to be
        passed to Google Cloud AI Platform.
        custom_config.ai_platform_serving_args need to contain the serving job
        parameters. For the full set of parameters, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
      inference_result: Channel of type `standard_artifacts.InferenceResult`
        to store the inference results, must not be specified when
        output_example_spec is set.
      output_examples: Channel of type `standard_artifacts.Examples`
        to store the output examples, must not be specified when
        output_example_spec is unset. Check output_example_spec for details.
      instance_name: Optional name assigned to this specific instance of
        BulkInferrer. Required only if multiple BulkInferrer components are
        declared in the same pipeline.

    Raises:
      ValueError: Must not specify inference_result or output_examples depends
        on whether output_example_spec is set or not.
    """
    # Exactly one of the two output channels is created, matching whether
    # output_example_spec was supplied.
    if output_example_spec:
      if inference_result:
        raise ValueError(
            'Must not specify inference_result when output_example_spec is set.'
        )
      output_examples = output_examples or types.Channel(
          type=standard_artifacts.Examples)
    else:
      if output_examples:
        raise ValueError(
            'Must not specify output_examples when output_example_spec is unset.'
        )
      inference_result = inference_result or types.Channel(
          type=standard_artifacts.InferenceResult)
    # custom_config is JSON-serialized because the ComponentSpec declares it
    # as a string-typed ExecutionParameter.
    spec = CloudAIBulkInferrerComponentSpec(
        examples=examples,
        model=model,
        model_blessing=model_blessing,
        data_spec=data_spec or bulk_inferrer_pb2.DataSpec(),
        output_example_spec=output_example_spec,
        custom_config=json_utils.dumps(custom_config),
        inference_result=inference_result,
        output_examples=output_examples)
    super(CloudAIBulkInferrerComponent, self).__init__(
        spec=spec, instance_name=instance_name) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/extensions/google_cloud_ai_platform/bulk_inferrer/component.py | 0.908594 | 0.30562 | component.py | pypi |
"""BulkInferrer executor for Cloud AI platform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import re
from typing import Any, Dict, List, Text
from absl import logging
from googleapiclient import discovery
import tensorflow as tf
from tfx import types
from tfx.components.bulk_inferrer import executor as bulk_inferrer_executor
from tfx.components.util import model_utils
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.proto import bulk_inferrer_pb2
from tfx.types import artifact_utils
from tfx.utils import json_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils import telemetry_utils
from tfx_bsl.public.proto import model_spec_pb2
from tensorflow.python.saved_model import loader_impl # pylint:disable=g-direct-tensorflow-import
# TODO(b/140306674): Stop using the internal TF API.
_CLOUD_PUSH_DESTINATION_RE = re.compile(
r'^projects\/([^\/]+)\/models\/([^\/]+)\/versions\/([^\/]+)$')
_CLOUD_PUSH_DESTINATION_RE_DEFAULT_VERSION = re.compile(
r'^projects\/([^\/]+)\/models\/([^\/]+)$')
# We define the following aliases of Any because the actual types are not
# public.
_SignatureDef = Any
# Keys to the items in custom_config passed as a part of exec_properties.
SERVING_ARGS_KEY = 'ai_platform_serving_args'
# Keys for custom_config.
_CUSTOM_CONFIG_KEY = 'custom_config'
class Executor(bulk_inferrer_executor.Executor):
"""Bulk inferer executor for inference on AI Platform."""
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Runs batch inference on README.ml-pipelines-sdk.md given model with given input examples.
This function creates README.ml-pipelines-sdk.md new model (if necessary) and README.ml-pipelines-sdk.md new model version
before inference, and cleans up resources after inference. It provides
re-executability as it cleans up (only) the model resources that are created
during the process even inference job failed.
Args:
input_dict: Input dict from input key to README.ml-pipelines-sdk.md list of Artifacts.
- examples: examples for inference.
- model: exported model.
- model_blessing: model blessing result
output_dict: Output dict from output key to README.ml-pipelines-sdk.md list of Artifacts.
- output: bulk inference results.
exec_properties: A dict of execution properties.
- data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.
- custom_config: custom_config.ai_platform_serving_args need to contain
the serving job parameters sent to Google Cloud AI Platform. For the
full set of parameters, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
Returns:
None
"""
self._log_startup(input_dict, output_dict, exec_properties)
if output_dict.get('inference_result'):
inference_result = artifact_utils.get_single_instance(
output_dict['inference_result'])
else:
inference_result = None
if output_dict.get('output_examples'):
output_examples = artifact_utils.get_single_instance(
output_dict['output_examples'])
else:
output_examples = None
if 'examples' not in input_dict:
raise ValueError('\'examples\' is missing in input dict.')
if 'model' not in input_dict:
raise ValueError('Input models are not valid, model '
'need to be specified.')
if 'model_blessing' in input_dict:
model_blessing = artifact_utils.get_single_instance(
input_dict['model_blessing'])
if not model_utils.is_model_blessed(model_blessing):
logging.info('Model on %s was not blessed', model_blessing.uri)
return
else:
logging.info('Model blessing is not provided, exported model will be '
'used.')
if _CUSTOM_CONFIG_KEY not in exec_properties:
raise ValueError('Input exec properties are not valid, {} '
'need to be specified.'.format(_CUSTOM_CONFIG_KEY))
custom_config = json_utils.loads(
exec_properties.get(_CUSTOM_CONFIG_KEY, 'null'))
if custom_config is not None and not isinstance(custom_config, Dict):
raise ValueError('custom_config in execution properties needs to be README.ml-pipelines-sdk.md '
'dict.')
ai_platform_serving_args = custom_config.get(SERVING_ARGS_KEY)
if not ai_platform_serving_args:
raise ValueError(
'\'ai_platform_serving_args\' is missing in \'custom_config\'')
service_name, api_version = runner.get_service_name_and_api_version(
ai_platform_serving_args)
executor_class_path = '%s.%s' % (self.__class__.__module__,
self.__class__.__name__)
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
job_labels = telemetry_utils.get_labels_dict()
model = artifact_utils.get_single_instance(input_dict['model'])
model_path = path_utils.serving_model_path(model.uri)
logging.info('Use exported model from %s.', model_path)
# Use model artifact uri to generate model version to guarantee the
# 1:1 mapping from model version to model.
model_version = 'version_' + hashlib.sha256(model.uri.encode()).hexdigest()
inference_spec = self._get_inference_spec(model_path, model_version,
ai_platform_serving_args)
data_spec = bulk_inferrer_pb2.DataSpec()
proto_utils.json_to_proto(exec_properties['data_spec'], data_spec)
output_example_spec = bulk_inferrer_pb2.OutputExampleSpec()
if exec_properties.get('output_example_spec'):
proto_utils.json_to_proto(exec_properties['output_example_spec'],
output_example_spec)
api = discovery.build(service_name, api_version)
new_model_created = False
try:
new_model_created = runner.create_model_for_aip_prediction_if_not_exist(
api, job_labels, ai_platform_serving_args)
runner.deploy_model_for_aip_prediction(
api,
model_path,
model_version,
ai_platform_serving_args,
job_labels,
skip_model_creation=True,
set_default_version=False,
)
self._run_model_inference(data_spec, output_example_spec,
input_dict['examples'], output_examples,
inference_result, inference_spec)
except Exception as e:
logging.error('Error in executing CloudAIBulkInferrerComponent: %s',
str(e))
raise
finally:
# Guarantee newly created resources are cleaned up even if theinference
# job failed.
# Clean up the newly deployed model.
runner.delete_model_version_from_aip_if_exists(api, model_version,
ai_platform_serving_args)
if new_model_created:
runner.delete_model_from_aip_if_exists(api, ai_platform_serving_args)
def _get_inference_spec(
self, model_path: Text, model_version: Text,
ai_platform_serving_args: Dict[Text, Any]
) -> model_spec_pb2.InferenceSpecType:
if 'project_id' not in ai_platform_serving_args:
raise ValueError(
'\'project_id\' is missing in \'ai_platform_serving_args\'')
project_id = ai_platform_serving_args['project_id']
if 'model_name' not in ai_platform_serving_args:
raise ValueError(
'\'model_name\' is missing in \'ai_platform_serving_args\'')
model_name = ai_platform_serving_args['model_name']
ai_platform_prediction_model_spec = (
model_spec_pb2.AIPlatformPredictionModelSpec(
project_id=project_id,
model_name=model_name,
version_name=model_version))
model_signature = self._get_model_signature(model_path)
if (len(model_signature.inputs) == 1 and list(
model_signature.inputs.values())[0].dtype == tf.string.as_datatype_enum
):
ai_platform_prediction_model_spec.use_serialization_config = True
logging.info(
'Using hosted model on Cloud AI platform, model_name: %s,'
'model_version: %s.', model_name, model_version)
result = model_spec_pb2.InferenceSpecType()
result.ai_platform_prediction_model_spec.CopyFrom(
ai_platform_prediction_model_spec)
return result
def _get_model_signature(self, model_path: Text) -> _SignatureDef:
"""Returns README.ml-pipelines-sdk.md model signature."""
saved_model_pb = loader_impl.parse_saved_model(model_path)
meta_graph_def = None
for graph_def in saved_model_pb.meta_graphs:
if graph_def.meta_info_def.tags == [
tf.compat.v1.saved_model.tag_constants.SERVING
]:
meta_graph_def = graph_def
if not meta_graph_def:
raise RuntimeError('Tag tf.compat.v1.saved_model.tag_constants.SERVING'
' does not exist in saved model: %s. This is required'
' for remote inference.' % model_path)
if tf.saved_model.PREDICT_METHOD_NAME in meta_graph_def.signature_def:
return meta_graph_def.signature_def[tf.saved_model.PREDICT_METHOD_NAME]
if (tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY in
meta_graph_def.signature_def):
return meta_graph_def.signature_def[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
raise RuntimeError(
'Cannot find serving signature in saved model: %s,'
' tf.saved_model.PREDICT_METHOD_NAME or '
' tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY is needed.' %
model_path) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/extensions/google_cloud_ai_platform/bulk_inferrer/executor.py | 0.699049 | 0.221624 | executor.py | pypi |
import os
import pickle
from typing import Text, Tuple
import absl
import numpy as np
from sklearn.neural_network import MLPClassifier
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.dsl.io import fileio
from tfx.utils import io_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
_FEATURE_KEYS = [
'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g'
]
_LABEL_KEY = 'species'
# The Penguin dataset has 342 records, and is divided into train and eval
# splits in a 2:1 ratio.
_TRAIN_DATA_SIZE = 228
_TRAIN_BATCH_SIZE = 20
def _input_fn(
    file_pattern: Text,
    data_accessor: DataAccessor,
    schema: schema_pb2.Schema,
    batch_size: int = 20,
) -> Tuple[np.ndarray, np.ndarray]:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: input tfrecord file pattern.
    data_accessor: DataAccessor for converting input to RecordBatch.
    schema: schema of the input data.
    batch_size: An int representing the number of records to combine in a
      single batch.

  Returns:
    A (features, indices) tuple where features is a matrix of features, and
    indices is a single vector of label indices.
  """
  batches = data_accessor.record_batch_factory(
      file_pattern,
      dataset_options.RecordBatchesOptions(batch_size=batch_size, num_epochs=1),
      schema)
  feature_arrays = []
  label_arrays = []
  for batch in batches:
    # Flatten each Arrow column into a numpy vector, keyed by field name.
    columns = {
        field.name: column.flatten()
        for column, field in zip(batch, batch.schema)
    }
    # Stack the selected feature columns into one (batch, n_features) matrix.
    feature_arrays.append(
        np.stack([columns[key] for key in _FEATURE_KEYS], axis=-1))
    label_arrays.append(columns[_LABEL_KEY])
  return np.concatenate(feature_arrays), np.concatenate(label_arrays)
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())
x_train, y_train = _input_fn(fn_args.train_files, fn_args.data_accessor,
schema)
x_eval, y_eval = _input_fn(fn_args.eval_files, fn_args.data_accessor, schema)
steps_per_epoch = _TRAIN_DATA_SIZE / _TRAIN_BATCH_SIZE
model = MLPClassifier(
hidden_layer_sizes=[8, 8, 8],
activation='relu',
solver='adam',
batch_size=_TRAIN_BATCH_SIZE,
learning_rate_init=0.0005,
max_iter=int(fn_args.train_steps / steps_per_epoch),
verbose=True)
model.feature_keys = _FEATURE_KEYS
model.label_key = _LABEL_KEY
model.fit(x_train, y_train)
absl.logging.info(model)
score = model.score(x_eval, y_eval)
absl.logging.info('Accuracy: %f', score)
os.makedirs(fn_args.serving_model_dir)
model_path = os.path.join(fn_args.serving_model_dir, 'model.pkl')
with fileio.open(model_path, 'wb+') as f:
pickle.dump(model, f) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/examples/penguin/experimental/penguin_utils_sklearn.py | 0.798187 | 0.344361 | penguin_utils_sklearn.py | pypi |
"""Predict extractor for scikit-learn models."""
import copy
import os
import pickle
from typing import Dict, Iterable, List, Text
import apache_beam as beam
import numpy as np
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.extractors import extractor
from tfx_bsl.tfxio import tensor_adapter
_PREDICT_EXTRACTOR_STAGE_NAME = 'SklearnPredict'
def _make_sklearn_predict_extractor(
    eval_shared_model: tfma.EvalSharedModel,
) -> extractor.Extractor:
  """Creates an extractor for performing predictions using a scikit-learn model.

  The extractor's PTransform loads and runs the serving pickle against
  every extract yielding a copy of the incoming extracts with an additional
  extract added for the predictions keyed by tfma.PREDICTIONS_KEY. The model
  inputs are searched for under tfma.FEATURES_KEY.

  Args:
    eval_shared_model: Shared model (single-model evaluation).

  Returns:
    Extractor for extracting predictions.
  """
  models = model_util.verify_and_update_eval_shared_models(eval_shared_model)
  models_by_name = {}
  for model in models:
    models_by_name[model.model_name] = model
  predict_ptransform = _ExtractPredictions(  # pylint: disable=no-value-for-parameter
      eval_shared_models=models_by_name)
  return extractor.Extractor(
      stage_name=_PREDICT_EXTRACTOR_STAGE_NAME,
      ptransform=predict_ptransform)
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
class _TFMAPredictionDoFn(model_util.DoFnWithModels):
  """A DoFn that loads the models and predicts."""

  def __init__(self, eval_shared_models: Dict[Text, types.EvalSharedModel]):
    # The parent class manages loading; hand it the loaders keyed by name.
    super(_TFMAPredictionDoFn, self).__init__(
        {k: v.model_loader for k, v in eval_shared_models.items()})

  def setup(self):
    # After the parent loads the models, verify they all agree on the same
    # feature keys and label key (attributes attached at training time).
    super(_TFMAPredictionDoFn, self).setup()
    self._feature_keys = None
    self._label_key = None
    for loaded_model in self._loaded_models.values():
      if self._feature_keys and self._label_key:
        assert self._feature_keys == loaded_model.feature_keys, (
            f'Features mismatch in loaded models. Expected {self._feature_keys}'
            f', got {loaded_model.feature_keys} instead.')
        assert self._label_key == loaded_model.label_key, (
            f'Label mismatch in loaded models. Expected "{self._label_key}"'
            f', got "{loaded_model.label_key}" instead.')
      elif loaded_model.feature_keys and loaded_model.label_key:
        self._feature_keys = loaded_model.feature_keys
        self._label_key = loaded_model.label_key
      else:
        raise ValueError('Missing feature or label keys in loaded model.')

  def process(self, elem: types.Extracts) -> Iterable[types.Extracts]:
    """Uses loaded models to make predictions on batches of data.

    Args:
      elem: An extract containing batched features.

    Yields:
      Copy of the original extracts with predictions added for each model. If
      there are multiple models, a list of dicts keyed on model names will be
      added, with each value corresponding to a prediction for a single sample.
    """
    # Build feature and label vectors because sklearn cannot read tf.Examples.
    features = []
    labels = []
    result = copy.copy(elem)
    for features_dict in result[constants.FEATURES_KEY]:
      features_row = [features_dict[key] for key in self._feature_keys]
      features.append(np.concatenate(features_row))
      labels.append(features_dict[self._label_key])
    result[constants.LABELS_KEY] = np.concatenate(labels)
    # Generate predictions for each model.
    for model_name, loaded_model in self._loaded_models.items():
      preds = loaded_model.predict(features)
      # Single model: a bare prediction array. Multiple models: one dict per
      # sample mapping model name -> prediction.
      if len(self._loaded_models) == 1:
        result[constants.PREDICTIONS_KEY] = preds
      elif constants.PREDICTIONS_KEY not in result:
        result[constants.PREDICTIONS_KEY] = [
            {model_name: pred} for pred in preds]
      else:
        for i, pred in enumerate(preds):
          result[constants.PREDICTIONS_KEY][i][model_name] = pred
    yield result
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def _ExtractPredictions(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection,
    eval_shared_models: Dict[Text, types.EvalSharedModel],
) -> beam.pvalue.PCollection:
  """A PTransform that adds predictions and possibly other tensors to extracts.

  Args:
    extracts: PCollection of extracts with inputs keyed by tfma.INPUTS_KEY.
    eval_shared_models: Shared model parameters keyed by model name.

  Returns:
    PCollection of Extracts updated with the predictions.
  """
  # 'Predict' names the Beam stage in pipeline graphs and counters.
  return extracts | 'Predict' >> beam.ParDo(
      _TFMAPredictionDoFn(eval_shared_models))
def _custom_model_loader_fn(model_path: Text):
  """Returns a function that loads a pickled scikit-learn model.

  Args:
    model_path: Path to the pickled model; may be on any filesystem supported
      by ``tf.io.gfile`` (e.g. local disk or GCS).

  Returns:
    A no-argument callable that deserializes and returns the model.
  """
  def _load_model():
    # Use a context manager so the file handle is closed as soon as the model
    # is loaded; the previous lambda left the GFile open until garbage
    # collection.
    with tf.io.gfile.GFile(model_path, 'rb') as model_file:
      return pickle.load(model_file)
  return _load_model
# TFX Evaluator will call the following functions.
def custom_eval_shared_model(
    eval_saved_model_path, model_name, eval_config,
    **kwargs) -> tfma.EvalSharedModel:
  """Returns a single custom EvalSharedModel.

  TFX Evaluator calls this hook to build the shared-model handle for a
  pickled scikit-learn model stored as ``model.pkl`` under the export path.
  """
  # The pickled model lives in a fixed file name inside the export directory.
  model_path = os.path.join(eval_saved_model_path, 'model.pkl')
  # A custom loader is needed because the model is not a TF SavedModel.
  model_loader = types.ModelLoader(
      construct_fn=_custom_model_loader_fn(model_path))
  return tfma.default_eval_shared_model(
      eval_saved_model_path=model_path,
      model_name=model_name,
      eval_config=eval_config,
      custom_model_loader=model_loader,
      add_metrics_callbacks=kwargs.get('add_metrics_callbacks'))
def custom_extractors(
    eval_shared_model: tfma.MaybeMultipleEvalSharedModels,
    eval_config: tfma.EvalConfig,
    tensor_adapter_config: tensor_adapter.TensorAdapterConfig,
) -> List[tfma.extractors.Extractor]:
  """Returns the default extractors plus a custom sklearn prediction extractor.

  TFX Evaluator calls this hook so that predictions come from the
  scikit-learn model instead of the default TF-based predict extractor.
  """
  sklearn_extractor = _make_sklearn_predict_extractor(eval_shared_model)
  return tfma.default_extractors(
      eval_shared_model=eval_shared_model,
      eval_config=eval_config,
      tensor_adapter_config=tensor_adapter_config,
      custom_predict_extractor=sklearn_extractor)
"""Penguin example using TFX."""
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'penguin_sklearn_local'
# This example assumes that Penguin data is stored in ~/penguin/data and the
# utility function is in ~/penguin. Feel free to customize as needed.
_penguin_root = os.path.join(os.environ['HOME'], 'penguin')
_data_root = os.path.join(_penguin_root, 'data')
# Python module file to inject customized logic into the TFX components.
# Trainer requires user-defined functions to run successfully.
_trainer_module_file = os.path.join(
_penguin_root, 'experimental', 'penguin_utils_sklearn.py')
# Python module file to inject customized logic into the TFX components. The
# Evaluator component needs a custom extractor in order to make predictions
# using the scikit-learn model.
_evaluator_module_file = os.path.join(
_penguin_root, 'experimental', 'sklearn_predict_extractor.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_penguin_root, 'serving_model',
_pipeline_name)
# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
# Pipeline arguments for Beam powered Components.
# TODO(b/171316320): Change direct_running_mode back to multi_processing and set
# direct_num_workers to 0.
_beam_pipeline_args = [
'--direct_running_mode=multi_threading',
# 0 means auto-detect based on on the number of CPUs available
# during execution time.
'--direct_num_workers=1',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     trainer_module_file: Text, evaluator_module_file: Text,
                     serving_model_dir: Text, metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the Penguin pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline (also used in metadata and paths).
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the penguin CSV data.
    trainer_module_file: Path to the user module with the training functions.
    evaluator_module_file: Path to the user module with the custom sklearn
      prediction extractor used by Evaluator.
    serving_model_dir: Filesystem directory where Pusher exports the model.
    metadata_path: Path to the sqlite ML Metadata database file.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX `pipeline.Pipeline` wiring together all components.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # TODO(humichael): Handle applying transformation component in Milestone 3.
  # Uses user-provided Python function that trains a model using TF-Learn.
  # Num_steps is not provided during evaluation because the scikit-learn model
  # loads and evaluates the entire test set at once.
  # TODO(b/159470716): Make schema optional in Trainer.
  trainer = Trainer(
      module_file=trainer_module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=2000),
      eval_args=trainer_pb2.EvalArgs())
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='species')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='Accuracy',
                  threshold=tfma.MetricThreshold(
                      # Model must reach at least 0.6 accuracy...
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.6}),
                      # ...and must not regress relative to the baseline
                      # (tiny negative slack tolerates float noise).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = Evaluator(
      module_file=evaluator_module_file,
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Pushes the model to the serving directory only if Evaluator blessed it.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args,
  )
# To run this pipeline from the python CLI:
# $python penguin_pipeline_sklearn_local.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Run the pipeline synchronously on the local machine, using the
  # module-level configuration constants defined above.
  LocalDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          trainer_module_file=_trainer_module_file,
          evaluator_module_file=_evaluator_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Penguin example using TFX on GCP."""
import os
from typing import Dict, List, Optional, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.extensions.google_cloud_ai_platform.pusher import executor as ai_platform_pusher_executor
from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Identifier for the pipeline. This will also be used as the model name on AI
# Platform, so it should begin with a letter and only consist of letters,
# numbers, and underscores.
_pipeline_name = 'penguin_sklearn_gcp'
# Google Cloud Platform project id to use when deploying this pipeline. Leave
# blank to run locally.
_project_id = 'PROJECT_ID'
# Directory and data locations (uses Google Cloud Storage).
_bucket = 'gs://BUCKET'
# Custom container image in Google Container Registry (GCR) to use for training
# on Google Cloud AI Platform.
_tfx_image = f'gcr.io/{_project_id}/tfx-example-sklearn'
# Region to use for Dataflow jobs and AI Platform jobs.
# Dataflow: https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
# AI Platform: https://cloud.google.com/ml-engine/docs/tensorflow/regions
_gcp_region = 'us-central1'
# A dict which contains the training job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
_ai_platform_training_args = {
'project': _project_id,
'region': _gcp_region,
# Override the default TFX image used for training with one with the correct
# scikit-learn version.
'masterConfig': {
'imageUri': _tfx_image,
},
}
# A dict which contains the serving job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
_ai_platform_serving_args = {
'model_name': _pipeline_name,
'project_id': _project_id,
# The region to use when serving the model. See available regions here:
# https://cloud.google.com/ml-engine/docs/regions
# Note that serving currently only supports a single region:
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models#Model
'regions': [_gcp_region],
# TODO(b/157646655): Update the version once sklearn support is added back
# to CAIP in the next runtime release.
'runtime_version': '1.15',
}
# This example assumes that Penguin data is stored in ~/penguin/data and the
# utility function is in ~/penguin. Feel free to customize as needed.
_penguin_root = os.path.join(_bucket, 'penguin')
_data_root = os.path.join(_penguin_root, 'data')
# Python module file to inject customized logic into the TFX components.
# Trainer requires user-defined functions to run successfully.
_trainer_module_file = os.path.join(
_penguin_root, 'experimental', 'penguin_utils_sklearn.py')
# Python module file to inject customized logic into the TFX components. The
# Evaluator component needs a custom extractor in order to make predictions
# using the scikit-learn model.
_evaluator_module_file = os.path.join(
_penguin_root, 'experimental', 'sklearn_predict_extractor.py')
# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem. The AI Platform Pusher requires
# that pipeline outputs are stored in a GCS bucket.
_tfx_root = os.path.join(_bucket, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
# TODO(humichael): Beam dag runner expects this to be a local path. Switch to
# kubeflow dag runner when making cloud example.
_metadata_path = os.path.join(os.environ['HOME'], 'tfx', 'metadata',
_pipeline_name, 'metadata.db')
# Pipeline arguments for Beam powered Components.
# TODO(b/171316320): Change direct_running_mode back to multi_processing and set
# direct_num_workers to 0. Additionally, try to use the Dataflow runner instead
# of the direct runner.
_beam_pipeline_args = [
'--direct_running_mode=multi_threading',
# 0 means auto-detect based on on the number of CPUs available
# during execution time.
'--direct_num_workers=1',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     trainer_module_file: Text, evaluator_module_file: Text,
                     metadata_path: Text,
                     ai_platform_training_args: Optional[Dict[Text, Text]],
                     ai_platform_serving_args: Optional[Dict[Text, Text]],
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the Penguin pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline (also the AI Platform model name).
    pipeline_root: Root directory (GCS) for pipeline output artifacts.
    data_root: Directory (GCS) containing the penguin CSV data.
    trainer_module_file: Path to the user module with the training functions.
    evaluator_module_file: Path to the user module with the custom sklearn
      prediction extractor used by Evaluator.
    metadata_path: Path to the sqlite ML Metadata database file.
    ai_platform_training_args: Job parameters passed to Cloud AI Platform
      Training.
    ai_platform_serving_args: Job parameters passed to Cloud AI Platform
      Serving.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX `pipeline.Pipeline` wiring together all components.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # TODO(humichael): Handle applying transformation component in Milestone 3.
  # Uses user-provided Python function that trains a model using TF-Learn.
  # Num_steps is not provided during evaluation because the scikit-learn model
  # loads and evaluates the entire test set at once.
  # TODO(b/159470716): Make schema optional in Trainer.
  # Training runs remotely on Cloud AI Platform via the generic executor.
  trainer = Trainer(
      module_file=trainer_module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_trainer_executor.GenericExecutor),
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=2000),
      eval_args=trainer_pb2.EvalArgs(),
      custom_config={
          ai_platform_trainer_executor.TRAINING_ARGS_KEY:
              ai_platform_training_args,
      })
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='species')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='Accuracy',
                  threshold=tfma.MetricThreshold(
                      # Model must reach at least 0.6 accuracy...
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.6}),
                      # ...and must not regress relative to the baseline
                      # (tiny negative slack tolerates float noise).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = Evaluator(
      module_file=evaluator_module_file,
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Deploys the blessed model to Cloud AI Platform Serving.
  pusher = Pusher(
      custom_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_pusher_executor.Executor),
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      custom_config={
          ai_platform_pusher_executor.SERVING_ARGS_KEY:
              ai_platform_serving_args,
      })
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args,
  )
# To run this pipeline from the python CLI:
# $python penguin_pipeline_sklearn_gcp.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # TODO(humichael): Switch to KubeflowDagRunner.
  # Run the pipeline locally (training/serving still happen on GCP), using
  # the module-level configuration constants defined above.
  LocalDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          trainer_module_file=_trainer_module_file,
          evaluator_module_file=_evaluator_module_file,
          metadata_path=_metadata_path,
          ai_platform_training_args=_ai_platform_training_args,
          ai_platform_serving_args=_ai_platform_serving_args,
          beam_pipeline_args=_beam_pipeline_args))
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
# Keys
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Returns the raw (pre-transform) feature spec parsed from `schema`."""
  return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  return tf.data.TFRecordDataset(filenames, compression_type='GZIP')
def _fill_in_missing(x):
  """Replaces missing values in a `SparseTensor` and densifies it.

  If `x` is a `SparseTensor`, fills in missing values with '' (string dtypes)
  or 0 (otherwise) and converts it to a rank-1 dense tensor. Any other input
  is returned unchanged.

  Args:
    x: A `SparseTensor` of rank 2 whose dense shape has size at most 1 in the
      second dimension, or a tensor that is not a `SparseTensor`.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in, or `x`
    as is if it is not a `SparseTensor`.
  """
  if not isinstance(x, tf.SparseTensor):
    return x
  # String features default to the empty string; numeric features to zero.
  fill_value = '' if x.dtype == tf.string else 0
  two_d_shape = [x.dense_shape[0], 1]
  rebuilt = tf.SparseTensor(x.indices, x.values, two_d_shape)
  dense = tf.sparse.to_dense(rebuilt, fill_value)
  return tf.squeeze(dense, axis=1)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    # Quantile-bucketize continuous location features.
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    # Categorical features are already integer ids; just densify them.
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  # Derive a binary label: 1 when the tip exceeds 20% of the fare, 0 when it
  # does not (and also 0 when the fare itself is NaN).
  outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    A `tf.estimator.DNNLinearCombinedClassifier`: dense (z-scored) features
    feed the DNN tower, categorical/bucketized/vocab features feed the linear
    tower.
  """
  # Dense numeric features go to the DNN part of the combined model.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  # Vocab features were integerized by tf.transform into
  # [0, _VOCAB_SIZE + _OOV_SIZE) ids.
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  # Bucketized features take bucket indices in [0, _FEATURE_BUCKET_COUNT).
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  # Raw categorical ids are bounded by the per-feature maxima declared in
  # _MAX_CATEGORICAL_FEATURE_VALUES.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _flat_input_serving_receiver_fn(tf_transform_output, schema):
  """Build the serving function for flat list of Dense tensors as input.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is never provided at serving time.
  raw_feature_spec.pop(_LABEL_KEY)
  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()
  # Apply the tf.transform graph to the parsed raw features.
  transformed_features = tf_transform_output.transform_raw_features(
      serving_input_receiver.features)
  # We construct a receiver function that receives flat list of Dense tensors
  # as features. This is as per BigQuery ML serving requirements.
  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.features)
def _eval_input_receiver_fn(tf_transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)
  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  features = tf.io.parse_example(
      serialized=serialized_tf_example, features=raw_feature_spec)
  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  transformed_features = tf_transform_output.transform_raw_features(features)
  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}
  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
  features.update(transformed_features)
  return tfma.export.EvalInputReceiver(
      features=features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[_transformed_name(_LABEL_KEY)])
def _input_fn(filenames, tf_transform_output, batch_size=200):
  """Generates features and labels for training or evaluation.

  Args:
    filenames: [str] list of gzip'ed TFRecord files (materialized TFT output)
      to read data from.
    tf_transform_output: A TFTransformOutput.
    batch_size: int First dimension size of the Tensors returned by input_fn

  Returns:
    A (features, indices) tuple where features is a dictionary of
    Tensors, and indices is a single Tensor of label indices.
  """
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn)
  transformed_features = tf.compat.v1.data.make_one_shot_iterator(
      dataset).get_next()
  # We pop the label because we do not want to use it as a feature while we're
  # training.
  return transformed_features, transformed_features.pop(
      _transformed_name(_LABEL_KEY))
# TFX will call this function
def trainer_fn(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  # Number of nodes in the first layer of the DNN
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7

  train_batch_size = 40
  eval_batch_size = 40

  # Wraps the materialized tf.transform output so feature specs and the
  # transform graph can be reused below.
  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)

  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      tf_transform_output,
      batch_size=train_batch_size)

  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      tf_transform_output,
      batch_size=eval_batch_size)

  train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)

  serving_receiver_fn = lambda: _flat_input_serving_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)

  # Exports the final model for serving at the end of training.
  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')

  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=1)

  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)

  estimator = _build_estimator(
      # Construct layers sizes with exponential decay
      hidden_units=[
          max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
          for i in range(num_dnn_layers)
      ],
      config=run_config,
      warm_start_from=trainer_fn_args.base_model)

  # Create an input receiver for TFMA processing
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)

  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
from typing import Optional, Text
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.examples.custom_components.slack.slack_component import executor
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ExecutionParameter
class SlackComponentSpec(types.ComponentSpec):
  """ComponentSpec for Custom TFX Slack Component.

  Declares the execution parameters, input channels, and output channels
  that the SlackComponent exposes.
  """

  PARAMETERS = {
      # Token used to authenticate with the Slack API.
      'slack_token': ExecutionParameter(type=Text),
      # Slack channel id the review conversation happens in.
      'slack_channel_id': ExecutionParameter(type=Text),
      # Seconds to wait for a human decision before erroring out.
      'timeout_sec': ExecutionParameter(type=int),
  }
  INPUTS = {
      'model': ChannelParameter(type=standard_artifacts.Model),
      'model_blessing': ChannelParameter(type=standard_artifacts.ModelBlessing),
  }
  OUTPUTS = {
      'slack_blessing': ChannelParameter(type=standard_artifacts.ModelBlessing),
  }
class SlackComponent(base_component.BaseComponent):
  """Custom TFX Slack Component.

  This custom component serves as a bridge between TFX pipeline and human model
  reviewers to enable review-and-push workflow in model development cycle. It
  utilizes Slack API to send message to user-defined Slack channel with model
  URI info and wait for go / no-go decision from the same Slack channel:

  * To approve the model, a user needs to reply the thread sent out by the bot
    started by SlackComponent with 'lgtm' or 'approve'.
  * To reject the model, a user needs to reply the thread sent out by the bot
    started by SlackComponent with 'decline' or 'reject'.

  If the model is approved, an artifact will be created in ML metadata. It will
  be materialized as a file named 'BLESSED' in the directory specified by the
  URI of 'slack_blessing' artifact.
  If the model is rejected, an artifact will be created in ML metadata. It will
  be materialized as a file named 'NOT_BLESSED' in the directory specified by
  the URI of 'slack_blessing' channel.
  If no message indicating approve or reject is received within the given
  timeout_sec, component will error out. This ensures that model will not be
  pushed and the validation is still retry-able.

  The output artifact might contain the following custom properties:
    - blessed: integer value indicating whether the model is blessed
    - slack_decision_maker: the user id that made the decision.
    - slack_decision_message: the message of the decision
    - slack_decision_channel: the slack channel the decision is made on
    - slack_decision_thread: the slack thread the decision is made on
  """

  SPEC_CLASS = SlackComponentSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               model: types.Channel,
               model_blessing: types.Channel,
               slack_token: Text,
               slack_channel_id: Text,
               timeout_sec: int,
               slack_blessing: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct a SlackComponent.

    Args:
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: A Channel of type `standard_artifacts.ModelBlessing`,
        usually produced by a ModelValidator component.
      slack_token: A token used for setting up connection with Slack server.
      slack_channel_id: Slack channel id to communicate on.
      timeout_sec: Seconds to wait for response before default to reject.
      slack_blessing: Optional output channel of type
        `standard_artifacts.ModelBlessing` with result of blessing; will be
        created for you if not specified.
      instance_name: Optional unique instance name. Necessary if multiple
        Pusher components are declared in the same pipeline.
    """
    # Create the output channel lazily so callers may omit it.
    slack_blessing = slack_blessing or types.Channel(
        type=standard_artifacts.ModelBlessing)
    spec = SlackComponentSpec(
        slack_token=slack_token,
        slack_channel_id=slack_channel_id,
        timeout_sec=timeout_sec,
        model=model,
        model_blessing=model_blessing,
        slack_blessing=slack_blessing)
    super(SlackComponent, self).__init__(spec=spec, instance_name=instance_name)
import os
import signal
from typing import Any, Dict, List, Text
import absl
import attr
import slack
from tfx import types
from tfx.components.util import model_utils
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.utils import io_utils
# Case-insensitive text messages that are accepted as a signal for approving a
# model.
_APPROVE_TEXT = ['lgtm', 'approve']
# Case-insensitive text messages that are accepted as a signal for rejecting a
# model.
_DECLINE_TEXT = ['decline', 'reject']
class Timeout(object):
  """Context manager that raises TimeoutError after a fixed number of seconds.

  Relies on SIGALRM, so it only works on Unix and only in the main thread.
  """

  def __init__(self, seconds):
    self.seconds = seconds

  def handle_timeout(self, unused_signum, unused_frame):
    """Signal handler invoked when the alarm fires before the work completes."""
    msg = 'Did not get model evaluation result in %d seconds' % self.seconds
    absl.logging.warning(msg)
    raise TimeoutError(msg)  # pylint: disable=undefined-variable

  def __enter__(self):
    # Remember the previously installed handler so it can be restored on exit
    # instead of leaking this handler past the context.
    self._old_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
    signal.alarm(self.seconds)
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    # Cancel the pending alarm and restore the previous SIGALRM handler.
    signal.alarm(0)
    signal.signal(signal.SIGALRM, self._old_handler)
@attr.s(auto_attribs=True, kw_only=True, frozen=True)
class _SlackResponse:
  """Immutable record of a user's Slack approval decision."""
  # Whether the model is approved.
  approved: bool
  # The user who made that decision.
  user_id: Text
  # The decision message.
  message: Text
  # The slack channel that the decision is made on.
  slack_channel_id: Text
  # The slack thread that the decision is made on.
  thread_ts: Text
class Executor(base_executor.BaseExecutor):
  """Executor for Slack component."""

  def _fetch_slack_blessing(self, slack_token: Text, slack_channel_id: Text,
                            model_uri: Text) -> _SlackResponse:
    """Send message via Slack channel and wait for response.

    When the bot sends a message to the channel, a user should reply in thread
    with "approve" or "lgtm" for approval, or "decline" / "reject" to decline.
    This example uses Slack RealTime Message (RTM) API which is only available
    for **classic slack bot** (https://api.slack.com/rtm). (Events API requires
    a listening server endpoint which is not easy to be integrated with TFX
    pipelines.)

    Args:
      slack_token: The token used to set up the connection to send and receive
        messages.
      slack_channel_id: The id of the Slack channel to send and receive
        messages.
      model_uri: The URI of the model waiting for human review.

    Returns:
      A _SlackResponse instance.

    Raises:
      ConnectionError:
        When connection to slack server cannot be established.
    """
    # pylint: disable=unused-argument, unused-variable
    rtm_client = slack.RTMClient(token=slack_token)
    # Both values are populated from inside the RTM event callbacks below.
    thread_ts = None
    result = None

    @slack.RTMClient.run_on(event='hello')
    def on_hello(web_client, **payload):
      # Fires once the RTM connection is established; posts the review request
      # and records the thread timestamp so replies can be matched to it.
      nonlocal thread_ts
      resp = web_client.chat_postMessage(
          channel=slack_channel_id,
          text=(f'Please review the model in the following URI: {model_uri}\n'
                f'Reply in thread by `{_APPROVE_TEXT}` for approval, '
                f'or `{_DECLINE_TEXT}` for decline.'))
      thread_ts = resp.data['ts']

    @slack.RTMClient.run_on(event='message')
    def on_message(data, rtm_client, web_client, **payload):
      nonlocal result
      if (data.get('channel') != slack_channel_id
          or data.get('thread_ts') != thread_ts
          or data.get('user') is None
          or data.get('subtype') == 'bot_message'):
        # Not a relevant user message.
        return
      user_reply = data['text'].lower()
      if user_reply in _APPROVE_TEXT:
        absl.logging.info('User %s approved the model at %s',
                          data['user'], model_uri)
        rtm_client.stop()
        result = _SlackResponse(
            approved=True,
            user_id=data['user'],
            message=data['text'],
            slack_channel_id=slack_channel_id,
            thread_ts=thread_ts)
      elif user_reply in _DECLINE_TEXT:
        absl.logging.info('User %s declined the model at %s',
                          data['user'], model_uri)
        rtm_client.stop()
        result = _SlackResponse(
            approved=False,
            user_id=data['user'],
            message=data['text'],
            slack_channel_id=slack_channel_id,
            thread_ts=thread_ts)
      else:
        # Unrecognized reply; prompt the user again in the same thread.
        web_client.chat_postMessage(
            channel=slack_channel_id,
            thread_ts=thread_ts,
            text=(f'Unrecognized text "{data["text"]}".\n'
                  f'Please reply in thread by `{_APPROVE_TEXT}` for approval, '
                  f'or `{_DECLINE_TEXT}` for decline.'))

    absl.logging.info('Will start listening user Slack response.')
    # Blocks until rtm_client.stop() is called from a callback above.
    rtm_client.start()
    absl.logging.info('User reply: %s', result)
    return result

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Get human review result on a model through Slack channel.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from evaluator.
      output_dict: Output dict from key to a list of artifacts, including:
        - slack_blessing: model blessing result.
      exec_properties: A dict of execution properties, including:
        - slack_token: Token used to setup connection with slack server.
        - slack_channel_id: The id of the Slack channel to send and receive
          messages.
        - timeout_sec: How long do we wait for response, in seconds.

    Returns:
      None

    Raises:
      TimeoutError:
        When there is no decision made within timeout_sec.
      ConnectionError:
        When connection to slack server cannot be established.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    # Fetch execution properties from exec_properties dict.
    slack_token = exec_properties['slack_token']
    slack_channel_id = exec_properties['slack_channel_id']
    timeout_sec = exec_properties['timeout_sec']
    # Fetch input URIs from input_dict.
    model_export_uri = artifact_utils.get_single_uri(input_dict['model'])
    model_blessing = artifact_utils.get_single_instance(
        input_dict['model_blessing'])
    # Fetch output artifact from output_dict.
    slack_blessing = artifact_utils.get_single_instance(
        output_dict['slack_blessing'])
    # We only consider a model as blessed if both of the following conditions
    # are met:
    # - The model is blessed by evaluator. This is determined by looking
    #   for a file named 'BLESSED' from the output from Evaluator.
    # - The model is blessed by a human reviewer. This logic is in
    #   _fetch_slack_blessing().
    slack_response = None
    with Timeout(timeout_sec):
      if model_utils.is_model_blessed(model_blessing):
        slack_response = self._fetch_slack_blessing(slack_token,
                                                    slack_channel_id,
                                                    model_export_uri)
    # If model is blessed, write an empty file named 'BLESSED' in the assigned
    # output path. Otherwise, write an empty file named 'NOT_BLESSED' instead.
    if slack_response and slack_response.approved:
      io_utils.write_string_file(
          os.path.join(slack_blessing.uri, 'BLESSED'), '')
      slack_blessing.set_int_custom_property('blessed', 1)
    else:
      io_utils.write_string_file(
          os.path.join(slack_blessing.uri, 'NOT_BLESSED'), '')
      slack_blessing.set_int_custom_property('blessed', 0)
    if slack_response:
      slack_blessing.set_string_custom_property('slack_decision_maker',
                                                slack_response.user_id)
      slack_blessing.set_string_custom_property('slack_decision_message',
                                                slack_response.message)
      slack_blessing.set_string_custom_property('slack_decision_channel',
                                                slack_response.slack_channel_id)
      slack_blessing.set_string_custom_property('slack_decision_thread',
                                                slack_response.thread_ts)
    absl.logging.info('Blessing result written to %s.', slack_blessing.uri)
import datetime
import os
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ModelValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.examples.custom_components.slack.slack_component.component import SlackComponent
from tfx.orchestration import pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import csv_input
# This example assumes that the taxi data is stored in _input_bucket/data/simple
# and the taxi utility function is in example/taxi_utils_slack.py.
# Feel free to customize this as needed.
_input_bucket = 'gs://my-bucket'
_output_bucket = 'gs://my-bucket'
# Root directory of this example. __file__ is the path of this .py file, so use
# its containing directory (joining onto a file path would produce an invalid
# location below).
_taxi_root = os.path.dirname(os.path.abspath(__file__))
_data_root = os.path.join(_input_bucket, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_taxi_trainer_func = 'example.taxi_utils_slack.trainer_fn'
_taxi_transformer_func = 'example.taxi_utils_slack.preprocessing_fn'
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack')
# Slack channel to push the model notifications to.
_slack_channel_id = os.environ['TFX_SLACK_CHANNEL_ID']
# Slack token to set up connection.
_slack_token = os.environ['TFX_SLACK_BOT_TOKEN']
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
# NOTE: os.path.join discards components preceding an absolute path, so the
# second argument must be the relative 'tfx', not '/tfx'.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_name = 'chicago_taxi_slack_kubeflow'
_pipeline_root = os.path.join(_input_bucket, _pipeline_name)
_log_root = os.path.join(_tfx_root, 'logs')
# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline():
  """Implements the chicago taxi pipeline with TFX."""
  examples = csv_input(_data_root)
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input=examples)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      preprocessing_fn=_taxi_transformer_func)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      trainer_fn=_taxi_trainer_func,
      examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Uses TFMA to compute evaluation statistics over features of a model.
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['model'])
  # This custom component serves as a bridge between the pipeline and human
  # model reviewers to enable a review-and-push workflow in the model
  # development cycle. It utilizes the Slack API to send a message to a
  # user-defined Slack channel with model URI info and waits for a go / no-go
  # decision from the same Slack channel:
  #   * To approve the model, users need to reply to the thread sent out by the
  #     bot started by SlackComponent with 'lgtm' or 'approve'.
  #   * To reject the model, users need to reply to the thread sent out by the
  #     bot started by SlackComponent with 'decline' or 'reject'.
  slack_validator = SlackComponent(
      model=trainer.outputs['model'],
      model_blessing=model_validator.outputs['blessing'],
      slack_token=_slack_token,
      slack_channel_id=_slack_channel_id,
      timeout_sec=3600,
  )
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=slack_validator.outputs['slack_blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=_serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, evaluator, model_validator, slack_validator, pusher
      ],
      enable_cache=True,
  )
if __name__ == '__main__':
  # Metadata config. The defaults work with the installation of
  # KF Pipelines using Kubeflow. If installing KF Pipelines using the
  # lightweight deployment option, you may need to override the defaults.
  metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
  # This pipeline automatically injects the Kubeflow TFX image if the
  # environment variable 'KUBEFLOW_TFX_IMAGE' is defined. Currently, the tfx
  # cli tool exports the environment variable to pass to the pipelines.
  tfx_image = os.environ.get('KUBEFLOW_TFX_IMAGE', None)
  runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
      kubeflow_metadata_config=metadata_config,
      # Specify custom docker image to use.
      tfx_image=tfx_image
  )
  kubeflow_dag_runner.KubeflowDagRunner(config=runner_config).run(
      _create_pipeline())
import os
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.tf_metadata import schema_utils
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys for the label and the fare feature used to derive the label.
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Return the raw (pre-transform) feature spec derived from `schema`."""
  return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn():
  """Small utility returning a record reader that can read gzip'ed files."""
  return tf.compat.v1.TFRecordReader(
      options=tf.io.TFRecordOptions(
          compression_type=tf.compat.v1.python_io.TFRecordCompressionType.GZIP))
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Already dense inputs are passed through unchanged.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  default_value = '' if x.dtype == tf.string else 0
  return tf.squeeze(
      tf.compat.v1.sparse_to_dense(x.indices, [x.dense_shape[0], 1], x.values,
                                   default_value),
      axis=1)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.contrib.learn.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    A tf.estimator.DNNLinearCombinedClassifier: categorical features feed the
    linear part, dense float features feed the DNN part.
  """
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _example_serving_receiver_fn(transform_output, schema):
  """Build the serving inputs.

  Args:
    transform_output: directory in which the tf-transform model was written
      during the preprocessing step.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is not available at serving time.
  raw_feature_spec.pop(_LABEL_KEY)
  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()
  _, transformed_features = (
      saved_transform_io.partially_apply_saved_transform(
          os.path.join(transform_output, transform_fn_io.TRANSFORM_FN_DIR),
          serving_input_receiver.features))
  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    transform_output: directory in which the tf-transform model was written
      during the preprocessing step.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)
  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  features = tf.io.parse_example(
      serialized=serialized_tf_example, features=raw_feature_spec)
  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  _, transformed_features = (
      saved_transform_io.partially_apply_saved_transform(
          os.path.join(transform_output, transform_fn_io.TRANSFORM_FN_DIR),
          features))
  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}
  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
  features.update(transformed_features)
  return tfma.export.EvalInputReceiver(
      features=features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[_transformed_name(_LABEL_KEY)])
def _input_fn(filenames, transform_output, batch_size=200):
  """Generates features and labels for training or evaluation.

  Args:
    filenames: [str] list of CSV files to read data from.
    transform_output: directory in which the tf-transform model was written
      during the preprocessing step.
    batch_size: int First dimension size of the Tensors returned by input_fn

  Returns:
    A (features, indices) tuple where features is a dictionary of
    Tensors, and indices is a single Tensor of label indices.
  """
  metadata_dir = os.path.join(transform_output,
                              transform_fn_io.TRANSFORMED_METADATA_DIR)
  transformed_metadata = metadata_io.read_metadata(metadata_dir)
  transformed_feature_spec = transformed_metadata.schema.as_feature_spec()
  transformed_features = tf.contrib.learn.io.read_batch_features(
      filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn)
  # We pop the label because we do not want to use it as a feature while we're
  # training.
  return transformed_features, transformed_features.pop(
      _transformed_name(_LABEL_KEY))
# TFX will call this function
def trainer_fn(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  # Number of nodes in the first layer of the DNN
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7
  train_batch_size = 40
  eval_batch_size = 40
  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      trainer_fn_args.transform_output,
      batch_size=train_batch_size)
  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      trainer_fn_args.transform_output,
      batch_size=eval_batch_size)
  train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)
  serving_receiver_fn = lambda: _example_serving_receiver_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.transform_output, schema)
  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')
  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=1)
  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)
  estimator = _build_estimator(
      # Construct layer sizes with exponential decay
      hidden_units=[
          max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
          for i in range(num_dnn_layers)
      ],
      config=run_config,
      warm_start_from=trainer_fn_args.base_model)
  # Create an input receiver for TFMA processing
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.transform_output, schema)
  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
import datetime
import os
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ModelValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.examples.custom_components.slack.slack_component.component import SlackComponent
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_runner import BeamRunner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data/simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_taxi_module_file = os.path.join(_taxi_root, 'taxi_utils_slack.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack')
# Slack channel to push the model notifications to.
_slack_channel_id = os.environ['TFX_SLACK_CHANNEL_ID']
# Slack token to set up connection.
_slack_token = os.environ['TFX_SLACK_BOT_TOKEN']
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_name = 'chicago_taxi_slack'
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# SQLite-backed ML Metadata store location for this pipeline.
_metadata_db_root = os.path.join(_tfx_root, 'metadata', _pipeline_name)
_log_root = os.path.join(_tfx_root, 'logs')
# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline():
  """Implements the chicago taxi pipeline with TFX."""
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=_data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=_taxi_module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=_taxi_module_file,
      examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Uses TFMA to compute evaluation statistics over features of a model.
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['model'])
  # This custom component serves as a bridge between the pipeline and human
  # model reviewers to enable a review-and-push workflow in the model
  # development cycle. It utilizes the Slack API to send a message to a
  # user-defined Slack channel with model URI info and waits for a go / no-go
  # decision from the same Slack channel:
  #   * To approve the model, users need to reply to the thread sent out by the
  #     bot started by SlackComponent with 'lgtm' or 'approve'.
  #   * To reject the model, users need to reply to the thread sent out by the
  #     bot started by SlackComponent with 'decline' or 'reject'.
  slack_validator = SlackComponent(
      model=trainer.outputs['model'],
      model_blessing=model_validator.outputs['blessing'],
      slack_token=_slack_token,
      slack_channel_id=_slack_channel_id,
      timeout_sec=3600,
  )
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=slack_validator.outputs['slack_blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=_serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, evaluator, model_validator, slack_validator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          _metadata_db_root),
  )
if __name__ == '__main__':
  # Run the pipeline locally with the Beam orchestrator.
  BeamRunner().run(_create_pipeline())
from typing import Optional, Text
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.examples.custom_components.hello_world.hello_component import executor
from tfx.types import channel_utils
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ExecutionParameter
class HelloComponentSpec(types.ComponentSpec):
  """ComponentSpec for Custom TFX Hello World Component."""

  PARAMETERS = {
      # These are parameters that will be passed in the call to
      # create an instance of this component.
      'name': ExecutionParameter(type=Text),
  }
  INPUTS = {
      # This will be a dictionary with input artifacts, including URIs
      'input_data': ChannelParameter(type=standard_artifacts.Examples),
  }
  OUTPUTS = {
      # This will be a dictionary which this component will populate
      'output_data': ChannelParameter(type=standard_artifacts.Examples),
  }
class HelloComponent(base_component.BaseComponent):
  """Custom TFX Hello World Component.

  This custom component class consists of only a constructor.
  """

  SPEC_CLASS = HelloComponentSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               input_data: types.Channel = None,
               output_data: types.Channel = None,
               name: Optional[Text] = None):
    """Construct a HelloComponent.

    Args:
      input_data: A Channel of type `standard_artifacts.Examples`. This will
        often contain two splits: 'train', and 'eval'.
      output_data: A Channel of type `standard_artifacts.Examples`. This will
        usually contain the same splits as input_data.
      name: Optional unique name. Necessary if multiple Hello components are
        declared in the same pipeline.
    """
    # output_data will contain a list of Channels for each split of the data,
    # by default a 'train' split and an 'eval' split. Since HelloComponent
    # passes the input data through to output, the splits in output_data will
    # be the same as the splits in input_data, which were generated by the
    # upstream component.
    if not output_data:
      output_data = channel_utils.as_channel([standard_artifacts.Examples()])
    spec = HelloComponentSpec(input_data=input_data,
                              output_data=output_data, name=name)
    super(HelloComponent, self).__init__(spec=spec)
import json
import os
from typing import Any, Dict, List, Text
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.utils import io_utils
class Executor(base_executor.BaseExecutor):
  """Executor for HelloComponent."""

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Copy the input_data to the output_data.

    For this example that is all that the Executor does. For a different
    custom component, this is where the real functionality of the component
    would be included.

    This component both reads and writes Examples, but a different component
    might read and write artifacts of other types.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - input_data: A list of type `standard_artifacts.Examples` which will
          often contain two splits, 'train' and 'eval'.
      output_dict: Output dict from key to a list of artifacts, including:
        - output_data: A list of type `standard_artifacts.Examples` which will
          usually contain the same splits as input_data.
      exec_properties: A dict of execution properties, including:
        - name: Optional unique name. Necessary iff multiple Hello components
          are declared in the same pipeline.

    Returns:
      None

    Raises:
      OSError and its subclasses
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    input_artifact = artifact_utils.get_single_instance(
        input_dict['input_data'])
    output_artifact = artifact_utils.get_single_instance(
        output_dict['output_data'])
    # The output keeps the same splits as the input, since data is copied
    # through unchanged.
    output_artifact.split_names = input_artifact.split_names
    split_to_instance = {}
    for split in json.loads(input_artifact.split_names):
      uri = artifact_utils.get_split_uri([input_artifact], split)
      split_to_instance[split] = uri
    # Copy every file of every split from the input URI to the output URI.
    for split, instance in split_to_instance.items():
      input_dir = instance
      output_dir = artifact_utils.get_split_uri([output_artifact], split)
      for filename in fileio.listdir(input_dir):
        input_uri = os.path.join(input_dir, filename)
        output_uri = os.path.join(output_dir, filename)
        io_utils.copy_file(src=input_uri, dst=output_uri, overwrite=True)
"""Chicago taxi example using TFX."""
import os
from typing import Text
import absl
from tfx.components import CsvExampleGen
from tfx.components import StatisticsGen
from tfx.examples.custom_components.hello_world.hello_component import component
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
_pipeline_name = 'taxi_hello_pipeline'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join('data')
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(_taxi_root, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     metadata_path: Text) -> pipeline.Pipeline:
  """Builds a three-step TFX pipeline: ingest CSV data, run the custom
  HelloComponent pass-through, then compute dataset statistics."""
  # Ingests the CSV training data into the pipeline.
  csv_ingest = CsvExampleGen(input_base=data_root)
  # Custom component that simply copies its input examples to its output.
  hello = component.HelloComponent(
      input_data=csv_ingest.outputs['examples'], name=u'HelloWorld')
  # Computes statistics over the copied data for visualization and validation.
  stats = StatisticsGen(examples=hello.outputs['output_data'])
  pipeline_components = [csv_ingest, hello, stats]
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=pipeline_components,
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path))
# To run this pipeline from the python CLI:
#   $python taxi_pipeline_hello.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          metadata_path=_metadata_path))
"""Container-based pipeline sample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
from tfx.dsl.component.experimental import container_component
from tfx.dsl.component.experimental import placeholders
from tfx.types import standard_artifacts
# Component that downloads a text file over HTTP and publishes it as an
# ExternalArtifact. BUG FIX: the download (wget/curl) and upload (gsutil)
# commands were commented out, so the component produced no data and the
# downstream grep component's `gsutil cp` of the data URI would fail.
downloader_component = container_component.create_container_component(
    name='DownloadFromHttp',
    outputs={
        'data': standard_artifacts.ExternalArtifact,
    },
    parameters={
        'url': str,
    },
    # The component code uses gsutil to upload the data to GCS, so the
    # container image needs to have gsutil installed and configured.
    # Fixing b/150670779 by merging cl/294536017 will lift this limitation.
    image='google/cloud-sdk:278.0.0',
    command=[
        'sh', '-exc',
        '''
          url="$0"
          output_data_uri="$1"/data # TODO(b/150515270) Remove when fixed.
          output_data_path=$(mktemp)
          # Running the main code
          wget "$0" -O "$output_data_path" || curl "$0" > "$output_data_path"
          # Getting data out of the container
          gsutil cp "$output_data_path" "$output_data_uri"
        ''',
        placeholders.InputValuePlaceholder('url'),
        placeholders.OutputUriPlaceholder('data'),
    ],
)
grep_component = container_component.create_container_component(
name='FilterWithGrep',
inputs={
'text': standard_artifacts.ExternalArtifact,
},
outputs={
'filtered_text': standard_artifacts.ExternalArtifact,
},
parameters={
'pattern': str,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
pattern="$0"
text_uri="$1"/data # TODO(b/150515270) Remove when fixed.
text_path=$(mktemp)
filtered_text_uri="$2"/data # TODO(b/150515270) Remove when fixed.
filtered_text_path=$(mktemp)
# Getting data into the container
gsutil cp "$text_uri" "$text_path"
# Running the main code
grep "$pattern" "$text_path" >"$filtered_text_path"
# Getting data out of the container
gsutil cp "$filtered_text_path" "$filtered_text_uri"
''',
placeholders.InputValuePlaceholder('pattern'),
placeholders.InputUriPlaceholder('text'),
placeholders.OutputUriPlaceholder('filtered_text'),
],
)
print_component = container_component.create_container_component(
name='Print',
inputs={
'text': standard_artifacts.ExternalArtifact,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
text_uri="$0"/data # TODO(b/150515270) Remove when fixed.
text_path=$(mktemp)
# Getting data into the container
gsutil cp "$text_uri" "$text_path"
# Running the main code
cat "$text_path"
''',
placeholders.InputUriPlaceholder('text'),
],
)
def create_pipeline_component_instances(text_url: Text, pattern: Text):
  """Creates component instances for the download->grep->print pipeline.

  Args:
    text_url: HTTP(S) URL of the text file to download.
    pattern: Grep pattern used to filter the downloaded text.

  Returns:
    A list of the three wired component instances, in execution order.
  """
  downloader_task = downloader_component(url=text_url)
  grep_task = grep_component(
      text=downloader_task.outputs['data'],
      pattern=pattern,
  )
  print_task = print_component(
      text=grep_task.outputs['filtered_text'],
  )
  return [downloader_task, grep_task, print_task]
"""TFX PrestoExampleGen component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.example_gen import component
from tfx.components.example_gen import utils
from tfx.dsl.components.base import executor_spec
from tfx.examples.custom_components.presto_example_gen.presto_component import executor
from tfx.examples.custom_components.presto_example_gen.proto import presto_config_pb2
from tfx.proto import example_gen_pb2
class PrestoExampleGen(component.QueryBasedExampleGen):  # pylint: disable=protected-access
  """Official TFX PrestoExampleGen component.

  The Presto examplegen component takes a query, connection client
  configuration, and generates train and eval examples for downsteam
  components.
  """

  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               conn_config: presto_config_pb2.PrestoConnConfig,
               query: Optional[Text] = None,
               input_config: Optional[example_gen_pb2.Input] = None,
               output_config: Optional[example_gen_pb2.Output] = None,
               example_artifacts: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a PrestoExampleGen component.

    Args:
      conn_config: Parameters for Presto connection client.
      query: Presto sql string, query result will be treated as a single
        split, can be overwritten by input_config.
      input_config: An example_gen_pb2.Input instance with Split.pattern as
        Presto sql string. If set, it overwrites the 'query' arg, and allows
        different queries per split.
      output_config: An example_gen_pb2.Output instance, providing output
        configuration. If unset, default splits will be 'train' and 'eval'
        with size 2:1.
      example_artifacts: Optional channel of 'ExamplesPath' for output train
        and eval examples.
      instance_name: Optional unique instance name. Necessary if multiple
        PrestoExampleGen components are declared in the same pipeline.

    Raises:
      RuntimeError: Only one of query and input_config should be set. Or
        required host field in connection_config should be set.
    """
    if bool(query) == bool(input_config):
      raise RuntimeError('Exactly one of query and input_config should be set.')
    if not bool(conn_config.host):
      raise RuntimeError(
          'Required host field in connection config should be set.')
    input_config = input_config or utils.make_default_input_config(query)
    # The Presto connection config rides along as a packed Any inside the
    # generic CustomConfig proto understood by the base ExampleGen executor.
    packed_custom_config = example_gen_pb2.CustomConfig()
    packed_custom_config.custom_config.Pack(conn_config)
    output_config = output_config or utils.make_default_output_config(
        input_config)
    super(PrestoExampleGen, self).__init__(
        input_config=input_config,
        output_config=output_config,
        custom_config=packed_custom_config,
        example_artifacts=example_artifacts,
        instance_name=instance_name)
"""Generic TFX PrestoExampleGen executor."""
import datetime
from typing import Any, Dict, Iterable, Text, Tuple
import apache_beam as beam
import prestodb
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.examples.custom_components.presto_example_gen.proto import presto_config_pb2
from tfx.proto import example_gen_pb2
from tfx.utils import proto_utils
@beam.typehints.with_input_types(Text)
@beam.typehints.with_output_types(beam.typehints.Iterable[Tuple[Text, Text,
                                                                Any]])
class _ReadPrestoDoFn(beam.DoFn):
  """Beam DoFn class that reads from Presto.

  Attributes:
    cursor: A prestodb.dbapi.Cursor object that reads records from Presto
      table.
  """

  def __init__(self, client: prestodb.dbapi.Connection):
    self.cursor = client.cursor()

  def process(self, query: Text) -> Iterable[Tuple[Text, Text, Any]]:
    """Yields rows from query results.

    Args:
      query: A SQL query used to return results from Presto table.

    Yields:
      One row from the query result, represented by a list of tuples. Each
      tuple contains information on column name, column data type, data.
    """
    self.cursor.execute(query)
    rows = self.cursor.fetchall()
    if rows:
      cols = []
      col_types = []
      # Returns a list of (column_name, column_type, None, ...)
      # https://github.com/prestodb/presto-python-client/blob/master/prestodb/dbapi.py#L199
      for metadata in self.cursor.description:
        cols.append(metadata[0])
        col_types.append(metadata[1])
      for r in rows:
        yield zip(cols, col_types, r)

  def teardown(self):
    # Close the cursor when the worker tears down the DoFn instance.
    if self.cursor:
      self.cursor.close()
def _deserialize_conn_config(
    conn_config: presto_config_pb2.PrestoConnConfig
) -> prestodb.dbapi.Connection:
  """Builds a Presto DB-API connection from a serialized connection config.

  Args:
    conn_config: Protobuf-encoded connection config for the Presto client.

  Returns:
    A prestodb.dbapi.Connection initialized with the user-supplied
    parameters.
  """
  # 'host' is the only required field; every other scalar field is forwarded
  # to the client only when the user explicitly set it on the proto.
  kwargs = {'host': conn_config.host}
  optional_fields = ('port', 'user', 'source', 'catalog', 'schema',
                     'http_scheme', 'max_attempts', 'request_timeout')
  for field_name in optional_fields:
    if conn_config.HasField(field_name):
      kwargs[field_name] = getattr(conn_config, field_name)
  # Authentication lives in a oneof; deserialize it separately when present.
  if conn_config.WhichOneof('opt_auth'):
    kwargs['auth'] = _deserialize_auth_config(conn_config)
  return prestodb.dbapi.connect(**kwargs)
def _deserialize_auth_config(
    conn_config: presto_config_pb2.PrestoConnConfig
) -> prestodb.auth.Authentication:
  """Extracts the deserialized Presto Authentication from the conn config.

  Args:
    conn_config: Protobuf-encoded connection config for the Presto client.

  Returns:
    A prestodb.auth.Authentication initialized with user-supplied parameters.

  Raises:
    RuntimeError: if the configured authentication type is not supported.
  """
  # TODO(b/140266796): Support KerberosAuth.
  if not conn_config.HasField('basic_auth'):
    raise RuntimeError('Authentication type not supported.')
  basic = conn_config.basic_auth
  return prestodb.auth.BasicAuthentication(basic.username, basic.password)
def _row_to_example(
    instance: Iterable[Tuple[Text, Text, Any]]) -> tf.train.Example:
  """Converts one Presto result row into a tf.train.Example.

  Each element of *instance* is a (column name, Presto type, value) triple;
  the value is mapped onto the matching tf.train.Feature kind.
  """
  int_types = {'tinyint', 'smallint', 'integer', 'bigint'}
  float_types = {'real', 'double', 'decimal'}
  string_types = {'varchar', 'char'}
  feature = {}
  for column, presto_type, value in instance:
    if value is None:
      # NULL columns become an empty (kind-less) feature.
      feature[column] = tf.train.Feature()
    elif presto_type in int_types:
      feature[column] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[value]))
    elif presto_type in float_types:
      feature[column] = tf.train.Feature(
          float_list=tf.train.FloatList(value=[value]))
    elif presto_type in string_types:
      feature[column] = tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(value)]))
    elif presto_type == 'timestamp':
      # Timestamps arrive as ISO strings; store as integer epoch seconds.
      seconds = int(datetime.datetime.fromisoformat(value).timestamp())
      feature[column] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[seconds]))
    else:
      # TODO(b/140266796): support more types
      # https://prestodb.github.io/docs/current/language/types
      raise RuntimeError(
          'Presto column type {} is not supported.'.format(presto_type))
  return tf.train.Example(features=tf.train.Features(feature=feature))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _PrestoToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline,
    exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Read from Presto and transform to TF examples.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties.
    split_pattern: Split.pattern in Input config, a Presto sql string.

  Returns:
    PCollection of TF examples.
  """
  # The Presto connection config was packed as an Any inside the generic
  # CustomConfig proto by the component; unpack it back here.
  conn_config = example_gen_pb2.CustomConfig()
  proto_utils.json_to_proto(exec_properties['custom_config'], conn_config)
  presto_config = presto_config_pb2.PrestoConnConfig()
  conn_config.custom_config.Unpack(presto_config)
  client = _deserialize_conn_config(presto_config)
  return (pipeline
          | 'Query' >> beam.Create([split_pattern])
          | 'QueryTable' >> beam.ParDo(_ReadPrestoDoFn(client))
          | 'ToTFExample' >> beam.Map(_row_to_example))
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
  """Generic TFX PrestoExampleGen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns the PTransform that converts Presto rows to TF examples."""
    return _PrestoToExample
"""Chicago taxi example using TFX."""
import os
from typing import Text
import absl
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ModelValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.examples.custom_components.presto_example_gen.presto_component.component import PrestoExampleGen
from tfx.examples.custom_components.presto_example_gen.proto import presto_config_pb2
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
_pipeline_name = 'chicago_taxi_presto'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
# Presto configuration that corresponds with tutorial in README.md
_presto_config = presto_config_pb2.PrestoConnConfig(
    host='localhost', port=8080, user='user', catalog='hive', schema='default')
# The query that extracts the Chicago taxi data examples from Presto, following
# setup as described in the README.md
_query = 'SELECT * FROM chicago_taxi_trips_parquet'
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# SQLite ML-Metadata database path used by the local metadata store.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text,
                     module_file: Text,
                     presto_config: presto_config_pb2.PrestoConnConfig,
                     query: Text, serving_model_dir: Text,
                     metadata_path: Text) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline artifacts.
    module_file: Path to the module file with Transform/Trainer callbacks.
    presto_config: Connection parameters for the Presto client.
    query: SQL query used by PrestoExampleGen to ingest examples.
    serving_model_dir: Directory the Pusher exports the blessed model to.
    metadata_path: Path of the SQLite ML-Metadata database.

  Returns:
    A TFX pipeline object wiring all components together.
  """
  # Brings data into the pipeline or otherwise joins/converts training data
  example_gen = PrestoExampleGen(presto_config, query=query)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Uses TFMA to compute an evaluation statistics over features of a model.
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
          evaluator_pb2.SingleSlicingSpec(
              column_for_slicing=['trip_start_hour'])
      ]))
  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'], model=trainer.outputs['model'])
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=model_validator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, evaluator, model_validator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
  )
# To run this pipeline from the python CLI:
#   $python taxi_pipeline_presto.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          presto_config=_presto_config,
          query=_query,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ImportExampleGen
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'cifar10_native_keras'
# This example assumes that CIFAR10 train set data is stored in
# ~/cifar10/data/train, test set data is stored in ~/cifar10/data/test, and
# the utility function is in ~/cifar10. Feel free to customize as needed.
_cifar10_root = os.path.join(os.environ['HOME'], 'cifar10')
_data_root = os.path.join(_cifar10_root, 'data')
# Python module files to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_cifar10_root, 'cifar10_utils_native_keras.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir_lite = os.path.join(_cifar10_root, 'serving_model_lite',
                                       _pipeline_name)
# Directory and data locations. This example assumes all of the images,
# example code, and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# SQLite ML-Metadata database path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Path to labels file for mapping model outputs.
_labels_path = os.path.join(_data_root, 'labels.txt')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir_lite: Text,
                     metadata_path: Text,
                     labels_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the CIFAR10 image classification pipeline using TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline artifacts.
    data_root: Directory containing 'train/' and 'test/' TFRecord splits.
    module_file: Path to the module file with Transform/Trainer callbacks.
    serving_model_dir_lite: Directory the Pusher exports the TFLite model to.
    metadata_path: Path of the SQLite ML-Metadata database.
    labels_path: Path to the labels file for mapping model outputs.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A TFX pipeline object wiring all components together.
  """
  # This is needed for datasets with pre-defined splits
  # Change the pattern argument to train_whole/* and test_whole/* to train
  # on the whole CIFAR-10 dataset
  input_config = example_gen_pb2.Input(splits=[
      example_gen_pb2.Input.Split(name='train', pattern='train/*'),
      example_gen_pb2.Input.Split(name='eval', pattern='test/*')
  ])
  # Brings data into the pipeline.
  example_gen = ImportExampleGen(
      input_base=data_root, input_config=input_config)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that trains a model.
  # When training on the whole dataset, use 18744 for train steps, 156 for eval
  # steps. 18744 train steps correspond to 24 epochs on the whole train set, and
  # 156 eval steps correspond to 1 epoch on the whole test set. The
  # configuration below is for training on the dataset we provided in the data
  # folder, which has 128 train and 128 test samples. The 160 train steps
  # correspond to 40 epochs on this tiny train set, and 4 eval steps correspond
  # to 1 epoch on this tiny test set.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=160),
      eval_args=trainer_pb2.EvalArgs(num_steps=4),
      custom_config={'labels_path': labels_path})
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compare to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label_xf', model_type='tf_lite')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.55}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-3})))
          ])
      ])
  # Uses TFMA to compute the evaluation statistics over features of a model.
  # We evaluate using the materialized examples that are output by Transform
  # because
  # 1. the decoding_png function currently performed within Transform are not
  # compatible with TFLite.
  # 2. MLKit requires deserialized (float32) tensor image inputs
  # Note that for deployment, the same logic that is performed within Transform
  # must be reproduced client-side.
  evaluator = Evaluator(
      examples=transform.outputs['transformed_examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir_lite)))
  components = [
      example_gen, statistics_gen, schema_gen, example_validator, transform,
      trainer, model_resolver, evaluator, pusher
  ]
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $python cifar_pipeline_native_keras.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir_lite=_serving_model_dir_lite,
          metadata_path=_metadata_path,
          labels_path=_labels_path,
          beam_pipeline_args=_beam_pipeline_args))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.components.trainer.rewriting import converters
from tfx.components.trainer.rewriting import rewriter
from tfx.components.trainer.rewriting import rewriter_factory
from tfx.dsl.io import fileio
import flatbuffers
from tflite_support import metadata_schema_py_generated as _metadata_fb
from tflite_support import metadata as _metadata
# When training on the whole dataset use following constants instead.
# This setting should give ~91% accuracy on the whole test set
# _TRAIN_DATA_SIZE = 50000
# _EVAL_DATA_SIZE = 10000
# _TRAIN_BATCH_SIZE = 64
# _EVAL_BATCH_SIZE = 64
# _CLASSIFIER_LEARNING_RATE = 3e-4
# _FINETUNE_LEARNING_RATE = 5e-5
# _CLASSIFIER_EPOCHS = 12
# Reduced settings for quick demo/test runs on a small subset of the data.
_TRAIN_DATA_SIZE = 128
_EVAL_DATA_SIZE = 128
_TRAIN_BATCH_SIZE = 32
_EVAL_BATCH_SIZE = 32
_CLASSIFIER_LEARNING_RATE = 1e-3
_FINETUNE_LEARNING_RATE = 7e-6
_CLASSIFIER_EPOCHS = 30
# Feature and label keys of the input examples, and the name the TFLite
# rewriter gives the converted model file.
_IMAGE_KEY = 'image'
_LABEL_KEY = 'label'
_TFLITE_MODEL_NAME = 'tflite'
def _transformed_name(key):
return key + '_xf'
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  return tf.data.TFRecordDataset(filenames, compression_type='GZIP')
def _get_serve_image_fn(model):
  """Returns a function that feeds the input tensor into the model."""
  @tf.function
  def serve_image_fn(image_tensor):
    """Returns the output to be used in the serving signature.

    Args:
      image_tensor: A tensor representing the input image. The image should
        have 3 channels.

    Returns:
      The model's prediction on the input image tensor.
    """
    return model(image_tensor)
  return serve_image_fn
def _image_augmentation(image_features):
  """Perform image augmentation on batches of images.

  Args:
    image_features: A batch of image features

  Returns:
    The augmented image features
  """
  batch_size = tf.shape(image_features)[0]
  # Random horizontal flip, then pad/crop to 250x250 and take a random
  # 224x224 crop so the framing of each training image is jittered.
  image_features = tf.image.random_flip_left_right(image_features)
  image_features = tf.image.resize_with_crop_or_pad(image_features, 250, 250)
  image_features = tf.image.random_crop(image_features,
                                        (batch_size, 224, 224, 3))
  return image_features
def _data_augmentation(feature_dict):
  """Perform data augmentation on batches of data.

  Args:
    feature_dict: A dict containing features of samples

  Returns:
    The feature dict with augmented features
  """
  image_key = _transformed_name(_IMAGE_KEY)
  feature_dict[image_key] = _image_augmentation(feature_dict[image_key])
  return feature_dict
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              is_train: bool = False,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    tf_transform_output: A TFTransformOutput.
    is_train: Whether the input dataset is train split or not.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=transformed_feature_spec,
      reader=_gzip_reader_fn,
      label_key=_transformed_name(_LABEL_KEY))
  # Apply data augmentation. We have to do data augmentation here because
  # we need to apply data augmentation on-the-fly during training. If we put
  # it in Transform, it will only be applied once on the whole dataset, which
  # will lose the point of data augmentation.
  if is_train:
    dataset = dataset.map(lambda x, y: (_data_augmentation(x), y))
  return dataset
def _freeze_model_by_percentage(model: tf.keras.Model, percentage: float):
  """Freeze the bottom `percentage` fraction of the model's layers.

  Layers above the cutoff are explicitly unfrozen so repeated calls with
  different percentages behave predictably.

  Args:
    model: The keras model need to be partially frozen
    percentage: the percentage of layers to freeze, in [0.0, 1.0]

  Raises:
    ValueError: If `percentage` is outside [0, 1] or the model is not
      trainable.
  """
  if percentage < 0 or percentage > 1:
    # Fixed grammar of the original message ("should between").
    raise ValueError('Freeze percentage should be between 0.0 and 1.0')
  if not model.trainable:
    raise ValueError(
        'The model is not trainable, please set model.trainable to True')
  num_layers_to_freeze = int(len(model.layers) * percentage)
  for idx, layer in enumerate(model.layers):
    layer.trainable = idx >= num_layers_to_freeze
def _build_keras_model() -> tf.keras.Model:
  """Creates an image classification model with MobileNet backbone.

  Returns:
    The image classification Keras Model and the backbone MobileNet model.
    (NOTE(review): the annotation says `tf.keras.Model` but a 2-tuple is
    returned; callers unpack both values.)
  """
  # We create a MobileNet model with weights pre-trained on ImageNet.
  # We remove the top classification layer of the MobileNet, which was
  # used for classifying ImageNet objects. We will add our own classification
  # layer for CIFAR10 later. We use average pooling at the last convolution
  # layer to get a 1D vector for classification, which is consistent with the
  # origin MobileNet setup
  base_model = tf.keras.applications.MobileNet(
      input_shape=(224, 224, 3),
      include_top=False,
      weights='imagenet',
      pooling='avg')
  base_model.input_spec = None
  # We add a Dropout layer at the top of MobileNet backbone we just created to
  # prevent overfitting, and then a Dense layer to classify CIFAR10 objects
  model = tf.keras.Sequential([
      tf.keras.layers.InputLayer(
          input_shape=(224, 224, 3), name=_transformed_name(_IMAGE_KEY)),
      base_model,
      tf.keras.layers.Dropout(0.1),
      tf.keras.layers.Dense(10, activation='softmax')
  ])
  # Freeze the whole MobileNet backbone to first train the top classifier only
  _freeze_model_by_percentage(base_model, 1.0)
  model.compile(
      loss='sparse_categorical_crossentropy',
      optimizer=tf.keras.optimizers.RMSprop(lr=_CLASSIFIER_LEARNING_RATE),
      metrics=['sparse_categorical_accuracy'])
  model.summary(print_fn=absl.logging.info)
  return model, base_model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  # tf.io.decode_png function cannot be applied on a batch of data.
  # We have to use tf.map_fn
  image_features = tf.map_fn(
      lambda x: tf.io.decode_png(x[0], channels=3),
      inputs[_IMAGE_KEY],
      dtype=tf.uint8)
  # image_features = tf.cast(image_features, tf.float32)
  image_features = tf.image.resize(image_features, [224, 224])
  image_features = tf.keras.applications.mobilenet.preprocess_input(
      image_features)
  outputs[_transformed_name(_IMAGE_KEY)] = image_features
  # TODO(b/157064428): Support label transformation for Keras.
  # Do not apply label transformation as it will result in wrong evaluation.
  outputs[_transformed_name(_LABEL_KEY)] = inputs[_LABEL_KEY]
  return outputs
def _write_metadata(model_path: Text, label_map_path: Text, mean: List[float],
                    std: List[float]):
  """Add normalization option and label map TFLite metadata to the model.

  Args:
    model_path: The path of the TFLite model.
    label_map_path: The path of the label map file.
    mean: The mean value used to normalize input image tensor.
    std: The standard deviation used to normalize input image tensor.
  """
  # Creates flatbuffer for model information.
  model_meta = _metadata_fb.ModelMetadataT()
  # Creates flatbuffer for model input metadata.
  # Here we add the input normalization info to input metadata.
  input_meta = _metadata_fb.TensorMetadataT()
  input_normalization = _metadata_fb.ProcessUnitT()
  input_normalization.optionsType = (
      _metadata_fb.ProcessUnitOptions.NormalizationOptions)
  input_normalization.options = _metadata_fb.NormalizationOptionsT()
  input_normalization.options.mean = mean
  input_normalization.options.std = std
  input_meta.processUnits = [input_normalization]
  # Creates flatbuffer for model output metadata.
  # Here we add label file to output metadata.
  output_meta = _metadata_fb.TensorMetadataT()
  label_file = _metadata_fb.AssociatedFileT()
  label_file.name = os.path.basename(label_map_path)
  label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
  output_meta.associatedFiles = [label_file]
  # Creates subgraph to contain input and output information,
  # and add subgraph to the model information.
  subgraph = _metadata_fb.SubGraphMetadataT()
  subgraph.inputTensorMetadata = [input_meta]
  subgraph.outputTensorMetadata = [output_meta]
  model_meta.subgraphMetadata = [subgraph]
  # Serialize the model metadata buffer we created above using flatbuffer
  # builder.
  b = flatbuffers.Builder(0)
  b.Finish(
      model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
  metadata_buf = b.Output()
  # Populates metadata and label file to the model file.
  populator = _metadata.MetadataPopulator.with_model_file(model_path)
  populator.load_metadata_buffer(metadata_buf)
  populator.load_associated_files([label_map_path])
  populator.populate()
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Trains in two phases (frozen-backbone classifier training, then joint
  fine-tuning), then converts the model to TFLite and attaches MLKit metadata.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.

  Raises:
    ValueError: if invalid inputs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
  train_dataset = _input_fn(
      fn_args.train_files,
      tf_transform_output,
      is_train=True,
      batch_size=_TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(
      fn_args.eval_files,
      tf_transform_output,
      is_train=False,
      batch_size=_EVAL_BATCH_SIZE)
  model, base_model = _build_keras_model()
  absl.logging.info('Tensorboard logging to {}'.format(fn_args.model_run_dir))
  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')
  # Our training regime has two phases: we first freeze the backbone and train
  # the newly added classifier only, then unfreeze part of the backbone and
  # fine-tune with classifier jointly.
  steps_per_epoch = int(_TRAIN_DATA_SIZE / _TRAIN_BATCH_SIZE)
  total_epochs = int(fn_args.train_steps / steps_per_epoch)
  if _CLASSIFIER_EPOCHS > total_epochs:
    raise ValueError('Classifier epochs is greater than the total epochs')
  absl.logging.info('Start training the top classifier')
  model.fit(
      train_dataset,
      epochs=_CLASSIFIER_EPOCHS,
      steps_per_epoch=steps_per_epoch,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])
  absl.logging.info('Start fine-tuning the model')
  # Unfreeze the top MobileNet layers and do joint fine-tuning
  _freeze_model_by_percentage(base_model, 0.9)
  # We need to recompile the model because layer properties have changed
  model.compile(
      loss='sparse_categorical_crossentropy',
      optimizer=tf.keras.optimizers.RMSprop(lr=_FINETUNE_LEARNING_RATE),
      metrics=['sparse_categorical_accuracy'])
  model.summary(print_fn=absl.logging.info)
  model.fit(
      train_dataset,
      initial_epoch=_CLASSIFIER_EPOCHS,
      epochs=total_epochs,
      steps_per_epoch=steps_per_epoch,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])
  # Prepare the TFLite model used for serving in MLKit
  signatures = {
      'serving_default':
          _get_serve_image_fn(model).get_concrete_function(
              tf.TensorSpec(
                  shape=[None, 224, 224, 3],
                  dtype=tf.float32,
                  name=_transformed_name(_IMAGE_KEY)))
  }
  temp_saving_model_dir = os.path.join(fn_args.serving_model_dir, 'temp')
  model.save(temp_saving_model_dir, save_format='tf', signatures=signatures)
  tfrw = rewriter_factory.create_rewriter(
      rewriter_factory.TFLITE_REWRITER,
      name='tflite_rewriter')
  converters.rewrite_saved_model(temp_saving_model_dir,
                                 fn_args.serving_model_dir, tfrw,
                                 rewriter.ModelType.TFLITE_MODEL)
  # Add necessary TFLite metadata to the model in order to use it within MLKit
  # TODO(dzats@): Handle label map file path more properly, currently
  # hard-coded.
  tflite_model_path = os.path.join(fn_args.serving_model_dir,
                                   _TFLITE_MODEL_NAME)
  # TODO(dzats@): Extend the TFLite rewriter to be able to add TFLite metadata
  # to the model.
  _write_metadata(
      model_path=tflite_model_path,
      label_map_path=fn_args.custom_config['labels_path'],
      mean=[127.5],
      std=[127.5])
  # Clean up the intermediate SavedModel used only for the TFLite conversion.
  fileio.rmtree(temp_saving_model_dir)
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import List, Text
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
from tfx.orchestration.airflow.airflow_dag_runner import AirflowPipelineConfig
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'taxi_solution'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'airflow')
_data_root = os.path.join(_taxi_root, 'data', 'taxi_data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'dags', 'taxi_utils_solution.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(_taxi_root, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# Airflow-specific configs; these will be passed directly to airflow
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of this pipeline.
    pipeline_root: Root directory for the pipeline's output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Path to the module file holding preprocessing_fn / run_fn.
    serving_model_dir: Directory to which the Pusher exports blessed models.
    metadata_path: Path to the SQLite ML-metadata database.
    beam_pipeline_args: Arguments for Beam-powered components.

  Returns:
    A TFX `pipeline.Pipeline` containing the wired-up components.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  infer_schema = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  validate_stats = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=infer_schema.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=infer_schema.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=infer_schema.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='tips')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='BinaryAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.6}),
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  model_analyzer = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      # Change threshold will be ignored if there is no baseline (first run).
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=model_analyzer.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          infer_schema,
          validate_stats,
          transform,
          trainer,
          model_resolver,
          model_analyzer,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# 'DAG' below need to be kept for Airflow to detect dag.
DAG = AirflowDagRunner(AirflowPipelineConfig(_airflow_config)).run(
    _create_pipeline(
        pipeline_name=_pipeline_name,
        pipeline_root=_pipeline_root,
        data_root=_data_root,
        module_file=_module_file,
        serving_model_dir=_serving_model_dir,
        metadata_path=_metadata_path,
        beam_pipeline_args=_beam_pipeline_args))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import absl
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.executor import TrainerFnArgs
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Label and fare keys used to derive the binary "big tipper" label.
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  return tf.data.TFRecordDataset(
      filenames,
      compression_type='GZIP')
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Dense inputs are passed through unchanged.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  default_value = '' if x.dtype == tf.string else 0
  return tf.squeeze(
      tf.sparse.to_dense(
          tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
          default_value),
      axis=1)
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and applies TFT."""
  # Attach the TFT layer to the model so it is tracked and exported with it.
  model.tft_layer = tf_transform_output.transform_features_layer()
  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not available (nor needed) at serving time.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)
  return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=transformed_feature_spec,
      reader=_gzip_reader_fn,
      label_key=_transformed_name(_LABEL_KEY))
  return dataset
def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying taxi data.

  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).

  Returns:
    A keras Model.
  """
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  # Wide part of the model gets one-hot (indicator) encodings.
  indicator_column = [
      tf.feature_column.indicator_column(categorical_column)
      for categorical_column in categorical_columns
  ]
  model = _wide_and_deep_classifier(
      # TODO(b/139668410) replace with premade wide_and_deep keras model
      wide_columns=indicator_column,
      deep_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25])
  return model
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):
  """Build a simple keras wide and deep model.

  Args:
    wide_columns: Feature columns wrapped in indicator_column for wide (linear)
      part of the model.
    deep_columns: Feature columns for deep part of the model.
    dnn_hidden_units: [int], the layer sizes of the hidden DNN.

  Returns:
    A Wide and Deep Keras model
  """
  # Following values are hard coded for simplicity in this example,
  # However preferably they should be passed in as hparams.
  # Keras needs the feature definitions at compile time.
  # TODO(b/139081439): Automate generation of input layers from FeatureColumn.
  input_layers = {
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
      for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  }
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_VOCAB_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_BUCKET_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)
  })
  # TODO(b/161952382): Replace with Keras premade models and
  # Keras preprocessing layers.
  deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
  for numnodes in dnn_hidden_units:
    deep = tf.keras.layers.Dense(numnodes)(deep)
  wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)
  # Sigmoid output over the concatenated wide and deep representations.
  output = tf.keras.layers.Dense(
      1, activation='sigmoid')(
          tf.keras.layers.concatenate([deep, wide]))
  model = tf.keras.Model(input_layers, output)
  model.compile(
      loss='binary_crossentropy',
      optimizer=tf.keras.optimizers.Adam(lr=0.001),
      metrics=[tf.keras.metrics.BinaryAccuracy()])
  model.summary(print_fn=absl.logging.info)
  return model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]),
        _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_transformed_name(_LABEL_KEY)] = tf.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
# TFX Trainer will call this function.
def run_fn(fn_args: TrainerFnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # Number of nodes in the first layer of the DNN
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
  train_dataset = _input_fn(fn_args.train_files, tf_transform_output, 40)
  eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output, 40)
  # If no GPUs are found, CPU is used.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = _build_keras_model(
        # Construct layer sizes with exponential decay
        hidden_units=[
            max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
            for i in range(num_dnn_layers)
        ])
  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')
  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])
  # Export a serving signature that parses raw tf.Examples and applies TFT.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""Utils to query a TFX pipeline's ml-metadata store in a notebook."""
import os
import time
import papermill as pm
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
import utils
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2
class TFXArtifactTypes(object):
  """Constants for different TFX artifact type names."""
  EXAMPLES = 'Examples'
  SCHEMA = 'Schema'
  EXAMPLE_STATS = 'ExampleStatistics'
  EXAMPLE_VALIDATION = 'ExampleAnomalies'
  # NOTE(review): this constant is named TRANSFORMED_EXAMPLES but maps to the
  # 'TransformGraph' artifact type name — confirm this is intentional.
  TRANSFORMED_EXAMPLES = 'TransformGraph'
  MODEL = 'Model'
  MODEL_EVAL = 'ModelEvaluation'
class TFXExecutionTypes(object):
  """Constants for different TFX execution type names."""
  # Fully-qualified component class paths recorded as execution types in MLMD.
  EXAMPLE_GEN = 'tfx.components.example_gen.csv_example_gen.component.CsvExampleGen'
  STATISTICS_GEN = 'tfx.components.statistics_gen.component.StatisticsGen'
  SCHEMA_GEN = 'tfx.components.schema_gen.component.SchemaGen'
  EXAMPLE_VALIDATION = 'tfx.components.example_validator.component.ExampleValidator'
  TRANSFORM = 'tfx.components.transform.component.Transform'
  TRAINER = 'tfx.components.trainer.component.Trainer'
  EVALUATOR = 'tfx.components.evaluator.component.Evaluator'
class TFXReadonlyMetadataStore(utils.ReadonlyMetadataStore):
"""A TFX ml-metadata store that provides read-only methods for notebooks."""
@staticmethod
def from_sqlite_db(filename_uri):
"""Returns README.ml-pipelines-sdk.md `TFXReadonlyMetadataStore` based off README.ml-pipelines-sdk.md SQLITE db uri.
Args:
filename_uri: A `str` indicating the path to the SQLITE db.
Returns:
A `TFXReadonlyMetadataStore` based off README.ml-pipelines-sdk.md SQLITE db uri.
"""
c = metadata_store_pb2.ConnectionConfig()
c.sqlite.filename_uri = filename_uri
return TFXReadonlyMetadataStore(metadata_store.MetadataStore(c))
  def display_tfma_analysis(self, model_id, slicing_column=None):
    """Displays TFMA metrics for `model_id` sliced by `slicing_column`.

    Args:
      model_id: A `int` indicating the id of a `TFXArtifactTypes.MODEL`
        artifact.
      slicing_column: (Optional) A `str` indicating the slicing column for the
        TFMA metrics.

    Returns:
      A SlicingMetricsViewer object if in Jupyter notebook; None if in Colab.
    """
    tfma_artifact = self.get_dest_artifact_of_type(model_id,
                                                   TFXArtifactTypes.MODEL_EVAL)
    if tfma_artifact:
      return tfma.view.render_slicing_metrics(
          tfma.load_eval_result(tfma_artifact.uri),
          slicing_column=slicing_column)
def compare_tfma_analysis(self, model_id, other_model_id):
"""Compares TFMA metrics for `model_id` and `other_model_id`.
Args:
model_id: A `int` indicating the id of README.ml-pipelines-sdk.md `TFXArtifactTypes.MODEL` artifact
other_model_id: A `int` indicating the id of another
`TFXArtifactTypes.MODEL` artifact.
Returns:
A TimeSeriesViewer object if in Jupyter notebook; None if in Colab.
"""
tfma_artifact, other_tfma_artifact = (self.get_dest_artifact_of_type(
model_id, TFXArtifactTypes.MODEL_EVAL),
self.get_dest_artifact_of_type(
other_model_id,
TFXArtifactTypes.MODEL_EVAL))
if tfma_artifact and other_tfma_artifact:
eval_results = tfma.make_eval_results([
tfma.load_eval_result(tfma_artifact.uri),
tfma.load_eval_result(other_tfma_artifact.uri)
], tfma.constants.MODEL_CENTRIC_MODE)
return tfma.view.render_time_series(eval_results,
tfma.slicer.slicer.SingleSliceSpec())
def display_stats_for_examples(self, examples_id, split='train'):
"""Displays stats for `examples_id`.
Args:
examples_id: A `int` indicating the id of README.ml-pipelines-sdk.md `TFXArtifactTypes.EXAMPLES`
artifact.
split: A `string` specifying the split name, by default 'train' is used.
"""
stats_artifact = self.get_dest_artifact_of_type(
examples_id, TFXArtifactTypes.EXAMPLE_STATS)
if stats_artifact:
tfdv.visualize_statistics(
tfdv.load_statistics(
os.path.join(stats_artifact.uri, split, 'stats_tfrecord')))
def compare_stats_for_examples(self,
examples_id,
other_examples_id,
name='',
other_name=''):
"""Compares stats for `examples_id` and `other_examples_id`.
Args:
examples_id: A `int` indicating the id of one `TFXArtifactTypes.EXAMPLES`
artifact.
other_examples_id: A `int` indicating the id of another
`TFXArtifactTypes.EXAMPLES` artifact.
name: (Optional) A `str` indicating the label to use for stats of
`examples_id`.
other_name: (Optional) A `str` indicating the label to use for stats of
`other_examples_id`.
"""
stats_artifact, other_stats_artifact = (self.get_dest_artifact_of_type(
examples_id, TFXArtifactTypes.EXAMPLE_STATS),
self.get_dest_artifact_of_type(
other_examples_id,
TFXArtifactTypes.EXAMPLE_STATS))
if stats_artifact and other_stats_artifact:
tfdv.visualize_statistics(
tfdv.load_statistics(stats_artifact.uri),
rhs_statistics=tfdv.load_statistics(other_stats_artifact.uri),
lhs_name=name,
rhs_name=other_name)
def display_examples_stats_for_model(self, model_id):
"""Displays stats for examples used to train `model_id`."""
examples_artifact = self.get_source_artifact_of_type(
model_id, TFXArtifactTypes.EXAMPLES)
if examples_artifact:
self.display_stats_for_examples(examples_artifact.id)
def compare_examples_stats_for_models(self, model_id, other_model_id):
"""Compares stats for examples to train `model_id` & `other_model_id`."""
examples_artifact, other_examples_artifact = (
self.get_source_artifact_of_type(model_id, TFXArtifactTypes.EXAMPLES),
self.get_source_artifact_of_type(other_model_id,
TFXArtifactTypes.EXAMPLES))
if examples_artifact and other_examples_artifact:
self.compare_stats_for_examples(
examples_artifact.id,
other_examples_artifact.id,
name='model_' + str(model_id),
other_name='model_' + str(other_model_id))
def display_tensorboard(self, model_id, *other_model_ids):
"""Returns README.ml-pipelines-sdk.md Tensorboard link for `model_id` and `other_model_ids`.
Args:
model_id: A `int` indicating the id of README.ml-pipelines-sdk.md `TFXArtifactTypes.MODEL`
artifact.
*other_model_ids: (Optional) A list of `int` indicating the ids of other
`TFXArtifactTypes.MODEL` artifacts to also include in the Tensorboard
invocation for comparison.
"""
model_ids = [model_id] + list(other_model_ids)
model_artifacts = self.metadata_store.get_artifacts_by_id(model_ids)
model_ids_str = '-'.join([str(m) for m in model_ids])
log_file = os.path.join(
os.environ['HOME'],
'tensorboard_model_{}_log.txt'.format(model_ids_str),
)
output_notebook_path = os.path.join(
os.environ['HOME'],
'spawn_tensorboard_{}_output.ipynb'.format(model_ids_str),
)
tensorboard_logdir = ','.join(
['model_{}:{}'.format(m.id, m.uri) for m in model_artifacts])
pm.execute_notebook(
'spawn_tensorboard.ipynb',
output_notebook_path,
parameters=dict(tb_logdir=tensorboard_logdir, tb_run_log=log_file),
progress_bar=False)
time.sleep(5) # Give it some time for log_filename to be flushed.
with open(log_file) as f:
for l in f:
if 'TensorBoard' in l:
# "TensorBoard 1.12.2 at http://... (Press CTRL+C to quit)"
return l.split(' ')[3] | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/examples/airflow_workshop/notebooks/tfx_utils.py | 0.936829 | 0.362433 | tfx_utils.py | pypi |
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import Evaluator
from tfx.components import ImportExampleGen
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.experimental.data_view import binder_component
from tfx.components.experimental.data_view import provider_component
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'tf_ranking_antique'

# This example assumes that the training data is stored in
# ~/tf_ranking_antique/data
# and the module file is in ~/tf_ranking_antique. Feel free to customize this
# as needed.
_ranking_root = os.path.join(os.environ['HOME'], 'tf_ranking_antique')
_data_root = os.path.join(_ranking_root, 'data')

# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run
# successfully.
# NOTE(review): the file name says 'taxi_utils' although this is the ranking
# example -- confirm it points at the intended module.
_module_file = os.path.join(_ranking_root, 'taxi_utils_native_keras.py')

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(
    _ranking_root, 'serving_model', _pipeline_name)

# Directory and data locations. This example assumes all the example code and
# metadata library is relative to $HOME, but you can store these files anywhere
# on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')

# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Creates the TF Ranking pipeline.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for the pipeline's output artifacts. The
      caller passes the fully-composed root
      (`<tfx_root>/pipelines/<pipeline_name>`), which is used as-is.
    data_root: Directory containing the input ELWC examples.
    module_file: Python module file providing the user-defined functions
      (the `make_decoder` factory, `preprocessing_fn` and `run_fn`).
    serving_model_dir: Directory the Pusher exports the blessed model to.
    metadata_path: Path of the SQLite ML Metadata db.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A `pipeline.Pipeline` wiring all components together.
  """
  # BUG FIX: previously `pipeline_root` was re-joined with
  # 'pipelines'/<pipeline_name> here, although the caller already passes
  # `<tfx_root>/pipelines/<pipeline_name>` -- that nested outputs under
  # <root>/pipelines/<name>/pipelines/<name>, inconsistent with
  # `_metadata_path` and with the sibling BERT example pipeline.
  examples = external_input(data_root)
  example_gen = ImportExampleGen(
      input=examples,
      # IMPORTANT: must set FORMAT_PROTO
      payload_format=example_gen_pb2.FORMAT_PROTO)
  # Provides the decoder graph (DataView) used to parse the ELWC records.
  data_view_provider = provider_component.TfGraphDataViewProvider(
      module_file=module_file,
      create_decoder_func='make_decoder')
  # Binds the DataView to the examples so downstream components can parse them.
  data_view_binder = binder_component.DataViewBinder(
      example_gen.outputs['examples'],
      data_view_provider.outputs['data_view'])
  statistics_gen = StatisticsGen(
      examples=data_view_binder.outputs['output_examples'])
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  transform = Transform(
      examples=data_view_binder.outputs['output_examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file,
      # important: must disable Transform materialization.
      materialize=False)
  trainer = Trainer(
      examples=data_view_binder.outputs['output_examples'],
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      transform_graph=transform.outputs['transform_graph'],
      module_file=module_file,
      train_args=trainer_pb2.TrainArgs(num_steps=1000),
      schema=schema_gen.outputs['schema'],
      eval_args=trainer_pb2.EvalArgs(num_steps=10))
  eval_config = tfma.EvalConfig(
      model_specs=[
          tfma.ModelSpec(
              signature_name='',
              label_key='relevance',
              # Lists are padded; exclude padding from metric computation.
              padding_options=tfma.config.PaddingOptions(
                  label_float_padding=-1.0, prediction_float_padding=-1.0))
      ],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['query_tokens']),
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              per_slice_thresholds={
                  'metric/ndcg_10':
                      tfma.config.PerSliceMetricThresholds(thresholds=[
                          tfma.PerSliceMetricThreshold(
                              # The overall slice.
                              slicing_specs=[tfma.SlicingSpec()],
                              threshold=tfma.MetricThreshold(
                                  value_threshold=tfma.GenericValueThreshold(
                                      lower_bound={'value': 0.6})))
                      ])
              })
      ])
  evaluator = Evaluator(
      examples=data_view_binder.outputs['output_examples'],
      model=trainer.outputs['model'],
      eval_config=eval_config,
      schema=schema_gen.outputs['schema'])
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          data_view_provider,
          data_view_binder,
          statistics_gen,
          schema_gen,
          transform,
          trainer,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $ python ranking_pipeline.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Run the pipeline locally with the Beam orchestrator.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          metadata_path=_metadata_path,
          serving_model_dir=_serving_model_dir,
          beam_pipeline_args=_beam_pipeline_args))
"""Module file."""
import tensorflow as tf
import tensorflow_ranking as tfr
import tensorflow_transform as tft
from tfx.examples.ranking import features
from tfx.examples.ranking import struct2tensor_parsing_utils
from tfx_bsl.public import tfxio
def make_decoder():
  """Creates a data decoder that decodes ELWC records to tensors.

  A DataView (see "TfGraphDataViewProvider" component in the pipeline)
  will refer to this decoder. And any components that consume the data
  with the DataView applied will use this decoder.

  Returns:
    An ELWC decoder.
  """
  context_features, example_features, label_feature = features.get_features()
  return struct2tensor_parsing_utils.ELWCDecoder(
      name='ELWCDecoder',
      context_features=context_features,
      example_features=example_features,
      size_feature_name=features.LIST_SIZE_FEATURE_NAME,
      label_feature=label_feature)
def preprocessing_fn(inputs):
  """Transform preprocessing_fn.

  Args:
    inputs: Map from feature keys to raw, not-yet-transformed features.

  Returns:
    The `inputs` map unchanged; the vocabulary below is produced purely as a
    Transform side effect.
  """
  # Generate a vocabulary shared by the query and document tokens.
  _ = tft.vocabulary(
      tf.concat([
          inputs[features.QUERY_TOKENS].flat_values,
          inputs[features.DOCUMENT_TOKENS].flat_values
      ],
                axis=0),
      vocab_filename='shared_vocab')
  return inputs
def run_fn(trainer_fn_args):
  """TFX trainer entry point.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)
  # Hyper-parameters of the ranking model; consumed by _create_ranking_model.
  hparams = dict(
      batch_size=32,
      embedding_dimension=20,
      learning_rate=0.05,
      dropout_rate=0.8,
      hidden_layer_dims=[64, 32, 16],
      loss='approx_ndcg_loss',
      use_batch_norm=True,
      batch_norm_moment=0.99
  )
  train_dataset = _input_fn(trainer_fn_args.train_files,
                            trainer_fn_args.data_accessor,
                            hparams['batch_size'])
  eval_dataset = _input_fn(trainer_fn_args.eval_files,
                           trainer_fn_args.data_accessor,
                           hparams['batch_size'])
  model = _create_ranking_model(tf_transform_output, hparams)
  model.summary()
  log_dir = trainer_fn_args.model_run_dir
  # Write logs to path
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=log_dir, update_freq='batch')
  model.fit(
      train_dataset,
      steps_per_epoch=trainer_fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=trainer_fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # TODO(zhuo): Add support for Regress signature.
  @tf.function(input_signature=[tf.TensorSpec([None], tf.string)],
               autograph=False)
  def predict_serving_fn(serialized_elwc_records):
    """Serving signature: decodes serialized ELWC records and predicts."""
    decoder = make_decoder()
    decoded = decoder.decode_record(serialized_elwc_records)
    # The label is an input at training time only; drop it for serving.
    decoded.pop(features.LABEL)
    return {tf.saved_model.PREDICT_OUTPUTS: model(decoded)}

  model.save(
      trainer_fn_args.serving_model_dir,
      save_format='tf',
      signatures={
          'serving_default':
              predict_serving_fn.get_concrete_function(),
      })
def _input_fn(file_patterns,
              data_accessor,
              batch_size) -> tf.data.Dataset:
  """Returns a dataset of decoded tensors."""

  def prepare_label(parsed_ragged_tensors):
    # Split the parsed tensors into the (features, label) pair Keras expects.
    label = parsed_ragged_tensors.pop(features.LABEL)
    # Convert labels to a dense tensor.
    label = label.to_tensor(default_value=features.LABEL_PADDING_VALUE)
    return parsed_ragged_tensors, label

  # NOTE: this dataset already contains RaggedTensors from the Decoder.
  dataset = data_accessor.tf_dataset_factory(
      file_patterns,
      tfxio.TensorFlowDatasetOptions(batch_size=batch_size),
      schema=None)
  return dataset.map(prepare_label).repeat()
def _preprocess_keras_inputs(context_keras_inputs, example_keras_inputs,
                             tf_transform_output, hparams):
  """Preprocesses the inputs, including vocab lookup and embedding.

  Args:
    context_keras_inputs: Map of context feature name to Keras input.
    example_keras_inputs: Map of example feature name to Keras input.
    tf_transform_output: A `tft.TFTransformOutput` carrying the shared vocab
      generated by preprocessing_fn.
    hparams: Dict of hyper-parameters; 'embedding_dimension' is read here.

  Returns:
    A (preprocessed_context_features, preprocessed_example_features, mask)
    tuple, where `mask` marks the valid entries of each example list.
  """
  # Shared token->id lookup built from the Transform-generated vocabulary;
  # one extra slot is reserved for the OOV bucket.
  lookup_layer = tf.keras.layers.experimental.preprocessing.StringLookup(
      max_tokens=(
          tf_transform_output.vocabulary_size_by_name('shared_vocab') + 1),
      vocabulary=tf_transform_output.vocabulary_file_by_name('shared_vocab'),
      num_oov_indices=1,
      oov_token='[UNK#]',
      mask_token=None)
  embedding_layer = tf.keras.layers.Embedding(
      input_dim=(
          tf_transform_output.vocabulary_size_by_name('shared_vocab') + 1),
      output_dim=hparams['embedding_dimension'],
      embeddings_initializer=None,
      embeddings_constraint=None)

  def embedding(input_tensor):
    # TODO(b/158673891): Support weighted features.
    embedded_tensor = embedding_layer(lookup_layer(input_tensor))
    mean_embedding = tf.reduce_mean(embedded_tensor, axis=-2)
    # mean_embedding could be a dense tensor (context feature) or a ragged
    # tensor (example feature). If it's ragged, we densify it first.
    if isinstance(mean_embedding.type_spec, tf.RaggedTensorSpec):
      return struct2tensor_parsing_utils.make_ragged_densify_layer()(
          mean_embedding)
    return mean_embedding

  preprocessed_context_features, preprocessed_example_features = {}, {}
  context_features, example_features, _ = features.get_features()
  for feature in context_features:
    preprocessed_context_features[feature.name] = embedding(
        context_keras_inputs[feature.name])
  for feature in example_features:
    preprocessed_example_features[feature.name] = embedding(
        example_keras_inputs[feature.name])
  # Derive the per-list validity mask from the list-size feature.
  list_size = struct2tensor_parsing_utils.make_ragged_densify_layer()(
      context_keras_inputs[features.LIST_SIZE_FEATURE_NAME])
  list_size = tf.reshape(list_size, [-1])
  mask = tf.sequence_mask(list_size)
  return preprocessed_context_features, preprocessed_example_features, mask
def _create_ranking_model(tf_transform_output, hparams) -> tf.keras.Model:
  """Creates a compiled Keras ranking model.

  Args:
    tf_transform_output: A `tft.TFTransformOutput` carrying the shared vocab.
    hparams: Dict of hyper-parameters (hidden layer sizes, batch-norm
      settings, dropout rate, learning rate and the TF-Ranking loss name).

  Returns:
    A compiled `tf.keras.Model` emitting one logit per list entry.
  """
  context_feature_specs, example_feature_specs, _ = features.get_features()
  context_keras_inputs, example_keras_inputs = (
      struct2tensor_parsing_utils.create_keras_inputs(
          context_feature_specs, example_feature_specs,
          features.LIST_SIZE_FEATURE_NAME))
  context_features, example_features, mask = _preprocess_keras_inputs(
      context_keras_inputs, example_keras_inputs, tf_transform_output, hparams)
  (flattened_context_features,
   flattened_example_features) = tfr.keras.layers.FlattenList()(
       context_features, example_features, mask)
  # Concatenate flattened context and example features along `list_size` dim.
  context_input = [
      tf.keras.layers.Flatten()(flattened_context_features[name])
      for name in sorted(flattened_context_features)
  ]
  example_input = [
      tf.keras.layers.Flatten()(flattened_example_features[name])
      for name in sorted(flattened_example_features)
  ]
  input_layer = tf.concat(context_input + example_input, 1)
  # Feed-forward scoring tower: per hidden layer optionally apply batch norm,
  # then ReLU and dropout; the final Dense(1) emits the per-entry logit.
  dnn = tf.keras.Sequential()
  if hparams['use_batch_norm']:
    dnn.add(
        tf.keras.layers.BatchNormalization(
            momentum=hparams['batch_norm_moment']))
  for layer_size in hparams['hidden_layer_dims']:
    dnn.add(tf.keras.layers.Dense(units=layer_size))
    if hparams['use_batch_norm']:
      dnn.add(tf.keras.layers.BatchNormalization(
          momentum=hparams['batch_norm_moment']))
    dnn.add(tf.keras.layers.Activation(activation=tf.nn.relu))
    dnn.add(tf.keras.layers.Dropout(rate=hparams['dropout_rate']))
  dnn.add(tf.keras.layers.Dense(units=1))
  # Restore the flat logits back into per-query lists using the mask.
  logits = tfr.keras.layers.RestoreList()(dnn(input_layer), mask)
  model = tf.keras.Model(
      inputs={
          **context_keras_inputs,
          **example_keras_inputs
      },
      outputs=logits,
      name='dnn_ranking_model')
  model.compile(
      optimizer=tf.keras.optimizers.Adagrad(
          learning_rate=hparams['learning_rate']),
      loss=tfr.keras.losses.get(hparams['loss']),
      metrics=tfr.keras.metrics.default_keras_metrics())
  return model
"""BERT Sentence Pair Classification example on MRPC using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'bert_mrpc'

# This example assumes that MRPC data is stored in ~/bert/mrpc/data and the
# utility function is in ~/bert/mrpc. Feel free to customize as needed.
_bert_mrpc_root = os.path.join(os.environ['HOME'], 'bert', 'mrpc')
_data_root = os.path.join(_bert_mrpc_root, 'data')

# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run
# successfully.
_module_file = os.path.join(_bert_mrpc_root, 'bert_mrpc_utils.py')

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_bert_mrpc_root, 'serving_model',
                                  _pipeline_name)

# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')

# Pipeline arguments for Beam powered Components.
# TODO(dzats): Release 0.23 for both tfma and tft address the issue with
# multi-worker. Switch to direct_num_workers=0 at that point.
_beam_pipeline_args = ['--direct_num_workers=1']
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the BERT classification pipeline on the MRPC dataset with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for the pipeline's output artifacts.
    data_root: Directory holding the 'train/*' and 'validation/*' CSV data.
    module_file: Python module file providing preprocessing_fn and run_fn.
    serving_model_dir: Directory the Pusher exports the blessed model to.
    metadata_path: Path of the SQLite ML Metadata db.
    beam_pipeline_args: Arguments forwarded to Beam-powered components.

  Returns:
    A `pipeline.Pipeline` wiring all components together.
  """
  # Map the pre-split data directories onto named train/eval splits.
  input_config = example_gen_pb2.Input(splits=[
      example_gen_pb2.Input.Split(name='train', pattern='train/*'),
      example_gen_pb2.Input.Split(name='eval', pattern='validation/*')
  ])
  # Brings data into the pipeline.
  example_gen = CsvExampleGen(input_base=data_root, input_config=input_config)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses a user-provided Python function (run_fn, via the GenericExecutor) to
  # train a model.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      # Adjust these steps when training on the full dataset.
      train_args=trainer_pb2.TrainArgs(num_steps=1),
      eval_args=trainer_pb2.EvalArgs(num_steps=1))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          # Adjust the threshold when training on the
                          # full dataset.
                          lower_bound={'value': 0.5}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-2})))
          ])
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  components = [
      example_gen,
      statistics_gen,
      schema_gen,
      example_validator,
      transform,
      trainer,
      model_resolver,
      evaluator,
      pusher,
  ]
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      enable_cache=True,
      beam_pipeline_args=beam_pipeline_args,
  )
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Run the pipeline locally with the Beam orchestrator.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Python source file include mrpc pipeline functions and necessary utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier
from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor
# TF-Hub handle of the BERT encoder used for both tokenization and training.
_BERT_LINK = 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2'
_EPOCHS = 1
_EVAL_BATCH_SIZE = 32
# Raw feature / label keys of the MRPC sentence-pair examples.
_FEATURE_KEY_A = 'sentence1'
_FEATURE_KEY_B = 'sentence2'
_LABEL_KEY = 'label'
# Combined token length of a tokenized sentence pair (see _tokenize).
_MAX_LEN = 128
_TRAIN_BATCH_SIZE = 32
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
  return tf.data.TFRecordDataset(filenames, compression_type='GZIP')
def _tokenize(sequence_a, sequence_b):
  """Tokenizes the two sentence batches and inserts appropriate tokens.

  Args:
    sequence_a: First sentences; flattened to a [batch_size] tensor.
    sequence_b: Second sentences; flattened to a [batch_size] tensor.

  Returns:
    The (word_ids, input_mask, segment_ids) triple produced by
    `BertPreprocessor.tokenize_sentence_pair`, each of width `_MAX_LEN`.
  """
  processor = BertPreprocessor(_BERT_LINK)
  return processor.tokenize_sentence_pair(
      tf.reshape(sequence_a, [-1]), tf.reshape(sequence_b, [-1]), _MAX_LEN)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Tokenizes the raw sentence pair into the three tensors a BERT encoder
  expects, and passes the label through unchanged.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature Tensors.
  """
  sentence_a = inputs[_FEATURE_KEY_A]
  sentence_b = inputs[_FEATURE_KEY_B]
  word_ids, mask, segments = _tokenize(sentence_a, sentence_b)
  outputs = {'label': inputs['label']}
  outputs['input_word_ids'] = word_ids
  outputs['input_mask'] = mask
  outputs['segment_ids'] = segments
  return outputs
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    tf_transform_output: A TFTransformOutput.
    batch_size: Representing the number of consecutive elements of the
      returned dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuples where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=transformed_feature_spec,
      reader=_gzip_reader_fn,
      label_key=_LABEL_KEY)
  return dataset.prefetch(tf.data.experimental.AUTOTUNE)
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example."""
  # Attach the Transform graph to the model so it is exported alongside it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not provided at serving time.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
  train_dataset = _input_fn(
      fn_args.train_files, tf_transform_output, batch_size=_TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(
      fn_args.eval_files, tf_transform_output, batch_size=_EVAL_BATCH_SIZE)
  # Build the classifier inside a MirroredStrategy distribution scope.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    bert_layer = hub.KerasLayer(_BERT_LINK, trainable=True)
    model = build_and_compile_bert_classifier(bert_layer, _MAX_LEN, 2, 2e-5)
  model.fit(
      train_dataset,
      epochs=_EPOCHS,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps)
  # Export with a serving signature that accepts serialized tf.Examples.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""Prepressing using tensorflow_text BertTokenizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
from tensorflow.python.eager.context import eager_mode # pylint: disable=g-direct-tensorflow-import
_CLS = '[CLS]'
_PAD = '[PAD]'
_SEP = '[SEP]'
class BertPreprocessor(object):
"""Bert Tokenizer built ontop of tensorflow_text.BertTokenizer."""
def __init__(self, model_link: Text):
self._model_link = model_link
self._model = hub.KerasLayer(model_link)
self._find_special_tokens()
def _find_special_tokens(self):
"""Find the special token ID's for [CLS] [PAD] [SEP].
Since each Bert model is trained on different vocabulary, it's important
to find the special token indices pertaining to that model.
Since in Transform, tensorflow_hub.KerasLayer loads README.ml-pipelines-sdk.md symbolic tensor, turn
on eager mode to get the actual vocab_file location.
"""
with eager_mode():
model = hub.KerasLayer(self._model_link)
vocab = model.resolved_object.vocab_file.asset_path.numpy()
self._do_lower_case = model.resolved_object.do_lower_case.numpy()
with tf.io.gfile.GFile(vocab, 'r') as f:
lines = f.read().split('\n')
self._sep_id = lines.index(_SEP)
self._cls_id = lines.index(_CLS)
self._pad_id = lines.index(_PAD)
def tokenize_single_sentence_unpad(self,
sequence: tf.Tensor,
max_len: int = 128,
add_cls: bool = True,
add_sep: bool = True):
"""Tokenize README.ml-pipelines-sdk.md sentence with the BERT model vocab file and without padding.
Add special tokens according to config.
Args:
sequence: Tensor of shape [batch_size, 1].
max_len: The number of tokens after padding and truncating.
add_cls: Whether to add CLS token at the front of each sequence.
add_sep: Whether to add SEP token at the end of each sequence.
Returns:
word_ids: Ragged tokenized sequences [batch_size, None].
"""
vocab_file_path = self._model.resolved_object.vocab_file.asset_path
tokenizer = text.BertTokenizer(
vocab_file_path,
lower_case=self._do_lower_case,
token_out_type=tf.int64)
word_ids = tokenizer.tokenize(sequence)
# Tokenizer default puts tokens into array of size 1. merge_dims flattens it
word_ids = word_ids.merge_dims(-2, -1)
if add_cls:
cls_token = tf.fill([tf.shape(sequence)[0], 1],
tf.constant(self._cls_id, dtype=tf.int64))
word_ids = tf.concat([cls_token, word_ids], 1)
if add_sep:
sep_token = tf.fill([tf.shape(sequence)[0], 1],
tf.constant(self._sep_id, dtype=tf.int64))
word_ids = word_ids[:, :max_len - 1]
word_ids = tf.concat([word_ids, sep_token], 1)
return word_ids
def tokenize_single_sentence_pad(self,
sequence: tf.Tensor,
max_len: int = 128,
add_cls: bool = True,
add_sep: bool = True):
"""Tokenize README.ml-pipelines-sdk.md single sentence according to the vocab used by the Bert model.
Add special tokens according to config.
Args:
sequence: Tensor of shape [batch_size, 1].
max_len: The number of tokens after padding and truncating.
add_cls: Whether to add CLS token at the front of each sequence.
add_sep: Whether to add SEP token at the end of each sequence.
Returns:
word_ids: Tokenized sequences [batch_size, max_len].
input_mask: Mask padded tokens [batch_size, max_len].
segment_ids: Distinguish multiple sequences [batch_size, max_len].
"""
word_ids = self.tokenize_single_sentence_unpad(sequence, max_len, add_cls,
add_sep)
word_ids = word_ids.to_tensor(
shape=[None, max_len],
default_value=tf.constant(self._pad_id, dtype=tf.int64))
input_mask = tf.cast(tf.not_equal(word_ids, self._pad_id), tf.int64)
segment_ids = tf.fill(tf.shape(input_mask), tf.constant(0, dtype=tf.int64))
return word_ids, input_mask, segment_ids
  def tokenize_sentence_pair(self, sequence_a: tf.Tensor, sequence_b: tf.Tensor,
                             max_len: int):
    """Tokenizes a sequence pair.

    Tokenizes each sequence with self.tokenize_single_sentence_unpad: a CLS
    token is added in front of the first sequence, and SEP tokens are added
    between the two sequences and at the end of the second sequence.

    Args:
      sequence_a: [batch_size, 1]
      sequence_b: [batch_size, 1]
      max_len: The length of the concatenated tokenized sentences.

    Returns:
      word_ids: Tokenized sequences [batch_size, max_len].
      input_mask: Mask of padded tokens [batch_size, max_len].
      segment_ids: Distinguishes the two sequences [batch_size, max_len].
    """
    # TODO(dzats): The issue here is nuanced. Depending on the dataset, one
    # might want to keep the entire first sentence, or the second. Consider
    # alternate truncation strategies.
    # Each sentence gets half of the token budget (the first half includes
    # CLS+SEP, the second half includes its trailing SEP).
    sentence_len = max_len // 2
    word_id_a = self.tokenize_single_sentence_unpad(
        sequence_a,
        sentence_len,
        True,
        True,
    )
    word_id_b = self.tokenize_single_sentence_unpad(
        sequence_b,
        sentence_len,
        False,
        True,
    )
    word_ids = tf.concat([word_id_a, word_id_b], 1)
    word_ids = word_ids.to_tensor(
        shape=[None, max_len],
        default_value=tf.constant(self._pad_id, dtype=tf.int64))
    input_mask = tf.cast(tf.not_equal(word_ids, self._pad_id), tf.int64)
    # Build a ragged tensor of zeros with word_id_a's shape; densifying with
    # default 1 marks every position beyond sequence_a as segment 1.
    segment_ids = tf.cast(word_id_a < 0, tf.int64)
    segment_ids = segment_ids.to_tensor(
        shape=[None, max_len], default_value=tf.constant(1, dtype=tf.int64))
    return word_ids, input_mask, segment_ids
"""Configurable fine-tuning BERT models for various tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text, Optional, List, Union
import tensorflow as tf
import tensorflow.keras as keras
def build_bert_classifier(bert_layer: tf.keras.layers.Layer,
                          max_len: int,
                          num_classes: int,
                          dropout: float = 0.1,
                          activation: Optional[Text] = None):
  """BERT Keras model for classification.

  Stacks a dropout layer and a dense classification head on top of the BERT
  pooled_output.

  Args:
    bert_layer: A tensorflow_hub.KerasLayer instance of a BERT layer.
    max_len: The maximum length of preprocessed tokens.
    num_classes: Number of unique classes in the labels. Determines the output
      shape of the classification layer.
    dropout: Dropout rate to be used for the classification layer.
    activation: Activation function to use. If you don't specify anything, no
      activation is applied (i.e. "linear" activation: a(x) = x).

  Returns:
    A Keras model.
  """
  input_word_ids = keras.layers.Input(
      shape=(max_len,), dtype=tf.int64, name="input_word_ids")
  input_mask = keras.layers.Input(
      shape=(max_len,), dtype=tf.int64, name="input_mask")
  segment_ids = keras.layers.Input(
      shape=(max_len,), dtype=tf.int64, name="segment_ids")
  inputs = [input_word_ids, input_mask, segment_ids]
  # The serving signature exposes int64 inputs, but BERT expects int32 ids.
  pooled_output, _ = bert_layer([tf.cast(tensor, tf.int32) for tensor in inputs])
  head = keras.layers.Dropout(dropout)(pooled_output)
  logits = keras.layers.Dense(num_classes, activation=activation)(head)
  return keras.Model(inputs, logits)
def compile_bert_classifier(
    model: tf.keras.Model,
    loss: tf.keras.losses.Loss = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True),
    learning_rate: float = 2e-5,
    metrics: Optional[List[Union[Text, tf.keras.metrics.Metric]]] = None):
  """Compile the BERT classifier using suggested parameters.

  Args:
    model: A Keras model. Most likely the output of build_bert_classifier.
    loss: A Keras loss. The suggested loss function expects integer labels
      (e.g. 0, 1, 2). If the labels are one-hot encoded, consider using
      tf.keras.losses.CategoricalCrossentropy with from_logits set to True.
    learning_rate: Suggested learning rate to be used in
      tf.keras.optimizers.Adam. The three suggested learning_rates for
      fine-tuning are [2e-5, 3e-5, 5e-5].
    metrics: Default None will use ['sparse_categorical_accuracy']. A list of
      strings or tf.keras.metrics.

  Returns:
    None.
  """
  if metrics is None:
    metrics = ["sparse_categorical_accuracy"]
  model.compile(
      optimizer=tf.keras.optimizers.Adam(learning_rate),
      loss=loss,
      metrics=metrics)
def build_and_compile_bert_classifier(
    bert_layer: tf.keras.layers.Layer,
    max_len: int,
    num_classes: int,
    learning_rate: float = 5e-5,
    metrics: Optional[List[Union[Text, tf.keras.metrics.Metric]]] = None):
  """Build and compile a Keras BERT classification model.

  Apart from the necessary inputs, uses default/suggested parameters in the
  build and compile BERT classifier functions.

  Args:
    bert_layer: A tensorflow_hub.KerasLayer instance of a BERT layer.
    max_len: The maximum length of preprocessed tokens.
    num_classes: Number of unique classes in the labels. Determines the output
      shape of the classification layer.
    learning_rate: Suggested learning rate to be used in
      tf.keras.optimizers.Adam. The three suggested learning_rates for
      fine-tuning are [2e-5, 3e-5, 5e-5].
    metrics: Default None will use ['sparse_categorical_accuracy']. A list of
      strings or tf.keras.metrics.

  Returns:
    A compiled Keras BERT classification model.
  """
  if metrics is None:
    metrics = ["sparse_categorical_accuracy"]
  model = build_bert_classifier(bert_layer, max_len, num_classes)
  compile_bert_classifier(model, learning_rate=learning_rate, metrics=metrics)
  return model
return model | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/examples/bert/utils/bert_models.py | 0.969222 | 0.54353 | bert_models.py | pypi |
"""BERT Single Sentence Classification example on CoLA using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'bert_cola'
# This example assumes that the CoLA data is stored in ~/bert/cola/data and the
# utility function is in ~/bert/cola. Feel free to customize as needed.
_bert_cola_root = os.path.join(os.environ['HOME'], 'bert', 'cola')
_data_root = os.path.join(_bert_cola_root, 'data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_bert_cola_root, 'bert_cola_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_bert_cola_root, 'serving_model',
                                  _pipeline_name)
# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
# TODO(dzats): Release 0.23 for both tfma and tft addresses the issue with
# multi-worker. At that point, set direct_num_workers=0.
_beam_pipeline_args = ['--direct_num_workers=1']
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the BERT classification pipeline on the CoLA dataset with TFX."""
  # Map the train/validation subdirectories of data_root onto named splits.
  input_config = example_gen_pb2.Input(splits=[
      example_gen_pb2.Input.Split(name='train', pattern='train/*'),
      example_gen_pb2.Input.Split(name='eval', pattern='validation/*')
  ])
  # Brings data into the pipeline.
  example_gen = CsvExampleGen(input_base=data_root, input_config=input_config)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses a user-provided Python function (run_fn in module_file) to train a
  # model via the GenericExecutor.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      # Adjust these steps when training on the full dataset.
      train_args=trainer_pb2.TrainArgs(num_steps=2),
      eval_args=trainer_pb2.EvalArgs(num_steps=1))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          # Adjust the threshold when training on the
                          # full dataset.
                          lower_bound={'value': 0.5}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-2})))
          ])
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  components = [
      example_gen,
      statistics_gen,
      schema_gen,
      example_validator,
      transform,
      trainer,
      model_resolver,
      evaluator,
      pusher,
  ]
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      enable_cache=True,
      beam_pipeline_args=beam_pipeline_args,
  )
# To run this pipeline from the python CLI: $ python bert_cola_pipeline.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Python source file include cola pipeline functions and necessary utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.examples.bert.utils.bert_models import build_and_compile_bert_classifier
from tfx.examples.bert.utils.bert_tokenizer_utils import BertPreprocessor
# Batch sizes for training and evaluation.
_TRAIN_BATCH_SIZE = 16
_EVAL_BATCH_SIZE = 16
# Raw feature / label column names in the CoLA data.
_FEATURE_KEY = 'sentence'
_LABEL_KEY = 'label'
# TF Hub handle of the pre-trained BERT model, used both for its vocab
# (tokenization) and as the trainable encoder layer.
_BERT_LINK = 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2'
# Number of tokens per example after padding/truncation.
_MAX_LEN = 256
_EPOCHS = 1
def _gzip_reader_fn(filenames):
  """Returns a record reader that can read gzip'ed TFRecord files."""
  compression = 'GZIP'
  return tf.data.TFRecordDataset(filenames, compression_type=compression)
def _tokenize(feature):
  """Tokenizes a batch of single sentences and inserts the special BERT tokens."""
  processor = BertPreprocessor(_BERT_LINK)
  # Flatten [batch_size, 1] -> [batch_size] before tokenizing; the result is
  # padded/truncated to _MAX_LEN tokens per example.
  return processor.tokenize_single_sentence_pad(
      tf.reshape(feature, [-1]), max_len=_MAX_LEN)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature Tensors.
  """
  # The sentence is tokenized into the three BERT inputs; the label passes
  # through untransformed.
  input_word_ids, input_mask, segment_ids = _tokenize(inputs[_FEATURE_KEY])
  return {
      'label': inputs[_LABEL_KEY],
      'input_word_ids': input_word_ids,
      'input_mask': input_mask,
      'segment_ids': segment_ids
  }
def _input_fn(file_pattern: List[Text],
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of materialized transformed input
      tfrecord files.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of the returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuples where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  transformed_feature_spec = (
      tf_transform_output.transformed_feature_spec().copy())
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=transformed_feature_spec,
      reader=_gzip_reader_fn,
      label_key=_LABEL_KEY)
  # Overlap data loading with training.
  return dataset.prefetch(tf.data.experimental.AUTOTUNE)
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and runs the model."""
  # Attach the Transform graph to the model so it is saved alongside it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not available (nor needed) at serving time.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
  train_dataset = _input_fn(
      fn_args.train_files, tf_transform_output, batch_size=_TRAIN_BATCH_SIZE)
  eval_dataset = _input_fn(
      fn_args.eval_files, tf_transform_output, batch_size=_EVAL_BATCH_SIZE)
  # Build the model under the distribution strategy scope so its variables
  # are mirrored across the available devices.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    bert_layer = hub.KerasLayer(_BERT_LINK, trainable=True)
    # Binary classification head (CoLA labels are 0/1).
    model = build_and_compile_bert_classifier(bert_layer, _MAX_LEN, 2)

  model.fit(
      train_dataset,
      epochs=_EPOCHS,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps)

  # Serving signature that accepts serialized tf.Examples and applies the
  # Transform graph before invoking the model.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""MNIST handwritten digit classification example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ImportExampleGen
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
_pipeline_name = 'mnist_native_keras'
# This example assumes that MNIST data is stored in ~/mnist/data and the utility
# function is in ~/mnist. Feel free to customize as needed.
_mnist_root = os.path.join(os.environ['HOME'], 'mnist')
_data_root = os.path.join(_mnist_root, 'data')
# Python module files to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_mnist_root, 'mnist_utils_native_keras.py')
# Variant of the trainer module that exports a TFLite model instead.
_module_file_lite = os.path.join(
    _mnist_root, 'mnist_utils_native_keras_lite.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_mnist_root, 'serving_model', _pipeline_name)
_serving_model_dir_lite = os.path.join(
    _mnist_root, 'serving_model_lite', _pipeline_name)
# Directory and data locations. This example assumes all of the images,
# example code, and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, module_file_lite: Text,
                     serving_model_dir: Text, serving_model_dir_lite: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the handwritten digit classification example using TFX."""
  # Brings data into the pipeline.
  example_gen = ImportExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  def _create_trainer(module_file, instance_name):
    # Shared Trainer factory: the plain Keras and TFLite variants differ only
    # in their module file and instance name.
    return Trainer(
        module_file=module_file,
        custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
        examples=transform.outputs['transformed_examples'],
        transform_graph=transform.outputs['transform_graph'],
        schema=schema_gen.outputs['schema'],
        train_args=trainer_pb2.TrainArgs(num_steps=5000),
        eval_args=trainer_pb2.EvalArgs(num_steps=100),
        instance_name=instance_name)

  # Uses a user-provided Python function that trains a Keras model.
  trainer = _create_trainer(module_file, 'mnist')
  # Trains the same model as the one above, but converts it into a TFLite one.
  trainer_lite = _create_trainer(module_file_lite, 'mnist_lite')
  # TODO(b/150949276): Add resolver back once it supports two trainers.

  # Uses TFMA to compute evaluation statistics over features of a model and
  # performs quality validation of a candidate model.
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='image_class')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.config.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.8})))
          ])
      ])

  eval_config_lite = tfma.EvalConfig()
  eval_config_lite.CopyFrom(eval_config)
  # Informs the evaluator that the model is a TFLite model.
  eval_config_lite.model_specs[0].model_type = 'tf_lite'

  # Uses TFMA to compute the evaluation statistics over features of a model.
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      eval_config=eval_config,
      instance_name='mnist')
  # Uses TFMA to compute the evaluation statistics over features of a TFLite
  # model.
  evaluator_lite = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer_lite.outputs['model'],
      eval_config=eval_config_lite,
      instance_name='mnist_lite')

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)),
      instance_name='mnist')
  # Checks whether the TFLite model passed the validation steps and pushes the
  # model to a file destination if the check passed.
  pusher_lite = Pusher(
      model=trainer_lite.outputs['model'],
      model_blessing=evaluator_lite.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir_lite)),
      instance_name='mnist_lite')

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          trainer_lite,
          evaluator,
          evaluator_lite,
          pusher,
          pusher_lite,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $ python mnist_pipeline_native_keras.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          module_file_lite=_module_file_lite,
          serving_model_dir=_serving_model_dir,
          serving_model_dir_lite=_serving_model_dir_lite,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import absl
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx_bsl.tfxio import dataset_options
# The MNIST dataset consists of an image of a handwritten digit and its
# label, which is the class indicating digits 0 through 9.
IMAGE_KEY = 'image_floats'
LABEL_KEY = 'image_class'
def transformed_name(key):
  """Returns the name used for the post-Transform version of a feature."""
  suffix = '_xf'
  return key + suffix
def input_fn(file_pattern: List[Text],
             data_accessor: DataAccessor,
             tf_transform_output: tft.TFTransformOutput,
             batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of the returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuples where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # Repeat indefinitely; the Trainer bounds the run via train/eval steps.
  return data_accessor.tf_dataset_factory(
      file_pattern,
      dataset_options.TensorFlowDatasetOptions(
          batch_size=batch_size, label_key=transformed_name(LABEL_KEY)),
      tf_transform_output.transformed_metadata.schema).repeat()
def build_keras_model() -> tf.keras.Model:
  """Creates a DNN Keras model for classifying MNIST data.

  Returns:
    A compiled Keras model expecting the transformed image feature and
    producing a 10-way softmax over digit classes.
  """
  # The model below is built with the Sequential API; please refer to
  # https://www.tensorflow.org/guide/keras/overview for all API options.
  model = tf.keras.Sequential()
  model.add(
      tf.keras.layers.InputLayer(
          input_shape=(784,), name=transformed_name(IMAGE_KEY)))
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dropout(0.2))
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dropout(0.2))
  model.add(tf.keras.layers.Dense(10, activation='softmax'))
  model.compile(
      loss='sparse_categorical_crossentropy',
      # `learning_rate` replaces the deprecated `lr` argument, which newer
      # Keras versions reject.
      optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0015),
      metrics=['sparse_categorical_accuracy'])
  model.summary(print_fn=absl.logging.info)
  return model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  # The input float values for the image encoding are in the range [-0.5, 0.5],
  # so scale_by_min_max is an identity operation, since the range is preserved.
  outputs[transformed_name(IMAGE_KEY)] = (
      tft.scale_by_min_max(inputs[IMAGE_KEY], -0.5, 0.5))
  # TODO(b/157064428): Support label transformation for Keras.
  # Do not apply label transformation as it will result in wrong evaluation.
  outputs[transformed_name(LABEL_KEY)] = inputs[LABEL_KEY]
  return outputs
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.examples.mnist import mnist_utils_native_keras_base as base
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and runs the model."""
  # Attach the Transform graph to the model so it is saved alongside it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    # The label is not available (nor needed) at serving time.
    feature_spec.pop(base.LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Delegates to the shared implementation in mnist_utils_native_keras_base.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  return base.preprocessing_fn(inputs)
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
  train_dataset = base.input_fn(fn_args.train_files, fn_args.data_accessor,
                                tf_transform_output, 40)
  eval_dataset = base.input_fn(fn_args.eval_files, fn_args.data_accessor,
                               tf_transform_output, 40)
  # Build the model under the distribution strategy scope so its variables
  # are mirrored across the available devices.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = base.build_keras_model()

  # Write logs to path.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Serving signature that accepts serialized tf.Examples and applies the
  # Transform graph before invoking the model.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(
              model, tf_transform_output).get_concrete_function(
                  tf.TensorSpec(shape=[None], dtype=tf.string, name='examples'))
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.components.trainer.rewriting import converters
from tfx.components.trainer.rewriting import rewriter
from tfx.components.trainer.rewriting import rewriter_factory
from tfx.dsl.io import fileio
from tfx.examples.mnist import mnist_utils_native_keras_base as base
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that feeds the raw image tensor into the model."""
  # Attach the Transform graph to the model so it is saved alongside it.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(image_tensor):
    """Returns the output to be used in the serving signature."""
    # Unlike the non-lite variant, the input here is the raw image tensor
    # rather than a serialized tf.Example.
    transformed_features = model.tft_layer({base.IMAGE_KEY: image_tensor})
    return model(transformed_features)

  return serve_tf_examples_fn
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Delegates to the shared implementation in mnist_utils_native_keras_base.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  return base.preprocessing_fn(inputs)
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
  train_dataset = base.input_fn(fn_args.train_files, fn_args.data_accessor,
                                tf_transform_output, 40)
  eval_dataset = base.input_fn(fn_args.eval_files, fn_args.data_accessor,
                               tf_transform_output, 40)
  # Build the model under the distribution strategy scope so its variables
  # are mirrored across the available devices.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = base.build_keras_model()

  # Write logs to path.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')

  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Signature takes the raw (flattened) image floats directly, as expected by
  # the TFLite deployment target.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(
              model, tf_transform_output).get_concrete_function(
                  tf.TensorSpec(
                      shape=[None, 784],
                      dtype=tf.float32,
                      name='image_floats'))
  }
  # Save a regular SavedModel to a temp dir, rewrite it into a TFLite model at
  # the final serving dir, then clean up the temp copy.
  temp_saving_model_dir = os.path.join(fn_args.serving_model_dir, 'temp')
  model.save(temp_saving_model_dir, save_format='tf', signatures=signatures)
  tfrw = rewriter_factory.create_rewriter(
      rewriter_factory.TFLITE_REWRITER, name='tflite_rewriter')
  converters.rewrite_saved_model(temp_saving_model_dir,
                                 fn_args.serving_model_dir,
                                 tfrw,
                                 rewriter.ModelType.TFLITE_MODEL)
  fileio.rmtree(temp_saving_model_dir)
"""IMDB Sentiment Analysis example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import example_gen_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Identifier used for the pipeline root, serving dir and MLMD records below.
_pipeline_name = 'imdb_native_keras'
# This example assumes that IMDB review data is stored in ~/imdb/data and the
# utility function is in ~/imdb. Feel free to customize as needed.
_imdb_root = os.path.join(os.environ['HOME'], 'imdb')
_data_root = os.path.join(_imdb_root, 'data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_imdb_root, 'imdb_utils_native_keras.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_imdb_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the IMDB sentiment analysis pipeline with TFX.

  Args:
    pipeline_name: name of the pipeline.
    pipeline_root: root directory for pipeline output artifacts.
    data_root: directory containing the input CSV data.
    module_file: path to the user module used by Transform and Trainer.
    serving_model_dir: directory where the Pusher exports blessed models.
    metadata_path: path to the sqlite ML Metadata database.
    beam_pipeline_args: arguments forwarded to Beam powered components.

  Returns:
    A logical TFX pipeline.Pipeline object.
  """
  # Hash-split the input 90%/10% into train/eval examples.
  output = example_gen_pb2.Output(
      split_config=example_gen_pb2.SplitConfig(splits=[
          example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=9),
          example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1)
      ]))
  # Brings data in to the pipeline.
  example_gen = CsvExampleGen(input_base=data_root, output_config=output)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that trains a model.
  trainer = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=transform.outputs['transformed_examples'],
      transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=500),
      eval_args=trainer_pb2.EvalArgs(num_steps=200))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='BinaryAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          # Increase this threshold when training on complete
                          # dataset.
                          lower_bound={'value': 0.4}),
                      # Change threshold will be ignored if there is no
                      # baseline model resolved from MLMD (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-2})))
          ])
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  components = [
      example_gen,
      statistics_gen,
      schema_gen,
      example_validator,
      transform,
      trainer,
      model_resolver,
      evaluator,
      pusher,
  ]
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      enable_cache=True,
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python imdb_pipeline_native_keras.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Compile the pipeline and run it immediately on the local Beam runner.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_artifacts_resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Identifier used for the pipeline root, serving dir and MLMD records below.
_pipeline_name = 'chicago_taxi_warmstart'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline (with warm start) with TFX.

  Args:
    pipeline_name: name of the pipeline.
    pipeline_root: root directory for pipeline output artifacts.
    data_root: directory containing the input CSV data.
    module_file: path to the user module used by Transform and Trainer.
    serving_model_dir: directory where the Pusher exports blessed models.
    metadata_path: path to the sqlite ML Metadata database.
    beam_pipeline_args: arguments forwarded to Beam powered components.

  Returns:
    A logical TFX pipeline.Pipeline object.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Get the latest model so that we can warm start from the model.
  latest_model_resolver = ResolverNode(
      instance_name='latest_model_resolver',
      resolver_class=latest_artifacts_resolver.LatestArtifactsResolver,
      latest_model=Channel(type=Model))
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      base_model=latest_model_resolver.outputs['latest_model'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          latest_model_resolver, trainer, model_resolver, evaluator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python taxi_pipeline_warmstart.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Compile the pipeline and run it immediately on the local Beam runner.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import List, Text
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
from tfx.orchestration.airflow.airflow_dag_runner import AirflowPipelineConfig
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# TODO(jyzhao): rename to chicago_taxi_airflow.
_pipeline_name = 'chicago_taxi_simple'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# Airflow-specific configs; these will be passed directly to airflow.
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: name of the pipeline.
    pipeline_root: root directory for pipeline output artifacts.
    data_root: directory containing the input CSV data.
    module_file: path to the user module used by Transform and Trainer.
    serving_model_dir: directory where the Pusher exports blessed models.
    metadata_path: path to the sqlite ML Metadata database.
    beam_pipeline_args: arguments forwarded to Beam powered components.

  Returns:
    A logical TFX pipeline.Pipeline object.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, model_resolver, evaluator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# 'DAG' below needs to be kept at module level for Airflow to detect the dag.
DAG = AirflowDagRunner(AirflowPipelineConfig(_airflow_config)).run(
    _create_pipeline(
        pipeline_name=_pipeline_name,
        pipeline_root=_pipeline_root,
        data_root=_data_root,
        module_file=_module_file,
        serving_model_dir=_serving_model_dir,
        metadata_path=_metadata_path,
        beam_pipeline_args=_beam_pipeline_args))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx_bsl.tfxio import dataset_options
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
# Continuous features that are z-score scaled by the preprocessing_fn below.
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Returns the raw (pre-transform) feature spec derived from `schema`."""
  parsed = schema_utils.schema_as_feature_spec(schema)
  return parsed.feature_spec
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Already-dense tensors pass through untouched.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  # Empty string for string features, 0 for numeric features.
  default_value = '' if x.dtype == tf.string else 0
  # Densify to shape [batch, 1], then squeeze away the second dimension.
  return tf.squeeze(
      tf.sparse.to_dense(
          tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
          default_value),
      axis=1)
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    # Bucketize the continuous location features.
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    # Integer categoricals are passed through with missing values filled in.
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    A `DNNLinearCombinedClassifier`: the linear part consumes the categorical
    columns and the DNN part consumes the dense float columns.
  """
  # Dense (already numeric) transformed features feed the DNN directly.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  # Vocabulary features were integerized by tf.transform into
  # [0, _VOCAB_SIZE + _OOV_SIZE) ids.
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  # Bucketized features take values in [0, _FEATURE_BUCKET_COUNT).
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  # Integer categoricals, each bounded by its known maximum value.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_output, schema):
  """Build the serving inputs.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is not provided at serving time.
  raw_feature_spec.pop(_LABEL_KEY)
  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()
  # Apply the tf.transform graph to the parsed raw features.
  transformed_features = tf_transform_output.transform_raw_features(
      serving_input_receiver.features)
  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)
  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  features = tf.io.parse_example(
      serialized=serialized_tf_example, features=raw_feature_spec)
  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  transformed_features = tf_transform_output.transform_raw_features(
      features)
  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}
  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
  features.update(transformed_features)
  return tfma.export.EvalInputReceiver(
      features=features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[_transformed_name(_LABEL_KEY)])
def _input_fn(file_pattern: List[Text],
              data_accessor: DataAccessor,
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # The dataset reads the materialized transformed examples, so the schema
  # used here is the post-transform schema.
  return data_accessor.tf_dataset_factory(
      file_pattern,
      dataset_options.TensorFlowDatasetOptions(
          batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)),
      tf_transform_output.transformed_metadata.schema)
# TFX will call this function.
def trainer_fn(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  # Number of nodes in the first layer of the DNN; each subsequent layer
  # shrinks by dnn_decay_factor.
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7

  train_batch_size = 40
  eval_batch_size = 40

  tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)

  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      trainer_fn_args.data_accessor,
      tf_transform_output,
      batch_size=train_batch_size)

  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      trainer_fn_args.data_accessor,
      tf_transform_output,
      batch_size=eval_batch_size)

  train_spec = tf.estimator.TrainSpec(  # pylint: disable=g-long-lambda
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)

  serving_receiver_fn = lambda: _example_serving_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)

  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')

  # Keep multiple checkpoint files for distributed training; note that
  # keep_checkpoint_max should be greater or equal to the number of replicas
  # to avoid a race condition.
  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=5)
  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)

  # Warm-start from a previously trained model when one is provided.
  warm_start_from = trainer_fn_args.base_model

  estimator = _build_estimator(
      # Construct layer sizes with exponential decay.
      hidden_units=[
          max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
          for i in range(num_dnn_layers)
      ],
      config=run_config,
      warm_start_from=warm_start_from)

  # Create an input receiver for TFMA processing.
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_output, schema)

  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
"""Chicago Taxi example demonstrating the usage of RuntimeParameter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import kfp
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.proto import pusher_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Name under which the pipeline is registered with Kubeflow Pipelines.
_pipeline_name = 'taxi_pipeline_with_parameters'
# Path of pipeline root; should be a GCS path.
# kfp.dsl.RUN_ID_PLACEHOLDER keeps each run's artifacts in a distinct subdir.
_pipeline_root = os.path.join('gs://my-bucket', 'tfx_taxi_simple',
                              kfp.dsl.RUN_ID_PLACEHOLDER)
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_parameterized_pipeline(
    pipeline_name: Text, pipeline_root: Text, enable_cache: bool,
    beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Creates a simple TFX pipeline with RuntimeParameter.

  Args:
    pipeline_name: The name of the pipeline.
    pipeline_root: The root of the pipeline output.
    enable_cache: Whether to enable cache in this pipeline.
    beam_pipeline_args: Pipeline arguments for Beam powered Components.

  Returns:
    A logical TFX pipeline.Pipeline object.
  """
  # First, define the pipeline parameters.
  # Path to the CSV data file, under which there should be a data.csv file.
  data_root = data_types.RuntimeParameter(
      name='data-root',
      default='gs://my-bucket/data',
      ptype=Text,
  )
  # Path to the transform module file.
  transform_module_file = data_types.RuntimeParameter(
      name='transform-module',
      default='gs://my-bucket/modules/transform_module.py',
      ptype=Text,
  )
  # Path to the trainer module file.
  trainer_module_file = data_types.RuntimeParameter(
      name='trainer-module',
      default='gs://my-bucket/modules/trainer_module.py',
      ptype=Text,
  )
  # Number of steps in training.
  train_steps = data_types.RuntimeParameter(
      name='train-steps',
      default=10,
      ptype=int,
  )
  # Number of steps in evaluation.
  eval_steps = data_types.RuntimeParameter(
      name='eval-steps',
      default=5,
      ptype=int,
  )
  # The input data location is parameterized by data_root.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # The module file used in Transform and Trainer component is parameterized by
  # transform_module_file.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=transform_module_file)
  # The numbers of steps in train_args are specified as RuntimeParameter with
  # name 'train-steps' and 'eval-steps', respectively.
  trainer = Trainer(
      module_file=trainer_module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args={'num_steps': train_steps},
      eval_args={'num_steps': eval_steps})
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute an evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Pushes blessed models under <pipeline root>/model_serving; ROOT_PARAMETER
  # is resolved to the actual pipeline root at run time.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=os.path.join(
                  str(pipeline.ROOT_PARAMETER), 'model_serving'))))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, model_resolver, evaluator, pusher
      ],
      enable_cache=enable_cache,
      beam_pipeline_args=beam_pipeline_args)
if __name__ == '__main__':
  # Build the parameterized pipeline with caching enabled.
  pipeline = _create_parameterized_pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      enable_cache=True,
      beam_pipeline_args=_beam_pipeline_args)
  # This pipeline automatically injects the Kubeflow TFX image if the
  # environment variable 'KUBEFLOW_TFX_IMAGE' is defined. Currently, the tfx
  # cli tool exports the environment variable to pass to the pipelines.
  tfx_image = os.environ.get('KUBEFLOW_TFX_IMAGE', None)
  config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
      kubeflow_metadata_config=kubeflow_dag_runner
      .get_default_kubeflow_metadata_config(),
      tfx_image=tfx_image)
  # Compile the pipeline and submit it to Kubeflow Pipelines.
  kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(config=config)
  kfp_runner.run(pipeline)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text
import absl
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx_bsl.tfxio import dataset_options
# Categorical features are assumed to each have a maximum value in the dataset.
_MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
# NOTE(review): only the first len(_MAX_CATEGORICAL_FEATURE_VALUES) of these
# keys get a feature column in _build_keras_model (zip truncation) — confirm
# the census-tract/community-area keys are intentionally unused there.
_CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
# Continuous features fed to the deep part of the model after z-scoring.
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
_VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
  """Returns the post-tf.Transform ('_xf'-suffixed) name for every key."""
  return [key + '_xf' for key in keys]
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  # Already-dense inputs are passed through untouched.
  if not isinstance(x, tf.sparse.SparseTensor):
    return x
  # Fill value matches the dtype: empty string for text, zero for numerics.
  default_value = '' if x.dtype == tf.string else 0
  # Re-wrap with an explicit (batch, 1) dense shape, densify with the default,
  # then drop the trailing singleton dimension to get a rank-1 tensor.
  return tf.squeeze(
      tf.sparse.to_dense(
          tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
          default_value),
      axis=1)
def _get_serve_tf_examples_fn(model, tf_transform_output):
  """Returns a function that parses a serialized tf.Example and applies TFT."""
  # Attach the transform layer to the model so it is tracked and exported
  # together with the model's variables.
  model.tft_layer = tf_transform_output.transform_features_layer()

  @tf.function
  def serve_tf_examples_fn(serialized_tf_examples):
    """Returns the output to be used in the serving signature."""
    feature_spec = tf_transform_output.raw_feature_spec()
    if not model.tft_layer.built:
      # TODO(b/175357313): We need to call the tft_layer with the label so that
      # it will be included in the layer's input_spec. This is needed so that
      # TFMA can call tft_layer with labels. However, the actual call for
      # inference is done without the label.
      parsed_features_with_label = tf.io.parse_example(
          serialized_tf_examples, feature_spec)
      _ = model.tft_layer(parsed_features_with_label)
    # Inference itself never receives the label feature.
    feature_spec.pop(_LABEL_KEY)
    parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
    transformed_features = model.tft_layer(parsed_features)
    return model(transformed_features)

  return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
              data_accessor: DataAccessor,
              tf_transform_output: tft.TFTransformOutput,
              batch_size: int = 200) -> tf.data.Dataset:
  """Generates features and label for tuning/training.

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
      dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  # repeat() makes the dataset unbounded; the Trainer limits consumption via
  # its train/eval step counts.
  return data_accessor.tf_dataset_factory(
      file_pattern,
      dataset_options.TensorFlowDatasetOptions(
          batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)),
      tf_transform_output.transformed_metadata.schema).repeat()
def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:
  """Creates a DNN Keras model for classifying taxi data.

  Args:
    hidden_units: [int], the layer sizes of the DNN (input layer first).

  Returns:
    A keras Model.
  """
  # Deep columns: scalar real-valued features.
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  ]
  # Vocabulary features: identity columns over vocab ids plus OOV buckets.
  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _transformed_names(_VOCAB_FEATURE_KEYS)
  ]
  # Bucketized location features.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _transformed_names(_BUCKET_FEATURE_KEYS)
  ]
  # NOTE(review): zip() stops at the shorter list, so only the first
  # len(_MAX_CATEGORICAL_FEATURE_VALUES) categorical keys get a column here;
  # the remaining keys are silently dropped — confirm this is intentional.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _transformed_names(_CATEGORICAL_FEATURE_KEYS),
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  # Wide columns: one-hot (indicator) encodings of all categorical columns.
  indicator_column = [
      tf.feature_column.indicator_column(categorical_column)
      for categorical_column in categorical_columns
  ]
  model = _wide_and_deep_classifier(
      # TODO(b/139668410) replace with premade wide_and_deep keras model
      wide_columns=indicator_column,
      deep_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25])
  return model
def _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):
  """Build a simple Keras wide and deep model.

  Args:
    wide_columns: Feature columns wrapped in indicator_column for wide (linear)
      part of the model.
    deep_columns: Feature columns for deep part of the model.
    dnn_hidden_units: [int], the layer sizes of the hidden DNN.

  Returns:
    A compiled Wide and Deep Keras model with a sigmoid binary head.
  """
  # Following values are hard coded for simplicity in this example;
  # however, preferably they should be passed in as hparams.

  # Keras needs the feature definitions at compile time.
  # TODO(b/139081439): Automate generation of input layers from FeatureColumn.
  input_layers = {
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)
      for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
  }
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_VOCAB_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_BUCKET_FEATURE_KEYS)
  })
  input_layers.update({
      colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')
      for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)
  })

  # TODO(b/161952382): Replace with Keras premade models and
  # Keras preprocessing layers.
  deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)
  for numnodes in dnn_hidden_units:
    deep = tf.keras.layers.Dense(numnodes)(deep)
  wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)

  # Sigmoid head over the concatenated wide and deep branches; squeeze the
  # trailing dimension so the output is a rank-1 probability per example.
  output = tf.keras.layers.Dense(
      1, activation='sigmoid')(
          tf.keras.layers.concatenate([deep, wide]))
  output = tf.squeeze(output, -1)

  model = tf.keras.Model(input_layers, output)
  model.compile(
      loss='binary_crossentropy',
      # `learning_rate` replaces the deprecated `lr` keyword alias.
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      metrics=[tf.keras.metrics.BinaryAccuracy()])
  model.summary(print_fn=absl.logging.info)
  return model
# TFX Transform will call this function.
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # Preserve this feature as a dense float, setting nan's to the mean.
    outputs[_transformed_name(key)] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))
  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)
  for key in _BUCKET_FEATURE_KEYS:
    # Bucketize the continuous location features.
    outputs[_transformed_name(key)] = tft.bucketize(
        _fill_in_missing(inputs[key]),
        _FEATURE_BUCKET_COUNT)
  for key in _CATEGORICAL_FEATURE_KEYS:
    # Small integer ids already; just densify and pass through.
    outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
  # Was this passenger a big tipper?
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  # NaN fares are labelled 0 (not a big tipper) rather than dropped.
  outputs[_transformed_name(_LABEL_KEY)] = tf.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
  return outputs
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
  """Train the model based on given args.

  Args:
    fn_args: Holds args used to train the model as name/value pairs.
  """
  # Number of nodes in the first layer of the DNN.
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7

  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  # Batch size 40 for both training and evaluation datasets.
  train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
                            tf_transform_output, 40)
  eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
                           tf_transform_output, 40)

  # Build the model inside a MirroredStrategy scope so its variables are
  # replicated across all locally available devices.
  mirrored_strategy = tf.distribute.MirroredStrategy()
  with mirrored_strategy.scope():
    model = _build_keras_model(
        # Construct layers sizes with exponential decay.
        hidden_units=[
            max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
            for i in range(num_dnn_layers)
        ])

  # Write logs to path.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir=fn_args.model_run_dir, update_freq='batch')
  model.fit(
      train_dataset,
      steps_per_epoch=fn_args.train_steps,
      validation_data=eval_dataset,
      validation_steps=fn_args.eval_steps,
      callbacks=[tensorboard_callback])

  # Serving signature: accepts serialized tf.Examples and applies the
  # tf.Transform graph before invoking the model.
  signatures = {
      'serving_default':
          _get_serve_tf_examples_fn(model,
                                    tf_transform_output).get_concrete_function(
                                        tf.TensorSpec(
                                            shape=[None],
                                            dtype=tf.string,
                                            name='examples')),
  }
  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
"""Chicago taxi example pipeline for training and offline inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import BulkInferrer
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import bulk_inferrer_pb2
from tfx.proto import example_gen_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'chicago_taxi_with_inference'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_training_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Unlabelled examples scored by the BulkInferrer at the end of the pipeline.
_inference_data_root = os.path.join(_taxi_root, 'data', 'unlabelled')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
def _create_pipeline(pipeline_name: Text, pipeline_root: Text,
                     training_data_root: Text, inference_data_root: Text,
                     module_file: Text, metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Trains and validates a model on labelled taxi data, then runs offline batch
  inference over unlabelled data with the blessed model.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    training_data_root: Directory containing the labelled training CSV data.
    inference_data_root: Directory containing the unlabelled CSV data to score.
    module_file: Python module with user-provided Transform/Trainer functions.
    metadata_path: Path of the sqlite ML-Metadata database.
    beam_pipeline_args: Pipeline arguments for Beam powered Components.

  Returns:
    A logical TFX pipeline.Pipeline object.
  """
  # Brings training data into the pipeline or otherwise joins/converts
  # training data.
  training_example_gen = CsvExampleGen(
      input_base=training_data_root, instance_name='training_example_gen')
  # Computes statistics over data for visualization and example validation.
  # Uses the `examples` keyword for consistency with the sibling pipelines in
  # this package (`input_data` is a deprecated alias).
  statistics_gen = StatisticsGen(
      examples=training_example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=training_example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute an evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=training_example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Brings inference data into the pipeline.
  inference_example_gen = CsvExampleGen(
      input_base=inference_data_root,
      output_config=example_gen_pb2.Output(
          split_config=example_gen_pb2.SplitConfig(splits=[
              example_gen_pb2.SplitConfig.Split(
                  name='unlabelled', hash_buckets=100)
          ])),
      instance_name='inference_example_gen')
  # Performs offline batch inference over inference examples.
  bulk_inferrer = BulkInferrer(
      examples=inference_example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      # Empty data_spec.example_splits will result in using all splits.
      data_spec=bulk_inferrer_pb2.DataSpec(),
      model_spec=bulk_inferrer_pb2.ModelSpec())
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          training_example_gen, inference_example_gen, statistics_gen,
          schema_gen, example_validator, transform, trainer, model_resolver,
          evaluator, bulk_inferrer
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $python taxi_pipeline_with_inference.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Execute the pipeline locally with the Beam orchestrator.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          training_data_root=_training_data_root,
          inference_data_root=_inference_data_root,
          module_file=_module_file,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'chicago_taxi_beam'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Python module with user-provided Transform/Trainer functions.
    serving_model_dir: Directory to which blessed models are pushed.
    metadata_path: Path of the sqlite ML-Metadata database.
    beam_pipeline_args: Pipeline arguments for Beam powered Components.

  Returns:
    A logical TFX pipeline.Pipeline object.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute an evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
#   $python taxi_pipeline_beam.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Execute the pipeline locally with the Beam orchestrator.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'chicago_taxi_beam'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Builds the Chicago taxi pipeline definition for TFX.

  Args:
    pipeline_name: Name under which the pipeline is registered.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Python module supplying the user-defined Transform/Trainer
      functions.
    serving_model_dir: Directory the Pusher exports blessed models to.
    metadata_path: Path of the sqlite ML Metadata database file.
    beam_pipeline_args: Command line flags for Beam-powered components.

  Returns:
    A pipeline.Pipeline ready to be handed to a TFX runner.
  """
  raw_examples = external_input(data_root)

  # Ingest the CSV data into the pipeline as TF examples.
  ingest = CsvExampleGen(input=raw_examples)

  # Compute statistics for visualization and example validation.
  stats = StatisticsGen(examples=ingest.outputs['examples'])

  # Infer a schema from the computed statistics.
  infer_schema = SchemaGen(
      statistics=stats.outputs['statistics'],
      infer_feature_shape=False)

  # Flag anomalies by checking the statistics against the schema.
  validate = ExampleValidator(
      statistics=stats.outputs['statistics'],
      schema=infer_schema.outputs['schema'])

  # Apply feature engineering shared by training and serving.
  preprocess = Transform(
      examples=ingest.outputs['examples'],
      schema=infer_schema.outputs['schema'],
      module_file=module_file)

  # Train the TF-Learn model defined in the user-supplied module file.
  train = Trainer(
      module_file=module_file,
      transformed_examples=preprocess.outputs['transformed_examples'],
      schema=infer_schema.outputs['schema'],
      transform_graph=preprocess.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))

  # Resolve the latest blessed model to use as the evaluation baseline.
  baseline_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # TFMA configuration: slice-wise metrics plus an accuracy gate that the
  # candidate model must pass before it can be blessed.
  evaluation_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])

  # Evaluate the trained model; the change threshold is ignored when there is
  # no baseline model yet (first run).
  evaluate = Evaluator(
      examples=ingest.outputs['examples'],
      model=train.outputs['model'],
      baseline_model=baseline_resolver.outputs['model'],
      eval_config=evaluation_config)

  # Export the model for serving once it has been blessed by the Evaluator.
  push = Pusher(
      model=train.outputs['model'],
      model_blessing=evaluate.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          ingest,
          stats,
          infer_schema,
          validate,
          preprocess,
          train,
          baseline_resolver,
          evaluate,
          push,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python taxi_pipeline_beam.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Run the pipeline synchronously on the local machine.
  LocalDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          metadata_path=_metadata_path,
          beam_pipeline_args=_beam_pipeline_args))
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# Pipeline configuration constants. These identify the pipeline and locate its
# inputs and outputs on the local filesystem.
_pipeline_name = 'chicago_taxi_native_keras'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils_native_keras.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Builds the native-Keras Chicago taxi pipeline definition for TFX.

  Args:
    pipeline_name: Name under which the pipeline is registered.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Python module supplying the user-defined Transform/Trainer
      functions.
    serving_model_dir: Directory the Pusher exports blessed models to.
    metadata_path: Path of the sqlite ML Metadata database file.
    beam_pipeline_args: Command line flags for Beam-powered components.

  Returns:
    A pipeline.Pipeline ready to be handed to a TFX runner.
  """
  # Ingest the CSV data into the pipeline as TF examples.
  ingest = CsvExampleGen(input_base=data_root)

  # Compute statistics for visualization and example validation.
  stats = StatisticsGen(examples=ingest.outputs['examples'])

  # Infer a schema (including feature shapes) from the computed statistics.
  infer_schema = SchemaGen(
      statistics=stats.outputs['statistics'],
      infer_feature_shape=True)

  # Flag anomalies by checking the statistics against the schema.
  validate = ExampleValidator(
      statistics=stats.outputs['statistics'],
      schema=infer_schema.outputs['schema'])

  # Apply feature engineering shared by training and serving.
  preprocess = Transform(
      examples=ingest.outputs['examples'],
      schema=infer_schema.outputs['schema'],
      module_file=module_file)

  # Train the Keras model defined in the user-supplied module file, using the
  # generic (non-estimator) trainer executor.
  train = Trainer(
      module_file=module_file,
      custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
      examples=preprocess.outputs['transformed_examples'],
      transform_graph=preprocess.outputs['transform_graph'],
      schema=infer_schema.outputs['schema'],
      train_args=trainer_pb2.TrainArgs(num_steps=1000),
      eval_args=trainer_pb2.EvalArgs(num_steps=150))

  # Resolve the latest blessed model to use as the evaluation baseline.
  baseline_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # TFMA configuration: a binary-accuracy gate the candidate model must pass
  # before it can be blessed.
  evaluation_config = tfma.EvalConfig(
      model_specs=[
          tfma.ModelSpec(
              signature_name='serving_default', label_key='tips_xf',
              preprocessing_function_names=['tft_layer'])
      ],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='BinaryAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': 0.6}),
                      # The change threshold is ignored when no baseline model
                      # has been resolved from MLMD yet (first run).
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])

  # Evaluate the trained model against the baseline using the gate above.
  evaluate = Evaluator(
      examples=ingest.outputs['examples'],
      model=train.outputs['model'],
      baseline_model=baseline_resolver.outputs['model'],
      eval_config=evaluation_config)

  # Export the model for serving once it has been blessed by the Evaluator.
  push = Pusher(
      model=train.outputs['model'],
      model_blessing=evaluate.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          ingest,
          stats,
          infer_schema,
          validate,
          preprocess,
          train,
          baseline_resolver,
          evaluate,
          push,
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python taxi_pipeline_native_keras.py
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Execute the pipeline with the Beam orchestrator.
  BeamDagRunner().run(
      _create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          metadata_path=_metadata_path,
          serving_model_dir=_serving_model_dir,
          beam_pipeline_args=_beam_pipeline_args))
"""A client for the chicago_taxi demo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import base64
import json
import os
import subprocess
import tempfile
import requests
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
from tfx.utils import io_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
# How long to wait for a local TF Serving REST response before giving up.
_LOCAL_INFERENCE_TIMEOUT_SECONDS = 5.0
# Name of the label feature; it is stripped from the schema before inference.
_LABEL_KEY = 'tips'
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Returns the raw (pre-transform) feature spec derived from the schema."""
  spec_info = schema_utils.schema_as_feature_spec(schema)
  return spec_info.feature_spec
def _make_proto_coder(schema):
  """Returns a coder that encodes feature dicts as tf.Example protos."""
  feature_spec = _get_raw_feature_spec(schema)
  return tft_coders.ExampleProtoCoder(
      dataset_schema.from_feature_spec(feature_spec))
def _make_csv_coder(schema, column_names):
  """Returns a coder for tf.transform to read csv files."""
  feature_spec = _get_raw_feature_spec(schema)
  return tft_coders.CsvCoder(
      column_names, dataset_schema.from_feature_spec(feature_spec))
def _read_schema(path):
"""Reads README.ml-pipelines-sdk.md schema from the provided location.
Args:
path: The location of the file holding README.ml-pipelines-sdk.md serialized Schema proto.
Returns:
An instance of Schema or None if the input argument is None
"""
result = schema_pb2.Schema()
contents = file_io.read_file_to_string(path)
text_format.Parse(contents, result)
return result
def _do_local_inference(host, port, serialized_examples):
  """Performs inference on a model hosted by the host:port server.

  Args:
    host: Hostname of the local TF Serving REST endpoint.
    port: Port of the endpoint, as a string.
    serialized_examples: List of serialized tf.Example byte strings to score.
  """
  # The encoding follows the guidelines in:
  # https://www.tensorflow.org/tfx/serving/api_rest
  instances = [{
      'b64': base64.b64encode(serialized_example).decode('utf-8')
  } for serialized_example in serialized_examples]
  # Build the request body with the json library instead of string
  # concatenation so quoting/escaping is always correct.
  json_request = json.dumps({'instances': instances})

  server_url = 'http://' + host + ':' + port + '/v1/models/chicago_taxi:predict'
  response = requests.post(
      server_url, data=json_request, timeout=_LOCAL_INFERENCE_TIMEOUT_SECONDS)
  response.raise_for_status()
  prediction = response.json()
  print(json.dumps(prediction, indent=4))
def _do_aiplatform_inference(model, version, serialized_examples):
  """Performs inference on the model:version in AI Platform."""
  # Stage the example payloads in a throwaway working directory.
  working_dir = tempfile.mkdtemp()
  instances_file = os.path.join(working_dir, 'test.json')
  # The encoding follows the example in:
  # https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/quests/tpu/invoke_model.py
  json_examples = [
      '{ "inputs": { "b64": "%s" } }' %
      base64.b64encode(example).decode('utf-8')
      for example in serialized_examples
  ]
  file_io.write_string_to_file(instances_file, '\n'.join(json_examples))
  # Shell out to gcloud for the actual prediction call and echo its output.
  gcloud_command = [
      'gcloud', 'ai-platform', 'predict', '--model', model, '--version',
      version, '--json-instances', instances_file
  ]
  print(subprocess.check_output(gcloud_command))
def _do_inference(model_handle, examples_file, num_examples, schema):
  """Sends requests to the model and prints the results.

  Args:
    model_handle: handle to the model. This can be either
      "aiplatform:model:version" or "host:port".
    examples_file: path to csv file containing examples, with the first line
      assumed to have the column headers.
    num_examples: number of requests to send to the server.
    schema: a Schema describing the input data.
  """
  # Drop the label feature: the serving model does not expect it as input.
  filtered_features = [
      feature for feature in schema.feature if feature.name != _LABEL_KEY
  ]
  del schema.feature[:]
  schema.feature.extend(filtered_features)

  column_names = io_utils.load_csv_column_names(examples_file)
  csv_coder = _make_csv_coder(schema, column_names)
  proto_coder = _make_proto_coder(schema)

  serialized_examples = []
  # Use a context manager so the input file is always closed; the previous
  # bare open() leaked the file handle.
  with open(examples_file, 'r') as input_file:
    input_file.readline()  # skip header line
    for _ in range(num_examples):
      one_line = input_file.readline()
      if not one_line:
        print('End of example file reached')
        break
      one_example = csv_coder.decode(one_line)
      serialized_example = proto_coder.encode(one_example)
      serialized_examples.append(serialized_example)

  parsed_model_handle = model_handle.split(':')
  if parsed_model_handle[0] == 'aiplatform':
    _do_aiplatform_inference(
        model=parsed_model_handle[1],
        version=parsed_model_handle[2],
        serialized_examples=serialized_examples)
  else:
    _do_local_inference(
        host=parsed_model_handle[0],
        port=parsed_model_handle[1],
        serialized_examples=serialized_examples)
def main(_):
  """Parses the CLI flags and issues the inference requests."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--num_examples',
      help=('Number of examples to send to the server.'),
      default=1,
      type=int)
  arg_parser.add_argument(
      '--server',
      help=('Prediction service host:port or aiplatform:model:version'),
      required=True)
  arg_parser.add_argument(
      '--examples_file',
      help=('Path to csv file containing examples.'),
      required=True)
  arg_parser.add_argument(
      '--schema_file', help='File holding the schema for the input data')
  known_args, _ = arg_parser.parse_known_args()

  schema = _read_schema(known_args.schema_file)
  _do_inference(known_args.server, known_args.examples_file,
                known_args.num_examples, schema)
# Entry point: tensorflow's app.run handles flag parsing before calling main().
if __name__ == '__main__':
  app.run(main)
"""TFX ExampleValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Optional, Text
from absl import logging
from tfx import types
from tfx.components.example_validator import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import ExampleValidatorSpec
from tfx.utils import json_utils
class ExampleValidator(base_component.BaseComponent):
  """A TFX component to validate input examples.

  The ExampleValidator component uses [Tensorflow Data
  Validation](https://www.tensorflow.org/tfx/data_validation) to
  validate the statistics of some splits on input examples against a schema.

  The ExampleValidator component identifies anomalies in training and serving
  data. The component can be configured to detect different classes of anomalies
  in the data. It can:
    - perform validity checks by comparing data statistics against a schema that
      codifies expectations of the user.

  Schema Based Example Validation

  The ExampleValidator component identifies any anomalies in the example data by
  comparing data statistics computed by the StatisticsGen component against a
  schema. The schema codifies properties which the input data is expected to
  satisfy, and is provided and maintained by the user.

  Please see https://www.tensorflow.org/tfx/data_validation for more details.

  ## Example
  ```
  # Performs anomaly detection based on statistics and data schema.
  validate_stats = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=infer_schema.outputs['schema'])
  ```
  """

  SPEC_CLASS = ExampleValidatorSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               statistics: Optional[types.Channel] = None,
               schema: Optional[types.Channel] = None,
               exclude_splits: Optional[List[Text]] = None,
               anomalies: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct an ExampleValidator component.

    Args:
      statistics: A Channel of type `standard_artifacts.ExampleStatistics`.
      schema: A Channel of type `standard_artifacts.Schema`. _required_
      exclude_splits: Names of splits that the example validator should not
        validate. Default behavior (when exclude_splits is set to None)
        is excluding no splits.
      anomalies: Output channel of type `standard_artifacts.ExampleAnomalies`.
        A fresh channel is created when not supplied.
      instance_name: Optional name assigned to this specific instance of
        ExampleValidator. Required only if multiple ExampleValidator components
        are declared in the same pipeline. Either `stats` or `statistics` must
        be present in the arguments.
    """
    if exclude_splits is None:
      exclude_splits = []
      logging.info('Excluding no splits because exclude_splits is not set.')
    # Create the output channel on the caller's behalf when it was not given.
    if not anomalies:
      anomalies = types.Channel(type=standard_artifacts.ExampleAnomalies)
    spec = ExampleValidatorSpec(
        statistics=statistics,
        schema=schema,
        exclude_splits=json_utils.dumps(exclude_splits),
        anomalies=anomalies)
    super(ExampleValidator, self).__init__(
        spec=spec, instance_name=instance_name)
"""Generic TFX example_validator executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Text
from absl import logging
import tensorflow_data_validation as tfdv
from tfx import types
from tfx.components.example_validator import labels
from tfx.components.util import value_utils
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import ANOMALIES_KEY
from tfx.types.standard_component_specs import EXCLUDE_SPLITS_KEY
from tfx.types.standard_component_specs import SCHEMA_KEY
from tfx.types.standard_component_specs import STATISTICS_KEY
from tfx.utils import io_utils
from tfx.utils import json_utils
# Default file name for the serialized anomalies proto written by _Validate.
DEFAULT_FILE_NAME = 'anomalies.pbtxt'
class Executor(base_executor.BaseExecutor):
  """TensorFlow ExampleValidator component executor."""

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """TensorFlow ExampleValidator executor entrypoint.

    This validates statistics against the schema.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - statistics: A list of type `standard_artifacts.ExampleStatistics`
          generated by StatisticsGen.
        - schema: A list of type `standard_artifacts.Schema` which should
          contain a single schema artifact.
      output_dict: Output dict from key to a list of artifacts, including:
        - output: A list of 'standard_artifacts.ExampleAnomalies' of size one.
          It will include a single pbtxt file which contains all anomalies
          found.
      exec_properties: A dict of execution properties.
        - exclude_splits: JSON-serialized list of names of splits that the
          example validator should not validate.

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    # Load and deserialize exclude splits from execution properties.
    exclude_splits = json_utils.loads(
        exec_properties.get(EXCLUDE_SPLITS_KEY, 'null')) or []
    if not isinstance(exclude_splits, list):
      raise ValueError('exclude_splits in execution properties needs to be a '
                       'list. Got %s instead.' % type(exclude_splits))
    # Setup output splits.
    stats_artifact = artifact_utils.get_single_instance(
        input_dict[STATISTICS_KEY])
    stats_split_names = artifact_utils.decode_split_names(
        stats_artifact.split_names)
    split_names = [
        split for split in stats_split_names if split not in exclude_splits
    ]
    anomalies_artifact = artifact_utils.get_single_instance(
        output_dict[ANOMALIES_KEY])
    anomalies_artifact.split_names = artifact_utils.encode_split_names(
        split_names)

    schema = io_utils.SchemaReader().read(
        io_utils.get_only_uri_in_dir(
            artifact_utils.get_single_uri(
                input_dict[SCHEMA_KEY])))

    # Iterate over the already-filtered split list instead of re-decoding the
    # artifact's split names and skipping excluded entries inside the loop.
    for split in split_names:
      logging.info(
          'Validating schema against the computed statistics for '
          'split %s.', split)
      label_inputs = {
          STATISTICS_KEY:
              tfdv.load_statistics(
                  io_utils.get_only_uri_in_dir(
                      os.path.join(stats_artifact.uri, split))),
          SCHEMA_KEY:
              schema
      }
      output_uri = artifact_utils.get_split_uri(
          output_dict[ANOMALIES_KEY], split)
      label_outputs = {labels.SCHEMA_DIFF_PATH: output_uri}
      self._Validate(label_inputs, label_outputs)
      logging.info(
          'Validation complete for split %s. Anomalies written to '
          '%s.', split, output_uri)

  def _Validate(self, inputs: Dict[Text, Any], outputs: Dict[Text,
                                                             Any]) -> None:
    """Validate the inputs and put validate result into outputs.

    This is the implementation part of example validator executor. This is
    intended for using or extending the executor without artifact dependecy.

    Args:
      inputs: A dictionary of labeled input values, including:
        - STATISTICS_KEY: the feature statistics to validate
        - SCHEMA_KEY: the schema to respect
        - (Optional) labels.ENVIRONMENT: if an environment is specified, only
          validate the feature statistics of the fields in that environment.
          Otherwise, validate all fields.
        - (Optional) labels.PREV_SPAN_FEATURE_STATISTICS: the feature
          statistics of a previous span.
        - (Optional) labels.PREV_VERSION_FEATURE_STATISTICS: the feature
          statistics of a previous version.
        - (Optional) labels.FEATURES_NEEDED: the feature needed to be
          validated on.
        - (Optional) labels.VALIDATION_CONFIG: the configuration of this
          validation.
        - (Optional) labels.EXTERNAL_CONFIG_VERSION: the version number of
          external config file.
      outputs: A dictionary of labeled output values, including:
        - labels.SCHEMA_DIFF_PATH: the path to write the schema diff to
    """
    schema = value_utils.GetSoleValue(inputs, SCHEMA_KEY)
    stats = value_utils.GetSoleValue(inputs, STATISTICS_KEY)
    schema_diff_path = value_utils.GetSoleValue(
        outputs, labels.SCHEMA_DIFF_PATH)
    anomalies = tfdv.validate_statistics(stats, schema)
    io_utils.write_pbtxt_file(
        os.path.join(schema_diff_path, DEFAULT_FILE_NAME), anomalies)
"""TFX Transform component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.transform import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.proto import transform_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TransformSpec
from tfx.utils import json_utils
class Transform(base_component.BaseComponent):
  """A TFX component to transform the input examples.

  The Transform component wraps TensorFlow Transform (tf.Transform) to
  preprocess data in a TFX pipeline. This component will load the
  preprocessing_fn from input module file, preprocess both 'train' and 'eval'
  splits of input examples, generate the `tf.Transform` output, and save both
  transform function and transformed examples to orchestrator desired locations.

  ## Providing a preprocessing function

  The TFX executor will use the estimator provided in the `module_file` file
  to train the model. The Transform executor will look specifically for the
  `preprocessing_fn()` function within that file.

  An example of `preprocessing_fn()` can be found in the [user-supplied
  code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))
  of the TFX Chicago Taxi pipeline example.

  ## Example
  ```
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=infer_schema.outputs['schema'],
      module_file=module_file)
  ```

  Please see https://www.tensorflow.org/tfx/transform for more details.
  """

  SPEC_CLASS = TransformSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      examples: Optional[types.Channel] = None,
      schema: Optional[types.Channel] = None,
      module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      preprocessing_fn: Optional[Union[Text,
                                       data_types.RuntimeParameter]] = None,
      splits_config: Optional[transform_pb2.SplitsConfig] = None,
      transform_graph: Optional[types.Channel] = None,
      transformed_examples: Optional[types.Channel] = None,
      analyzer_cache: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None,
      materialize: bool = True,
      disable_analyzer_cache: bool = False,
      force_tf_compat_v1: bool = True,
      custom_config: Optional[Dict[Text, Any]] = None):
    """Construct a Transform component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples` (required).
        This should contain custom splits specified in splits_config. If
        custom split is not provided, this should contain two splits 'train'
        and 'eval'.
      schema: A Channel of type `standard_artifacts.Schema`. This should
        contain a single schema artifact.
      module_file: The file path to a python module file, from which the
        'preprocessing_fn' function will be loaded.
        Exactly one of 'module_file' or 'preprocessing_fn' must be supplied.
        The function needs to have the following signature:
        ```
        def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
          ...
        ```
        where the values of input and returned Dict are either tf.Tensor or
        tf.SparseTensor.
        If additional inputs are needed for preprocessing_fn, they can be passed
        in custom_config:
        ```
        def preprocessing_fn(inputs: Dict[Text, Any], custom_config:
                             Dict[Text, Any]) -> Dict[Text, Any]:
          ...
        ```
      preprocessing_fn: The path to python function that implements a
        'preprocessing_fn'. See 'module_file' for expected signature of the
        function. Exactly one of 'module_file' or 'preprocessing_fn' must be
        supplied.
      splits_config: A transform_pb2.SplitsConfig instance, providing splits
        that should be analyzed and splits that should be transformed. Note
        analyze and transform splits can have overlap. Default behavior (when
        splits_config is not set) is analyze the 'train' split and transform
        all splits. If splits_config is set, analyze cannot be empty.
      transform_graph: Optional output 'TransformPath' channel for output of
        'tf.Transform', which includes an exported Tensorflow graph suitable for
        both training and serving;
      transformed_examples: Optional output 'ExamplesPath' channel for
        materialized transformed examples, which includes transform splits as
        specified in splits_config. If custom split is not provided, this should
        include both 'train' and 'eval' splits.
      analyzer_cache: Optional input 'TransformCache' channel containing
        cached information from previous Transform runs. When provided,
        Transform will try use the cached calculation if possible.
      instance_name: Optional unique instance name. Necessary iff multiple
        transform components are declared in the same pipeline.
      materialize: If True, write transformed examples as an output. If False,
        `transformed_examples` must not be provided.
      disable_analyzer_cache: If False, Transform will use input cache if
        provided and write cache output. If True, `analyzer_cache` must not be
        provided.
      force_tf_compat_v1: (Optional) If True, Transform will use Tensorflow in
        compat.v1 mode irrespective of installed version of Tensorflow. Defaults
        to `True`. Note: The default value will be switched to `False` in a
        future release.
      custom_config: A dict which contains additional parameters that will be
        passed to preprocessing_fn.

    Raises:
      ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
        is supplied.
    """
    # Exactly one source for the preprocessing logic must be given.
    if bool(module_file) == bool(preprocessing_fn):
      raise ValueError(
          "Exactly one of 'module_file' or 'preprocessing_fn' must be supplied."
      )
    transform_graph = transform_graph or types.Channel(
        type=standard_artifacts.TransformGraph)
    # Create the materialized-examples output channel unless materialization
    # is disabled, in which case supplying one is an error.
    if materialize and transformed_examples is None:
      transformed_examples = types.Channel(
          type=standard_artifacts.Examples,
          matching_channel_name='examples')
    elif not materialize and transformed_examples is not None:
      raise ValueError(
          'Must not specify transformed_examples when materialize is False.')
    if disable_analyzer_cache:
      updated_analyzer_cache = None
      if analyzer_cache:
        raise ValueError(
            '`analyzer_cache` is set when disable_analyzer_cache is True.')
    else:
      updated_analyzer_cache = types.Channel(
          type=standard_artifacts.TransformCache)
    spec = TransformSpec(
        examples=examples,
        schema=schema,
        module_file=module_file,
        preprocessing_fn=preprocessing_fn,
        force_tf_compat_v1=int(force_tf_compat_v1),
        splits_config=splits_config,
        transform_graph=transform_graph,
        transformed_examples=transformed_examples,
        analyzer_cache=analyzer_cache,
        updated_analyzer_cache=updated_analyzer_cache,
        custom_config=json_utils.dumps(custom_config))
    super(Transform, self).__init__(spec=spec, instance_name=instance_name)
"""Invoke transform executor for data transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import absl
from tfx.components.transform import labels
from tfx.components.transform.executor import Executor
from tfx.proto import example_gen_pb2
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import app
# pylint: enable=g-direct-tensorflow-import
def _run_transform(args, beam_pipeline_args):
  """Builds executor input/output dicts from parsed args and runs Transform.

  Args:
    args: Parsed argparse namespace produced by `main`.
    beam_pipeline_args: Remaining command-line flags forwarded to Beam.
  """
  absl.logging.set_verbosity(absl.logging.INFO)
  # Examples used for analysis, and the full set (analysis + transform-only)
  # that gets transformed.
  analyze_paths = args.analyze_examples
  transform_paths = args.analyze_examples + args.transform_only_examples
  tfrecord = labels.FORMAT_TFRECORD
  inputs = {
      labels.ANALYZE_DATA_PATHS_LABEL: analyze_paths,
      labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: [tfrecord] * len(analyze_paths),
      # Note: the transform paths are wrapped in a single-element list.
      labels.TRANSFORM_DATA_PATHS_LABEL: [transform_paths],
      labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL:
          [tfrecord] * len(transform_paths),
      labels.SCHEMA_PATH_LABEL: args.input_schema_path,
      labels.PREPROCESSING_FN: args.preprocessing_fn_path,
      labels.EXAMPLES_DATA_FORMAT_LABEL:
          example_gen_pb2.PayloadFormat.Value(args.example_data_format),
      labels.COMPUTE_STATISTICS_LABEL: args.compute_statistics,
      labels.BEAM_PIPELINE_ARGS: beam_pipeline_args,
  }
  outputs = {
      labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: args.transform_fn,
      labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL:
          args.transformed_examples,
      labels.PER_SET_STATS_OUTPUT_PATHS_LABEL: args.per_set_stats_outputs,
      labels.TEMP_OUTPUT_LABEL: args.tmp_location,
  }
  executor = Executor(Executor.Context(beam_pipeline_args=beam_pipeline_args))
  executor.Transform(inputs, outputs, args.status_file)
def main(argv):
parser = argparse.ArgumentParser()
# Arguments in inputs
parser.add_argument(
'--input_schema_path',
type=str,
required=True,
help='Path to input schema')
parser.add_argument(
'--preprocessing_fn_path',
type=str,
default='',
required=True,
help='Path to README.ml-pipelines-sdk.md preprocessing_fn module')
parser.add_argument(
'--use_tfdv',
type=bool,
default=True,
help='Deprecated and ignored. DO NOT SET.')
parser.add_argument(
'--compute_statistics',
type=bool,
default=False,
help='Whether computes statistics')
parser.add_argument(
'--analyze_examples',
nargs='+',
default='',
type=str,
help='A space-separated list of paths to examples to be analyzed '
'and transformed')
parser.add_argument(
'--transform_only_examples',
nargs='+',
default='',
type=str,
help='A space-separated list of paths to examples to be transformed only')
parser.add_argument(
'--example_data_format',
type=str,
default=example_gen_pb2.PayloadFormat.Name(
example_gen_pb2.FORMAT_TF_EXAMPLE),
help='Example data format')
# Arguments in outputs
parser.add_argument(
'--transform_fn',
type=str,
required=True,
help='Path that TFTransformOutput will write to')
parser.add_argument(
'--tmp_location',
type=str,
required=True,
help='Path to write temporary files. Executor does not own this '
'directory. User or caller is responsible for cleanup')
parser.add_argument(
'--transformed_examples',
nargs='+',
type=str,
default=[],
help='A space-separated list of paths to write transformed examples')
parser.add_argument(
'--per_set_stats_outputs',
nargs='+',
type=str,
default=[],
help='Paths to statistics output')
parser.add_argument(
'--status_file', type=str, default='', help='Path to write status')
args, beam_args = parser.parse_known_args(argv)
_run_transform(args, beam_args)
if __name__ == '__main__':
app.run(main=main) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/components/transform/run_executor.py | 0.767341 | 0.27523 | run_executor.py | pypi |
"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.pusher import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
from tfx.utils import json_utils
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class Pusher(base_component.BaseComponent):
  """A TFX component to push validated TensorFlow models to a model serving platform.

  The `Pusher` component can be used to push a validated SavedModel from output
  of the [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) to
  [TensorFlow Serving](https://www.tensorflow.org/tfx/serving). The Pusher
  will check the validation results from the [Evaluator
  component](https://www.tensorflow.org/tfx/guide/evaluator) and [InfraValidator
  component](https://www.tensorflow.org/tfx/guide/infra_validator)
  before deploying the model. If the model has not been blessed, then the model
  will not be pushed.

  *Note:* The executor for this component can be overriden to enable the model
  to be pushed to other serving platforms than tf.serving. The [Cloud AI
  Platform custom
  executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/pusher)
  provides an example how to implement this.

  ## Example
  ```
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  ```
  """

  SPEC_CLASS = PusherSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      model: types.Channel = None,
      model_blessing: Optional[types.Channel] = None,
      infra_blessing: Optional[types.Channel] = None,
      push_destination: Optional[Union[pusher_pb2.PushDestination,
                                       Dict[Text, Any]]] = None,
      custom_config: Optional[Dict[Text, Any]] = None,
      custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
      pushed_model: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct a Pusher component.

    Args:
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: An optional Channel of type
        `standard_artifacts.ModelBlessing`, usually produced from an Evaluator
        component.
      infra_blessing: An optional Channel of type
        `standard_artifacts.InfraBlessing`, usually produced from an
        InfraValidator component.
      push_destination: A pusher_pb2.PushDestination instance, providing info
        for tensorflow serving to load models. Optional if executor_class
        doesn't require push_destination. If any field is provided as a
        RuntimeParameter, push_destination should be constructed as a dict with
        the same field names as PushDestination proto message.
      custom_config: A dict which contains the deployment job parameters to be
        passed to cloud-based training platforms. The [Kubeflow example](
        https://github.com/tensorflow/tfx/blob/6ff57e36a7b65818d4598d41e584a42584d361e6/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py#L278-L285)
        contains an example how this can be used by custom executors.
      custom_executor_spec: Optional custom executor spec.
      pushed_model: Optional output `standard_artifacts.PushedModel` channel
        with result of push.
      instance_name: Optional unique instance name. Necessary if multiple
        Pusher components are declared in the same pipeline.

    Raises:
      ValueError: If push_destination is missing and no custom executor spec
        was supplied to take over that responsibility.
    """
    pushed_model = pushed_model or types.Channel(
        type=standard_artifacts.PushedModel)
    # A custom executor may not need a push destination (e.g. a cloud
    # deployment executor that derives the destination from custom_config).
    if push_destination is None and not custom_executor_spec:
      raise ValueError('push_destination is required unless a '
                       'custom_executor_spec is supplied that does not require '
                       'it.')
    spec = PusherSpec(
        model=model,
        model_blessing=model_blessing,
        infra_blessing=infra_blessing,
        push_destination=push_destination,
        custom_config=json_utils.dumps(custom_config),
        pushed_model=pushed_model)
    super(Pusher, self).__init__(
        spec=spec,
        custom_executor_spec=custom_executor_spec,
        instance_name=instance_name)
"""TFX DataViewBinder component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.experimental.data_view import binder_executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ComponentSpec
class _DataViewBinderComponentSpec(ComponentSpec):
  """ComponentSpec for the DataViewBinder component.

  Declares two required input channels (the Examples to annotate and the
  DataView to bind) and one output channel of annotated Examples.
  """
  # No execution-time parameters: the binder only copies/annotates artifacts.
  PARAMETERS = {}
  INPUTS = {
      'input_examples': ChannelParameter(type=standard_artifacts.Examples),
      'data_view': ChannelParameter(type=standard_artifacts.DataView),
  }
  OUTPUTS = {
      'output_examples': ChannelParameter(type=standard_artifacts.Examples),
  }
class DataViewBinder(base_component.BaseComponent):
  """A component that binds a DataView to an Examples artifact.

  Given a channel of Examples and a channel of DataView, this component
  attaches information from the DataView to the Examples as custom
  properties. The produced Examples artifacts are otherwise identical to the
  inputs (including the uris); only the attached metadata differs.

  Example:
  ```
  # We assume Examples are imported by ExampleGen
  example_gen = ...
  # First, create a dataview:
  data_view_provider = TfGraphDataViewProvider(
      module_file=module_file,
      create_decoder_func='create_decoder')
  # Then, bind the DataView to Examples:
  data_view_binder = DataViewBinder(
      input_examples=example_gen.outputs['examples'],
      data_view=data_view_provider.outputs['data_view'],
  )
  # Downstream component can then consume the output of the DataViewBinder:
  stats_gen = StatisticsGen(
      examples=data_view_binder.outputs['output_examples'], ...)
  ```
  """

  SPEC_CLASS = _DataViewBinderComponentSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(
      binder_executor.DataViewBinderExecutor)

  def __init__(self,
               input_examples: types.Channel,
               data_view: types.Channel,
               output_examples: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a DataViewBinder.

    Args:
      input_examples: Channel of Examples to annotate.
      data_view: Channel containing the DataView to bind.
      output_examples: Optional channel for the annotated Examples; created
        automatically when not supplied.
      instance_name: Optional unique instance name for this component.
    """
    # Default the output channel when the caller did not provide one.
    output_examples = output_examples or types.Channel(
        type=standard_artifacts.Examples)
    component_spec = _DataViewBinderComponentSpec(
        input_examples=input_examples,
        data_view=data_view,
        output_examples=output_examples)
    super().__init__(spec=component_spec, instance_name=instance_name)
"""TFX DataViewProvider component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.experimental.data_view import provider_executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ComponentSpec
from tfx.types.component_spec import ExecutionParameter
class _TfGraphDataViewProviderSpec(ComponentSpec):
  """ComponentSpec for the TfGraphDataViewProvider component.

  Takes no input artifacts: the decoder is created from user code identified
  by the execution parameters, and written to the `data_view` output channel.
  """
  PARAMETERS = {
      # Optional path to a Python module; when absent, `create_decoder_func`
      # must be a fully-qualified dotted path to the factory function.
      'module_file': ExecutionParameter(type=(str, Text), optional=True),
      'create_decoder_func': ExecutionParameter(type=(str, Text))
  }
  INPUTS = {}
  OUTPUTS = {
      'data_view': ChannelParameter(type=standard_artifacts.DataView),
  }
class TfGraphDataViewProvider(base_component.BaseComponent):
  """A component providing a tfx_bsl.coders.TfGraphRecordDecoder as a DataView.

  The user supplies a function that creates a TfGraphRecordDecoder. When this
  component runs, it calls that function and writes the resulting decoder (in
  the form of a TF SavedModel) as its output artifact.

  Example:
  ```
  # Import a decoder that can be created by a function 'create_decoder()' in
  # module_file:
  data_view_provider = TfGraphDataViewProvider(
      module_file=module_file,
      create_decoder_func='create_decoder')
  ```
  """

  SPEC_CLASS = _TfGraphDataViewProviderSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(
      provider_executor.TfGraphDataViewProviderExecutor)

  def __init__(self,
               create_decoder_func: Text,
               module_file: Optional[Text] = None,
               data_view: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Constructs a TfGraphDataViewProvider component.

    Args:
      create_decoder_func: If `module_file` is not None, this should be the
        name of the function in `module_file` that this component uses to
        create the TfGraphRecordDecoder. Otherwise it should be the
        dot-delimited path (e.g. "some_package.some_module.some_func") to such
        a function. The function must have the following signature:

        def create_decoder_func() -> tfx_bsl.coder.TfGraphRecordDecoder:
          ...
      module_file: The file path to a python module file, from which the
        function named after `create_decoder_func` will be loaded. If not
        provided, `create_decoder_func` is expected to be a path to a function.
      data_view: Output 'DataView' channel, in which the decoder will be
        saved.
      instance_name: Optional unique instance name. Necessary if multiple
        instances of this component are declared in the same pipeline.
    """
    # Create a fresh output channel unless the caller supplied one.
    data_view = (
        types.Channel(type=standard_artifacts.DataView)
        if data_view is None else data_view)
    component_spec = _TfGraphDataViewProviderSpec(
        module_file=module_file,
        create_decoder_func=create_decoder_func,
        data_view=data_view)
    super().__init__(spec=component_spec, instance_name=instance_name)
"""TFX ModelValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from tfx import types
from tfx.components.model_validator import driver
from tfx.components.model_validator import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import ModelValidatorSpec
from tfx.utils import deprecation_utils
class ModelValidator(base_component.BaseComponent):
  """DEPRECATED: Please use `Evaluator` instead.

  The model validator component checks model metric thresholds and validates
  the current model against a previously validated model. When no prior
  validated model exists, only the threshold check applies. Otherwise,
  ModelValidator compares the newly trained model against a known good model,
  specifically the last model "blessed" by this component. A model is
  "blessed" if the exported model's metrics are within predefined thresholds
  around the prior model's metrics.

  *Note:* This component includes a driver to resolve the last blessed model.

  ## Possible causes why model validation fails

  Model validation can fail for many reasons, but these are the most common:

  - problems with training data. For example, negative examples are dropped or
    features are missing.
  - problems with the test or evaluation data. For example, skew exists between
    the training and evaluation data.
  - changes in data distribution. This indicates the user behavior may have
    changed over time.
  - problems with the trainer. For example, the trainer was stopped before
    model is converged or the model is unstable.

  ## Example
  ```
  # Performs quality validation of a candidate model (compared to a baseline).
  model_validator = ModelValidator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'])
  ```
  """

  SPEC_CLASS = ModelValidatorSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
  DRIVER_CLASS = driver.Driver

  @deprecation_utils.deprecated(
      None, 'ModelValidator is deprecated, use Evaluator instead.')
  def __init__(self,
               examples: types.Channel,
               model: types.Channel,
               blessing: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct a ModelValidator component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, usually
        produced by an
        [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
        component. _required_
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a [Trainer](https://www.tensorflow.org/tfx/guide/trainer) component.
        _required_
      blessing: Output channel of type `standard_artifacts.ModelBlessing`
        that contains the validation result.
      instance_name: Optional name assigned to this specific instance of
        ModelValidator. Required only if multiple ModelValidator components
        are declared in the same pipeline.
    """
    # Default the blessing output channel when none was supplied.
    if not blessing:
      blessing = types.Channel(type=standard_artifacts.ModelBlessing)
    component_spec = ModelValidatorSpec(
        examples=examples, model=model, blessing=blessing)
    super(ModelValidator, self).__init__(
        spec=component_spec, instance_name=instance_name)
"""Generic TFX model validator custom driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Tuple
import absl
from tfx.dsl.components.base import base_driver
from tfx.orchestration import data_types
class Driver(base_driver.BaseDriver):
  """Custom driver for model validator.

  Resolves the most recently blessed model from ML Metadata so that the
  executor can compare the candidate model against it.
  """

  def _fetch_last_blessed_model(
      self,
      pipeline_name: Text,
      component_id: Text,
  ) -> Tuple[Optional[Text], Optional[int]]:
    """Fetch last blessed model in metadata based on span.

    Args:
      pipeline_name: Name of the pipeline whose blessings are relevant.
      component_id: Id of the validator component that produced the blessing.

    Returns:
      A (model uri, model id) pair for the newest matching blessing, or
      (None, None) if no model has been blessed yet.
    """
    candidates = []
    for artifact in self._metadata_handler.get_artifacts_by_type(
        'ModelBlessing'):
      # TODO(ccy): get pipeline name from MLMD context.
      # Pipeline name may live in either regular or custom properties.
      if 'pipeline_name' in artifact.properties:
        artifact_pipeline = artifact.properties['pipeline_name'].string_value
      else:
        artifact_pipeline = (
            artifact.custom_properties['pipeline_name'].string_value)
      # Keep blessings from this pipeline/component that actually passed.
      # The short-circuit order matters: custom properties are only touched
      # once the pipeline name matches.
      if (artifact_pipeline == pipeline_name and
          artifact.custom_properties['blessed'].int_value == 1 and
          artifact.custom_properties['component_id'].string_value ==
          component_id):
        candidates.append(artifact)
    if not candidates:
      return None, None
    # TODO(b/138845899): consider use span instead of id.
    newest = max(candidates, key=lambda artifact: artifact.id)
    return (newest.custom_properties['current_model'].string_value,
            newest.custom_properties['current_model_id'].int_value)

  # pyformat: disable
  def resolve_exec_properties(
      self, exec_properties: Dict[Text, Any],
      pipeline_info: data_types.PipelineInfo,
      component_info: data_types.ComponentInfo) -> Dict[Text, Any]:
    # pyformat: enable
    """Overrides BaseDriver.resolve_exec_properties()."""
    blessed_model, blessed_model_id = self._fetch_last_blessed_model(
        pipeline_info.pipeline_name, component_info.component_id)
    exec_properties['blessed_model'] = blessed_model
    exec_properties['blessed_model_id'] = blessed_model_id
    exec_properties['current_component_id'] = component_info.component_id
    absl.logging.info('Resolved last blessed model {}'.format(blessed_model))
    return exec_properties
"""Generic TFX model validator executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Text
import absl
import apache_beam as beam
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.model_validator import constants
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
class Executor(base_executor.BaseExecutor):
  """DEPRECATED: Please use `Evaluator` instead.

  The model validator helps prevent bad models from being pushed to production.
  It does this by validating exported models against known good models (e.g. the
  current production model), and marking the exported model as good ("blessing
  it") only if the exported model's metrics are within predefined thresholds
  around the good model's metrics.

  The model validator will validate tf.serving format exported models produced
  by the Trainer component. The validator evaluates the models on examples
  created by the ExampleGen component. The validator will also automatically
  read data written by the Pusher component regarding the latest pushed models
  by using ml.metadata to query the previously pushed artifacts.

  To include ModelValidator in a TFX pipeline, configure your pipeline similar
  to
  https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L110.
  """

  # TODO(jyzhao): customized threshold support.
  def _pass_threshold(self, eval_result: tfma.EvalResult) -> bool:
    """Check threshold."""
    # Currently a stub: every candidate model passes. See TODO above.
    return True

  # TODO(jyzhao): customized validation support.
  def _compare_eval_result(self, current_model_eval_result: tfma.EvalResult,
                           blessed_model_eval_result: tfma.EvalResult) -> bool:
    """Compare accuracy of all metrics and return true if current is better or equal.

    Both results are expected to list slices pairwise in the same order; a
    slice-key mismatch is an error rather than being silently skipped.

    Raises:
      RuntimeError: If the two results' slice keys do not line up.
    """
    for current_metric, blessed_metric in zip(
        current_model_eval_result.slicing_metrics,
        blessed_model_eval_result.slicing_metrics):
      # slicing_metric is a tuple, index 0 is slice, index 1 is its value.
      if current_metric[0] != blessed_metric[0]:
        raise RuntimeError('EvalResult not match {} vs {}.'.format(
            current_metric[0], blessed_metric[0]))
      # TODO(b/140455644): TFMA introduced breaking change post 0.14 release.
      # Remove this forward compatibility change after 0.15 release.
      current_model_metrics = current_metric[1]
      blessed_model_metrics = blessed_metric[1]
      try:
        # Pre-0.15 TFMA layout: metrics keyed directly by name.
        current_model_accuracy = current_model_metrics['accuracy']
        blessed_model_accuracy = blessed_model_metrics['accuracy']
      except KeyError:
        # Post-0.14 TFMA layout: metrics nested under output name and sub-key
        # (both empty strings here).
        current_model_accuracy = current_model_metrics['']['']['accuracy']
        blessed_model_accuracy = blessed_model_metrics['']['']['accuracy']
      # A single slice with worse accuracy fails the whole comparison.
      if (current_model_accuracy['doubleValue'] <
          blessed_model_accuracy['doubleValue']):
        absl.logging.info(
            'Current model accuracy is worse than blessed model: {}'.format(
                current_metric[0]))
        return False
    return True

  def _generate_blessing_result(self, eval_examples_uri: Text,
                                slice_spec: List[tfma.slicer.SingleSliceSpec],
                                current_model_dir: Text,
                                blessed_model_dir: Text) -> bool:
    """Evaluates the candidate (and blessed) model and decides the blessing.

    Runs a Beam pipeline that evaluates the candidate model -- and, when one
    exists, the previously blessed model -- with TFMA, then compares the
    written eval results.

    Args:
      eval_examples_uri: URI of TFRecord examples to evaluate on.
      slice_spec: TFMA slice specs to evaluate over.
      current_model_dir: Directory of the candidate model.
      blessed_model_dir: Directory of the last blessed model, or None if no
        model has been blessed yet.

    Returns:
      True iff the candidate model should be blessed.
    """
    # Eval results are written under the executor's temp dir (cleaned up by
    # Do() on success).
    current_model_eval_result_path = os.path.join(
        self._temp_path, constants.CURRENT_MODEL_EVAL_RESULT_PATH)
    blessed_model_eval_result_path = os.path.join(
        self._temp_path, constants.BLESSED_MODEL_EVAL_RESULT_PATH)
    with self._make_beam_pipeline() as pipeline:
      eval_data = (
          pipeline | 'ReadData' >> beam.io.ReadFromTFRecord(
              file_pattern=io_utils.all_files_pattern(eval_examples_uri)))
      current_model = tfma.default_eval_shared_model(
          eval_saved_model_path=path_utils.eval_model_path(current_model_dir))
      (eval_data | 'EvalCurrentModel' >> tfma.ExtractEvaluateAndWriteResults(  # pylint: disable=expression-not-assigned
          eval_shared_model=current_model,
          slice_spec=slice_spec,
          output_path=current_model_eval_result_path))
      # Only evaluate the blessed model when one exists.
      if blessed_model_dir is not None:
        blessed_model = tfma.default_eval_shared_model(
            eval_saved_model_path=path_utils.eval_model_path(blessed_model_dir))
        (eval_data | 'EvalBlessedModel' >> tfma.ExtractEvaluateAndWriteResults(  # pylint: disable=expression-not-assigned
            eval_shared_model=blessed_model,
            slice_spec=slice_spec,
            output_path=blessed_model_eval_result_path))
    # The `with` block above has run the pipeline to completion; results are
    # now on disk and can be loaded.
    absl.logging.info('all files in current_model_eval_result_path: [%s]',
                      str(fileio.listdir(current_model_eval_result_path)))
    current_model_eval_result = tfma.load_eval_result(
        output_path=current_model_eval_result_path)
    if not self._pass_threshold(current_model_eval_result):
      absl.logging.info('Current model does not pass threshold.')
      return False
    absl.logging.info('Current model passes threshold.')
    # Without a prior blessed model, passing the threshold is sufficient.
    if blessed_model_dir is None:
      absl.logging.info('No blessed model yet.')
      return True
    absl.logging.info('all files in blessed_model_eval_result: [%s]',
                      str(fileio.listdir(blessed_model_eval_result_path)))
    blessed_model_eval_result = tfma.load_eval_result(
        output_path=blessed_model_eval_result_path)
    if (self._compare_eval_result(current_model_eval_result,
                                  blessed_model_eval_result)):
      absl.logging.info('Current model better than blessed model.')
      return True
    else:
      absl.logging.info('Current model worse than blessed model.')
      return False

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Validate current model against last blessed model.

    Args:
      input_dict: Input dict from input key to a list of Artifacts.
        - examples: examples for eval the model.
        - model: current model for validation.
      output_dict: Output dict from output key to a list of Artifacts.
        - blessing: model blessing result.
      exec_properties: A dict of execution properties.
        - blessed_model: last blessed model for validation.
        - blessed_model_id: last blessed model id.

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    # Temp dir for intermediate TFMA eval results; deleted on success below.
    self._temp_path = self._get_tmp_dir()
    absl.logging.info('Using temp path {} for tft.beam'.format(self._temp_path))

    eval_examples_uri = artifact_utils.get_split_uri(
        input_dict[constants.EXAMPLES_KEY], 'eval')
    blessing = artifact_utils.get_single_instance(
        output_dict[constants.BLESSING_KEY])

    # Current model to be validated.
    current_model = artifact_utils.get_single_instance(
        input_dict[constants.MODEL_KEY])
    absl.logging.info('Using {} as current model.'.format(current_model.uri))
    # Record the candidate model on the blessing artifact for lineage.
    blessing.set_string_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_URI_KEY, current_model.uri)
    blessing.set_int_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY, current_model.id)

    # Denote model component_name.
    component_id = exec_properties['current_component_id']
    blessing.set_string_custom_property('component_id', component_id)

    # Previous blessed model to be validated against (resolved by the custom
    # driver; may be None/empty on the first run).
    blessed_model_dir = exec_properties['blessed_model']
    blessed_model_id = exec_properties['blessed_model_id']
    absl.logging.info('Using {} as blessed model.'.format(blessed_model_dir))
    if blessed_model_dir:
      blessing.set_string_custom_property(
          constants.ARTIFACT_PROPERTY_BLESSED_MODEL_URI_KEY, blessed_model_dir)
      blessing.set_int_custom_property(
          constants.ARTIFACT_PROPERTY_BLESSED_MODEL_ID_KEY, blessed_model_id)

    absl.logging.info('Validating model.')
    # TODO(b/125853306): support customized slice spec.
    blessed = self._generate_blessing_result(
        eval_examples_uri=eval_examples_uri,
        slice_spec=[tfma.slicer.SingleSliceSpec()],
        current_model_dir=current_model.uri,
        blessed_model_dir=blessed_model_dir)

    # Write a marker file and a custom property reflecting the decision.
    if blessed:
      io_utils.write_string_file(
          os.path.join(blessing.uri, constants.BLESSED_FILE_NAME), '')
      blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
                                       constants.BLESSED_VALUE)
    else:
      io_utils.write_string_file(
          os.path.join(blessing.uri, constants.NOT_BLESSED_FILE_NAME), '')
      blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
                                       constants.NOT_BLESSED_VALUE)
    absl.logging.info('Blessing result {} written to {}.'.format(
        blessed, blessing.uri))

    # Only clean up on success, so failed runs leave results for debugging.
    io_utils.delete_dir(self._temp_path)
    absl.logging.info('Cleaned up temp path {} on executor success.'.format(
        self._temp_path))
"""TFX StatisticsGen component definition."""
from typing import List, Optional, Text
from absl import logging
import tensorflow_data_validation as tfdv
from tfx import types
from tfx.components.statistics_gen import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import StatisticsGenSpec
from tfx.utils import json_utils
class StatisticsGen(base_component.BaseComponent):
  """Official TFX StatisticsGen component.

  Generates feature statistics and random samples over training data using
  TensorFlow Data Validation, scaling to large datasets through Apache Beam
  and approximate algorithms. The statistics can be used for visualization
  and example validation.

  Please see https://www.tensorflow.org/tfx/data_validation for more details.

  ## Example
  ```
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  ```
  """

  SPEC_CLASS = StatisticsGenSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               examples: types.Channel = None,
               schema: Optional[types.Channel] = None,
               stats_options: Optional[tfdv.StatsOptions] = None,
               exclude_splits: Optional[List[Text]] = None,
               output: Optional[types.Channel] = None,
               input_data: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct a StatisticsGen component.

    Args:
      examples: A Channel of `ExamplesPath` type, likely generated by the
        [ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
        This needs to contain two splits labeled `train` and `eval`. _required_
      schema: A `Schema` channel to use for automatically configuring the
        value of stats options passed to TFDV.
      stats_options: The StatsOptions instance to configure optional TFDV
        behavior. When stats_options.schema is set, it will be used instead of
        the `schema` channel input. Because stats_options must be serialized,
        slicer functions and custom stats generators are dropped and are
        therefore not usable.
      exclude_splits: Names of splits for which statistics and samples should
        not be generated. Defaults (when None) to excluding no splits.
      output: `ExampleStatisticsPath` channel for statistics of each split
        provided in the input examples.
      input_data: Backwards compatibility alias for the `examples` argument.
      instance_name: Optional name assigned to this specific instance of
        StatisticsGen. Required only if multiple StatisticsGen components are
        declared in the same pipeline.
    """
    # Honor the deprecated alias, but steer callers toward `examples`.
    if input_data:
      logging.warning(
          'The "input_data" argument to the StatisticsGen component has '
          'been renamed to "examples" and is deprecated. Please update your '
          'usage as support for this argument will be removed soon.')
      examples = input_data
    if exclude_splits is None:
      exclude_splits = []
      logging.info('Excluding no splits because exclude_splits is not set.')
    output = output or types.Channel(type=standard_artifacts.ExampleStatistics)
    # TODO(b/150802589): Move jsonable interface to tfx_bsl and use json_utils.
    serialized_stats_options = (
        stats_options.to_json() if stats_options else None)
    component_spec = StatisticsGenSpec(
        examples=examples,
        schema=schema,
        stats_options_json=serialized_stats_options,
        exclude_splits=json_utils.dumps(exclude_splits),
        statistics=output)
    super(StatisticsGen, self).__init__(
        spec=component_spec, instance_name=instance_name)
"""TFX ExampleValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.schema_gen import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import SchemaGenSpec
from tfx.utils import json_utils
class SchemaGen(base_component.BaseComponent):
  """A TFX SchemaGen component to generate a schema from the training data.

  The SchemaGen component uses [TensorFlow Data
  Validation](https://www.tensorflow.org/tfx/data_validation) to
  generate a schema from input statistics. The following TFX libraries use the
  schema:
    - TensorFlow Data Validation
    - TensorFlow Transform
    - TensorFlow Model Analysis

  In a typical TFX pipeline, the SchemaGen component generates a schema which
  is consumed by the other pipeline components.

  Please see https://www.tensorflow.org/tfx/data_validation for more details.

  ## Example
  ```
    # Generates schema based on statistics files.
    infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  ```
  """
  # TODO(b/123941608): Update pydoc about how to use a user provided schema

  SPEC_CLASS = SchemaGenSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      statistics: Optional[types.Channel] = None,
      infer_feature_shape: Optional[Union[bool,
                                          data_types.RuntimeParameter]] = True,
      exclude_splits: Optional[List[Text]] = None,
      schema: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Constructs a SchemaGen component.

    Args:
      statistics: A Channel of `ExampleStatistics` type (required if spec is
        not passed). This should contain at least a `train` split. Other
        splits are currently ignored. _required_
      infer_feature_shape: Boolean (or RuntimeParameter) value indicating
        whether or not to infer the shape of features. If the feature shape is
        not inferred, downstream Tensorflow Transform component using the
        schema will parse input as tf.SparseTensor. Default to True if not
        set.
      exclude_splits: Names of splits that will not be taken into
        consideration when auto-generating a schema. Default behavior (when
        exclude_splits is set to None) is excluding no splits.
      schema: Output `Schema` channel for schema result.
      instance_name: Optional name assigned to this specific instance of
        SchemaGen. Required only if multiple SchemaGen components are declared
        in the same pipeline.
    """
    if exclude_splits is None:
      exclude_splits = []
      logging.info('Excluding no splits because exclude_splits is not set.')
    schema = schema or types.Channel(type=standard_artifacts.Schema)
    if isinstance(infer_feature_shape, bool):
      # The component spec carries this value as an int so it stays
      # RuntimeParameter-compatible; coerce plain bools.
      infer_feature_shape = int(infer_feature_shape)
    spec = SchemaGenSpec(
        statistics=statistics,
        infer_feature_shape=infer_feature_shape,
        exclude_splits=json_utils.dumps(exclude_splits),
        schema=schema)
    super(SchemaGen, self).__init__(spec=spec, instance_name=instance_name)
"""Generic TFX schema_gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Text
from absl import logging
import tensorflow_data_validation as tfdv
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
# Default file name for generated schema file, written directly under the
# output Schema artifact's URI.
_DEFAULT_FILE_NAME = 'schema.pbtxt'
class Executor(base_executor.BaseExecutor):
  """Generic TFX schema_gen executor."""

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """TensorFlow SchemaGen executor entrypoint.

    This infers the schema using tensorflow_data_validation on the precomputed
    stats of 'train' split.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - 'statistics': A list of 'ExampleStatistics' type which must contain
          split 'train'.
      output_dict: Output dict from key to a list of artifacts, including:
        - schema: A list of 'Schema' artifact of size one.
      exec_properties: A dict of execution properties, includes:
        - infer_feature_shape: Whether or not to infer the shape of the
          feature.
        - exclude_splits: Names of splits that will not be taken into
          consideration when auto-generating a schema.

    Returns:
      None
    """
    # TODO(zhitaoli): Move constants between this file and component.py to a
    # constants.py.
    infer_feature_shape = bool(
        exec_properties.get(standard_component_specs.INFER_FEATURE_SHAPE_KEY,
                            True))
    # Load and deserialize exclude splits from execution properties.
    exclude_splits = json_utils.loads(
        exec_properties.get(standard_component_specs.EXCLUDE_SPLITS_KEY,
                            'null')) or []
    if not isinstance(exclude_splits, list):
      raise ValueError('exclude_splits in execution properties needs to be a '
                       'list. Got %s instead.' % type(exclude_splits))
    # Only one schema is generated for all splits.
    schema = None
    stats_artifact = artifact_utils.get_single_instance(
        input_dict[standard_component_specs.STATISTICS_KEY])
    for split in artifact_utils.decode_split_names(stats_artifact.split_names):
      if split in exclude_splits:
        continue
      logging.info('Processing schema from statistics for split %s.', split)
      stats_uri = io_utils.get_only_uri_in_dir(
          os.path.join(stats_artifact.uri, split))
      if not schema:
        # First (non-excluded) split: infer a fresh schema.
        schema = tfdv.infer_schema(
            tfdv.load_statistics(stats_uri), infer_feature_shape)
      else:
        # Subsequent splits: merge their statistics into the existing schema.
        schema = tfdv.update_schema(schema, tfdv.load_statistics(stats_uri),
                                    infer_feature_shape)
    output_uri = os.path.join(
        artifact_utils.get_single_uri(
            output_dict[standard_component_specs.SCHEMA_KEY]),
        _DEFAULT_FILE_NAME)
    io_utils.write_pbtxt_file(output_uri, schema)
    logging.info('Schema written to %s.', output_uri)
"""Utility functions for building requests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
from typing import Any, Iterable, List, Mapping, Optional, Text
from absl import logging
import six
import tensorflow as tf
from tfx import types
from tfx.components.infra_validator import types as iv_types
from tfx.components.util import examples_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.utils import path_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow.python.saved_model import loader_impl # pylint: disable=g-direct-tensorflow-import
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import regression_pb2
# TODO(b/140306674): Stop using the internal TF API
# Key of the `kind` oneof in RequestSpec that selects TF Serving requests.
_TENSORFLOW_SERVING = 'tensorflow_serving'
# Number of examples to read when RequestSpec.num_examples is unset.
_DEFAULT_NUM_EXAMPLES = 1
# Column name under which TFXIO exposes the raw serialized records.
_RAW_RECORDS_COLUMN = 'raw_records'
_TELEMETRY_DESCRIPTORS = ['InfraValidator']
# NOTE(review): not referenced in this module's visible code; presumably kept
# for external users — confirm before removing.
_DEFAULT_TAG_SET = frozenset([tf.saved_model.SERVING])
# We define the following aliases of Any because the actual types are not
# public.
_SavedModel = Any
_SignatureDef = Any
def build_requests( # pylint: disable=invalid-name
    model_name: Text,
    model: types.Artifact,
    examples: types.Artifact,
    request_spec: infra_validator_pb2.RequestSpec
) -> List[iv_types.Request]:
  """Build model server requests.

  Examples artifact will be used as a data source to build requests. Caller
  should guarantee that the logical format of the Examples artifact should be
  compatible with request type to build.

  Args:
    model_name: A model name that model server recognizes.
    model: A model artifact for model signature analysis.
    examples: An `Examples` artifact for request data source.
    request_spec: A `RequestSpec` config.

  Returns:
    A list of request protos.

  Raises:
    NotImplementedError: If the `kind` oneof of `request_spec` is not
      `tensorflow_serving`.
  """
  split_name = request_spec.split_name or None
  num_examples = request_spec.num_examples or _DEFAULT_NUM_EXAMPLES
  kind = request_spec.WhichOneof('kind')
  if kind == _TENSORFLOW_SERVING:
    spec = request_spec.tensorflow_serving
    # Inspect the SavedModel to decide which request types to build.
    signatures = _parse_saved_model_signatures(
        model_path=path_utils.serving_model_path(model.uri),
        tag_set=spec.tag_set,
        signature_names=spec.signature_names)
    builder = _TFServingRpcRequestBuilder(
        model_name=model_name,
        signatures=signatures)
  else:
    raise NotImplementedError('Unsupported RequestSpec kind {!r}'.format(kind))
  builder.ReadExamplesArtifact(
      examples,
      split_name=split_name,
      num_examples=num_examples)
  return builder.BuildRequests()
# TODO(b/151790176): Move to tfx_bsl, or keep it if TF adds a proper public API.
def _parse_saved_model_signatures(
    model_path: Text,
    tag_set: Iterable[Text],
    signature_names: Iterable[Text]) -> Mapping[Text, _SignatureDef]:
  """Parse SignatureDefs of given signature names from SavedModel.

  Among one or more MetaGraphDefs in SavedModel, the first one that has all the
  tag_set elements is chosen. Selected MetaGraphDef should have signatures for
  all given signature names.

  Args:
    model_path: A path to the SavedModel directory.
    tag_set: A set of tags MetaGraphDef should have.
    signature_names: A list of signature names to retrieve.

  Returns:
    A mapping from signature name to SignatureDef.

  Raises:
    ValueError: If any requested signature name is missing from the selected
      MetaGraphDef.
  """
  # Fall back to the conventional serving defaults when unspecified.
  if not tag_set:
    tag_set = {tf.saved_model.SERVING}
    logging.info('tag_set is not given. Using %r instead.', tag_set)
  if not signature_names:
    signature_names = [tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    logging.info('signature_names are not given. Using %r instead.',
                 signature_names)
  meta_graph_def = loader_impl.SavedModelLoader(
      model_path).get_meta_graph_def_from_tags(tag_set)
  available_signatures = meta_graph_def.signature_def
  for name in signature_names:
    if name not in available_signatures:
      raise ValueError('SignatureDef of name {} could not be found in '
                       'MetaGraphDef'.format(name))
  return {name: available_signatures[name] for name in signature_names}
class _BaseRequestBuilder(six.with_metaclass(abc.ABCMeta, object)):
  """Base class for all RequestBuilders.

  Subclasses read raw serialized records from an Examples artifact via
  `ReadExamplesArtifact` and then convert them into model server request
  protos via `BuildRequests`.
  """

  def __init__(self):
    self._records = [] # type: List[bytes]
    self._payload_format = example_gen_pb2.PayloadFormat.FORMAT_UNSPECIFIED

  # TODO(jjong): The method strongly assumes that the output of ExampleGen is
  # a gzipped TFRecords of tf.Example. We need a better abstraction (e.g. TFXIO)
  # to accept arbitrary file format and convert it to appropriate request types.
  def ReadExamplesArtifact(self, examples: types.Artifact, num_examples: int,
                           split_name: Optional[Text] = None):
    """Read records from Examples artifact.

    Currently it assumes Examples artifact contains serialized tf.Example in
    gzipped TFRecord files.

    Args:
      examples: `Examples` artifact.
      num_examples: Number of examples to read. If the specified value is larger
        than the actual number of examples, all examples would be read.
      split_name: Name of the split to read from the Examples artifact.

    Raises:
      RuntimeError: If read twice.
    """
    if self._records:
      raise RuntimeError('Cannot read records twice.')
    if num_examples < 1:
      raise ValueError('num_examples < 1 (got {})'.format(num_examples))
    available_splits = artifact_utils.decode_split_names(examples.split_names)
    if not available_splits:
      raise ValueError('No split_name is available in given Examples artifact.')
    # Default to the first split when the caller does not pick one.
    if split_name is None:
      split_name = available_splits[0]
    if split_name not in available_splits:
      raise ValueError(
          'No split_name {}; available split names: {}'.format(
              split_name, ', '.join(available_splits)))
    # ExampleGen generates artifacts under each split_name directory.
    glob_pattern = os.path.join(examples.uri, split_name, '*')
    # Read records verbatim (no parsing) by requesting the raw records column.
    tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
        examples=[examples],
        telemetry_descriptors=_TELEMETRY_DESCRIPTORS,
        schema=None,
        read_as_raw_records=True,
        raw_record_column_name=_RAW_RECORDS_COLUMN)
    filenames = fileio.glob(glob_pattern)
    if not filenames:
      raise ValueError('Unable to find examples matching {}.'.format(
          glob_pattern))
    self._payload_format = examples_utils.get_payload_format(examples)
    tfxio = tfxio_factory(filenames)
    self._ReadFromDataset(
        tfxio.TensorFlowDataset(
            dataset_options.TensorFlowDatasetOptions(batch_size=num_examples)))

  def _ReadFromDataset(self, dataset: tf.data.Dataset):
    """Drains the first batch of `dataset` into `self._records`.

    Handles both eager and graph (tf.compat.v1 Session) execution modes.
    """
    # Only one batch is needed; batch_size above already caps num_examples.
    dataset = dataset.take(1)
    if tf.executing_eagerly():
      for d in dataset:
        self._records.extend(d[_RAW_RECORDS_COLUMN].numpy())
    else:
      it = tf.compat.v1.data.make_one_shot_iterator(dataset)
      next_el = it.get_next()
      with tf.Session() as sess:
        while True:
          try:
            d = sess.run(next_el)
            self._records.extend(d[_RAW_RECORDS_COLUMN])
          except tf.errors.OutOfRangeError:
            break

  @abc.abstractmethod
  def BuildRequests(self) -> List[iv_types.Request]:
    """Transform read records (bytes) to the request type."""
class _TFServingRpcRequestBuilder(_BaseRequestBuilder):
  """RequestBuilder for TF Serving RPC requests.

  There are three kinds of request the builder can make:

  - ClassificationRequest
  - RegressionRequest
  - PredictRequest

  Types of request to build is determined by inspecting SavedModel and getting
  SignatureDef from it. What user can configure is the signature names to use.

  To build a ClassificationRequest or a RegressionRequest, logical format of
  the record should be TF_EXAMPLE.

  To build a PredictRequest, its corresponding SignatureDef should have a
  single input argument that accepts serialized record inputs. Its logical
  format does not matter as long as user have a correct parsing logic.
  """

  def __init__(self,
               model_name: Text,
               signatures: Mapping[Text, _SignatureDef]):
    super(_TFServingRpcRequestBuilder, self).__init__()
    self._model_name = model_name
    self._signatures = signatures
    # Lazily-parsed tf.train.Example cache; see the `examples` property.
    self._examples = []

  @property
  def examples(self) -> List[tf.train.Example]:
    """Records parsed as tf.train.Example (requires FORMAT_TF_EXAMPLE)."""
    if not self._examples:
      if (self._payload_format !=
          example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE):
        raise ValueError(
            'Data payload format should be FORMAT_TF_EXAMPLE. Got: {}'.format(
                example_gen_pb2.PayloadFormat.Name(self._payload_format)))
      for record in self._records:
        example = tf.train.Example()
        example.ParseFromString(record)
        self._examples.append(example)
    return self._examples

  def BuildRequests(self) -> List[iv_types.TensorFlowServingRequest]:
    """Builds one request per (signature, record) pair.

    The request type for each signature is chosen from its method_name.
    """
    assert self._records, 'Records are empty.'
    result = []
    for signature_name, signature_def in self._signatures.items():
      if signature_def.method_name == tf.saved_model.PREDICT_METHOD_NAME:
        result.extend(
            self._BuildPredictRequests(
                signature_name, self._GetSerializedInputKey(signature_def)))
      elif signature_def.method_name == tf.saved_model.CLASSIFY_METHOD_NAME:
        result.extend(self._BuildClassificationRequests(signature_name))
      elif signature_def.method_name == tf.saved_model.REGRESS_METHOD_NAME:
        result.extend(self._BuildRegressionRequests(signature_name))
      else:
        raise ValueError('Unknown method name {}'.format(
            signature_def.method_name))
    return result

  def _GetSerializedInputKey(self, signature_def: _SignatureDef):
    """Gets key for SignatureDef input that consumes serialized record.

    To build a PredictRequest, SignatureDef inputs should have a single input
    argument that accepts serialized record inputs. The input TensorSpec
    should have dtype=DT_STRING and shape=TensorShape([None]).

    Args:
      signature_def: A SignatureDef proto message.

    Returns:
      An input key for the serialized input.
    """
    signature_input_keys = list(signature_def.inputs.keys())
    if len(signature_input_keys) == 1:
      input_key = signature_input_keys[0]
      input_spec = signature_def.inputs[input_key]
      if (input_spec.dtype == tf.dtypes.string.as_datatype_enum
          and input_spec.tensor_shape == tf.TensorShape([None]).as_proto()):
        return input_key
    # TODO(b/151697719): General Predict method signature support.
    raise ValueError(
        'Unable to find valid input key from SignatureDef. In order to make '
        'PredictRequest, model should define signature that accepts serialized '
        'record inputs, i.e. signature with single input whose dtype=DT_STRING '
        'and shape=TensorShape([None]).')

  def _BuildClassificationRequests(self, signature_name: Text):
    """Yields a ClassificationRequest per parsed example."""
    for example in self.examples:
      request = classification_pb2.ClassificationRequest()
      request.model_spec.name = self._model_name
      request.model_spec.signature_name = signature_name
      request.input.example_list.examples.append(example)
      yield request

  def _BuildRegressionRequests(self, signature_name: Text):
    """Yields a RegressionRequest per parsed example."""
    for example in self.examples:
      request = regression_pb2.RegressionRequest()
      request.model_spec.name = self._model_name
      request.model_spec.signature_name = signature_name
      request.input.example_list.examples.append(example)
      yield request

  def _BuildPredictRequests(self, signature_name: Text,
                            serialized_input_key: Text):
    """Yields a PredictRequest per raw record (no tf.Example parsing)."""
    for record in self._records:
      request = predict_pb2.PredictRequest()
      request.model_spec.name = self._model_name
      request.model_spec.signature_name = signature_name
      request.inputs[serialized_input_key].CopyFrom(
          tf.make_tensor_proto([record]))
      yield request
"""TFX InfraValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from tfx import types
from tfx.components.infra_validator import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import executor_spec
from tfx.proto import infra_validator_pb2
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
class InfraValidator(base_component.BaseComponent):
  """A TFX component to validate the model against the serving infrastructure.

  An infra validation is done by loading the model to the exactly same serving
  binary that is used in production, and additionaly sending some requests to
  the model server. Such requests can be specified from Examples artifact.

  ## Examples

  Full example using TensorFlowServing binary running on local docker.

  ```
  infra_validator = InfraValidator(
      model=trainer.outputs['model'],
      examples=test_example_gen.outputs['examples'],
      serving_spec=ServingSpec(
          tensorflow_serving=TensorFlowServing(  # Using TF Serving.
              tags=['latest']
          ),
          local_docker=LocalDockerConfig(),  # Running on local docker.
      ),
      validation_spec=ValidationSpec(
          max_loading_time_seconds=60,
          num_tries=5,
      ),
      request_spec=RequestSpec(
          tensorflow_serving=TensorFlowServingRequestSpec(),
          num_examples=1,
      )
  )
  ```

  Minimal example when running on Kubernetes.

  ```
  infra_validator = InfraValidator(
      model=trainer.outputs['model'],
      examples=test_example_gen.outputs['examples'],
      serving_spec=ServingSpec(
          tensorflow_serving=TensorFlowServing(
              tags=['latest']
          ),
          kubernetes=KubernetesConfig(),  # Running on Kubernetes.
      ),
  )
  ```
  """

  SPEC_CLASS = standard_component_specs.InfraValidatorSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
  DRIVER_CLASS = base_driver.BaseDriver

  def __init__(
      self,
      model: types.Channel,
      serving_spec: infra_validator_pb2.ServingSpec,
      examples: Optional[types.Channel] = None,
      blessing: Optional[types.Channel] = None,
      request_spec: Optional[infra_validator_pb2.RequestSpec] = None,
      validation_spec: Optional[infra_validator_pb2.ValidationSpec] = None,
      instance_name: Optional[Text] = None):
    """Construct an InfraValidator component.

    Args:
      model: A `Channel` of `ModelExportPath` type, usually produced by
        [Trainer](https://www.tensorflow.org/tfx/guide/trainer) component.
        _required_
      serving_spec: A `ServingSpec` configuration about serving binary and
        test platform config to launch model server for validation. _required_
      examples: A `Channel` of `ExamplesPath` type, usually produced by
        [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
        component. If not specified, InfraValidator does not issue requests
        for validation.
      blessing: Output `Channel` of `InfraBlessingPath` that contains the
        validation result.
      request_spec: Optional `RequestSpec` configuration about making requests
        from `examples` input. If not specified, InfraValidator does not issue
        requests for validation.
      validation_spec: Optional `ValidationSpec` configuration.
      instance_name: Optional name assigned to this specific instance of
        InfraValidator. Required only if multiple InfraValidator components
        are declared in the same pipeline.
    """
    # Create the output blessing channel if the caller did not supply one.
    blessing = blessing or types.Channel(type=standard_artifacts.InfraBlessing)
    spec = standard_component_specs.InfraValidatorSpec(
        model=model,
        examples=examples,
        blessing=blessing,
        serving_spec=serving_spec,
        validation_spec=validation_spec,
        request_spec=request_spec
    )
    super(InfraValidator, self).__init__(spec=spec, instance_name=instance_name)
"""Modules for organizing various model server binaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
from typing import Any, Dict, List, Optional, Text
from docker import types as docker_types
import six
from tfx.components.infra_validator.model_server_clients import base_client
from tfx.components.infra_validator.model_server_clients import tensorflow_serving_client
from tfx.proto import infra_validator_pb2
from tfx.utils.model_paths import tf_serving_flavor
def parse_serving_binaries( # pylint: disable=invalid-name
    serving_spec: infra_validator_pb2.ServingSpec) -> List['ServingBinary']:
  """Parse `ServingBinary`s from `ServingSpec`."""
  kind = serving_spec.WhichOneof('serving_binary')
  if kind != 'tensorflow_serving':
    raise ValueError('Invalid serving_binary {}'.format(kind))
  config = serving_spec.tensorflow_serving
  image_name = config.image_name or None
  # One TensorFlowServing binary per requested tag, then one per digest.
  binaries = [
      TensorFlowServing(
          image_name=image_name,
          model_name=serving_spec.model_name,
          tag=tag) for tag in config.tags
  ]
  binaries.extend(
      TensorFlowServing(
          image_name=image_name,
          model_name=serving_spec.model_name,
          digest=digest) for digest in config.digests)
  return binaries
class ServingBinary(six.with_metaclass(abc.ABCMeta, object)):
  """Base class for serving binaries.

  A ServingBinary describes a model server: its container image (for docker
  compatible binaries), how to configure it, and how to create a client that
  talks to it.
  """

  @abc.abstractproperty
  def container_port(self) -> int:
    """Container port of the model server.

    Only applies to docker compatible serving binaries.
    """
    raise NotImplementedError('{} is not docker compatible.'.format(
        type(self).__name__))

  @abc.abstractproperty
  def image(self) -> Text:
    """Container image of the model server.

    Only applies to docker compatible serving binaries.
    """
    raise NotImplementedError('{} is not docker compatible.'.format(
        type(self).__name__))

  @abc.abstractmethod
  def MakeEnvVars(self, *args: Any) -> Dict[Text, Text]:
    """Construct environment variables to be used in container image.

    Only applies to docker compatible serving binaries.

    Args:
      *args: List of unresolved variables to configure environment variables.

    Returns:
      A dictionary of environment variables inside container.
    """
    raise NotImplementedError('{} is not docker compatible.'.format(
        type(self).__name__))

  @abc.abstractmethod
  def MakeDockerRunParams(self, *args: Any) -> Dict[Text, Any]:
    """Make parameters for docker `client.containers.run`.

    Only applies to docker compatible serving binaries.

    Args:
      *args: List of unresolved variables to configure docker run parameters.

    Returns:
      A dictionary of docker run parameters.
    """
    raise NotImplementedError('{} is not docker compatible.'.format(
        type(self).__name__))

  @abc.abstractmethod
  def MakeClient(self, endpoint: Text) -> base_client.BaseModelServerClient:
    """Create a model server client of this serving binary."""
    raise NotImplementedError('{} does not implement MakeClient.'.format(
        type(self).__name__))
class TensorFlowServing(ServingBinary):
  """TensorFlow Serving binary."""

  _BASE_DOCKER_RUN_PARAMS = {
      # Enable auto-removal of the container on docker daemon after container
      # process exits.
      'auto_remove': True,
      # Run container in the background instead of streaming its output.
      'detach': True,
      # Publish all ports to the host.
      'publish_all_ports': True,
  }
  _DEFAULT_IMAGE_NAME = 'tensorflow/serving'
  _DEFAULT_GRPC_PORT = 8500
  _DEFAULT_MODEL_BASE_PATH = '/model'

  def __init__(
      self,
      model_name: Text,
      image_name: Optional[Text] = None,
      tag: Optional[Text] = None,
      digest: Optional[Text] = None,
  ):
    """Constructs a TensorFlowServing binary descriptor.

    Args:
      model_name: Model name the server should expose.
      image_name: Optional docker image name; defaults to tensorflow/serving.
      tag: Image tag. Exactly one of `tag` or `digest` must be given.
      digest: Image digest. Exactly one of `tag` or `digest` must be given.
    """
    super(TensorFlowServing, self).__init__()
    self._model_name = model_name
    if (tag is None) == (digest is None):
      raise ValueError('Exactly one of `tag` or `digest` should be used.')
    image_name = image_name or self._DEFAULT_IMAGE_NAME
    if tag is not None:
      self._image = '{}:{}'.format(image_name, tag)
    else:
      self._image = '{}@{}'.format(image_name, digest)

  @property
  def container_port(self) -> int:
    return self._DEFAULT_GRPC_PORT

  @property
  def image(self) -> Text:
    return self._image

  def MakeEnvVars(
      self, model_path: Optional[Text] = None) -> Dict[Text, Text]:
    if model_path is None:
      model_base_path = self._DEFAULT_MODEL_BASE_PATH
    else:
      model_base_path = tf_serving_flavor.parse_model_base_path(model_path)
    return {
        'MODEL_NAME': self._model_name,
        'MODEL_BASE_PATH': model_base_path
    }

  def MakeDockerRunParams(
      self,
      model_path: Text,
      needs_mount: bool) -> Dict[Text, Any]:
    """Make parameters for docker `client.containers.run`.

    Args:
      model_path: A path to the model.
      needs_mount: If True, model_path will be mounted to the container.

    Returns:
      A dictionary of docker run parameters.
    """
    result = dict(
        self._BASE_DOCKER_RUN_PARAMS,
        image=self._image)
    if needs_mount:
      # model_path should be a local directory. In order to make TF Serving see
      # the host model path, we need to mount model path volume to the
      # container.
      assert os.path.isdir(model_path), '{} does not exist'.format(model_path)
      container_model_path = tf_serving_flavor.make_model_path(
          model_base_path=self._DEFAULT_MODEL_BASE_PATH,
          model_name=self._model_name,
          version=1)
      result.update(
          environment=self.MakeEnvVars(),
          mounts=[
              docker_types.Mount(
                  type='bind',
                  target=container_model_path,
                  source=model_path,
                  read_only=True)
          ])
    else:
      # model_path is presumably a remote URI. TF Serving is able to pickup
      # model in remote directly using gfile, so all we need to do is setting
      # environment variables correctly.
      result.update(
          environment=self.MakeEnvVars(model_path=model_path))
    return result

  def MakeClient(self, endpoint: Text) -> base_client.BaseModelServerClient:
    return tensorflow_serving_client.TensorFlowServingClient(
        endpoint=endpoint, model_name=self._model_name)
"""TFX InfraValidator executor definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional, Text
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import REQUEST_SPEC_KEY
from tfx.types.standard_component_specs import SERVING_SPEC_KEY
from tfx.types.standard_component_specs import VALIDATION_SPEC_KEY
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
# Default values for ValidationSpec fields left unset by the user.
_DEFAULT_NUM_TRIES = 5
_DEFAULT_POLLING_INTERVAL_SEC = 1
_DEFAULT_MAX_LOADING_TIME_SEC = 300
# Default model name when ServingSpec.model_name is unset.
_DEFAULT_MODEL_NAME = 'infra-validation-model'
# Proto message keys for oneof block.
_TENSORFLOW_SERVING = 'tensorflow_serving'
_LOCAL_DOCKER = 'local_docker'
_KUBERNETES = 'kubernetes'
# Artifact property keys
_BLESSED_KEY = 'blessed'
# Filename of infra blessing artifact on succeed.
_BLESSED_FILENAME = 'INFRA_BLESSED'
# Filename of infra blessing artifact on fail.
_NOT_BLESSED_FILENAME = 'INFRA_NOT_BLESSED'
def _create_model_server_runner(
    model_path: Text,
    serving_binary: serving_bins.ServingBinary,
    serving_spec: infra_validator_pb2.ServingSpec):
  """Create a ModelServerRunner from a model, a ServingBinary and a ServingSpec.

  Args:
    model_path: An IV-flavored model path. (See model_path_utils.py)
    serving_binary: One of ServingBinary instances parsed from the
      `serving_spec`.
    serving_spec: A ServingSpec instance of this infra validation.

  Returns:
    A ModelServerRunner.

  Raises:
    NotImplementedError: If the `serving_platform` oneof is neither
      `local_docker` nor `kubernetes`.
  """
  platform = serving_spec.WhichOneof('serving_platform')
  if platform == 'local_docker':
    return local_docker_runner.LocalDockerRunner(
        model_path=model_path,
        serving_binary=serving_binary,
        serving_spec=serving_spec
    )
  elif platform == 'kubernetes':
    return kubernetes_runner.KubernetesRunner(
        model_path=model_path,
        serving_binary=serving_binary,
        serving_spec=serving_spec
    )
  else:
    raise NotImplementedError('Invalid serving_platform {}'.format(platform))
def _mark_blessed(blessing: types.Artifact) -> None:
  """Record a successful infra validation on the blessing artifact."""
  logging.info('Model passed infra validation.')
  marker_path = os.path.join(blessing.uri, _BLESSED_FILENAME)
  io_utils.write_string_file(marker_path, '')
  blessing.set_int_custom_property(_BLESSED_KEY, 1)
def _mark_not_blessed(blessing: types.Artifact) -> None:
  """Record a failed infra validation on the blessing artifact."""
  logging.info('Model failed infra validation.')
  marker_path = os.path.join(blessing.uri, _NOT_BLESSED_FILENAME)
  io_utils.write_string_file(marker_path, '')
  blessing.set_int_custom_property(_BLESSED_KEY, 0)
class Executor(base_executor.BaseExecutor):
"""TFX infra validator executor."""
  def __init__(self,
               context: Optional[base_executor.BaseExecutor.Context] = None):
    """Initializes the executor with an empty list of cleanup callbacks."""
    super(Executor, self).__init__(context)
    # Deferred cleanup callbacks registered via _AddCleanup, run by _Cleanup.
    self._cleanups = []
def _AddCleanup(self, function, *args, **kwargs):
self._cleanups.append(functools.partial(function, *args, **kwargs))
def _Cleanup(self):
for cleanup in self._cleanups:
try:
cleanup()
except: # pylint: disable=broad-except, bare-except
logging.warning('Error occurred during cleanup.', exc_info=True)
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Contract for running InfraValidator Executor.
Args:
input_dict:
- `model`: Single `Model` artifact that we're validating.
- `examples`: `Examples` artifacts to be used for test requests.
output_dict:
- `blessing`: Single `InfraBlessing` artifact containing the validated
result. It is an empty file with the name either of INFRA_BLESSED or
INFRA_NOT_BLESSED.
exec_properties:
- `serving_spec`: Serialized `ServingSpec` configuration.
- `validation_spec`: Serialized `ValidationSpec` configuration.
- `request_spec`: Serialized `RequestSpec` configuration.
"""
self._log_startup(input_dict, output_dict, exec_properties)
model = artifact_utils.get_single_instance(input_dict[MODEL_KEY])
blessing = artifact_utils.get_single_instance(output_dict[BLESSING_KEY])
if input_dict.get(EXAMPLES_KEY):
examples = artifact_utils.get_single_instance(input_dict[EXAMPLES_KEY])
else:
examples = None
serving_spec = infra_validator_pb2.ServingSpec()
proto_utils.json_to_proto(exec_properties[SERVING_SPEC_KEY], serving_spec)
if not serving_spec.model_name:
serving_spec.model_name = _DEFAULT_MODEL_NAME
validation_spec = infra_validator_pb2.ValidationSpec()
if exec_properties.get(VALIDATION_SPEC_KEY):
proto_utils.json_to_proto(exec_properties[VALIDATION_SPEC_KEY],
validation_spec)
if not validation_spec.num_tries:
validation_spec.num_tries = _DEFAULT_NUM_TRIES
if not validation_spec.max_loading_time_seconds:
validation_spec.max_loading_time_seconds = _DEFAULT_MAX_LOADING_TIME_SEC
if exec_properties.get(REQUEST_SPEC_KEY):
request_spec = infra_validator_pb2.RequestSpec()
proto_utils.json_to_proto(exec_properties[REQUEST_SPEC_KEY],
request_spec)
else:
request_spec = None
with self._InstallGracefulShutdownHandler():
self._Do(
model=model,
examples=examples,
blessing=blessing,
serving_spec=serving_spec,
validation_spec=validation_spec,
request_spec=request_spec,
)
@contextlib.contextmanager
def _InstallGracefulShutdownHandler(self):
# pylint: disable=g-doc-return-or-yield
"""Install graceful shutdown behavior.
Caveat: InfraValidator currently only recognizes SIGTERM signal as README.ml-pipelines-sdk.md
graceful shutdown. Furthermore, SIGTERM can be handled only if the executor
is running on the MainThread (the thread that runs the python interpreter)
due to the limitation of Python API.
When the executor is running on Kubernetes, SIGTERM is README.ml-pipelines-sdk.md standard way to
signal the graceful shutdown. Python default behavior for receiving SIGTERM
is to terminate the process without raising any exception. By registering README.ml-pipelines-sdk.md
handler that raises on signal, we can effectively transform the signal to an
exception, and we can reuse our cleanup code inside "except" or "finally"
block during the grace period.
When the executor is run by the local Beam DirectRunner, the executor thread
is one of the worker threads (not README.ml-pipelines-sdk.md MainThread) therefore SIGTERM cannot
be recognized. If either of MainThread or worker thread receives SIGTERM,
executor will die immediately without grace period.
Even if the executor fails to shutdown gracefully, external resources that
are created by model server runner can be cleaned up if the platform
supports such mechanism (e.g. activeDeadlineSeconds in Kubernetes).
"""
def _handler(signum, frame):
del frame # Unused.
raise error_types.GracefulShutdown('Got signal {}.'.format(signum))
try:
old_handler = signal.signal(signal.SIGTERM, _handler)
except ValueError:
# If current thread is not README.ml-pipelines-sdk.md MainThread, it is not allowed to register
# the signal handler (ValueError raised).
logging.info('Unable to register signal handler for non-MainThread '
'(name=%s). SIGTERM will not be handled.',
threading.current_thread().name)
old_handler = None
try:
yield
finally:
self._Cleanup()
if old_handler:
signal.signal(signal.SIGTERM, old_handler)
def _Do(
self,
model: types.Artifact,
examples: Optional[types.Artifact],
blessing: types.Artifact,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
request_spec: Optional[infra_validator_pb2.RequestSpec],
):
if examples and request_spec:
logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
requests = request_builder.build_requests(
model_name=serving_spec.model_name,
model=model,
examples=examples,
request_spec=request_spec)
else:
logging.info('InfraValidator will be run in LOAD_ONLY mode.')
requests = []
model_path = self._PrepareModelPath(model.uri, serving_spec)
# TODO(jjong): Make logic parallel.
all_passed = True
for serving_binary in serving_bins.parse_serving_binaries(serving_spec):
all_passed &= self._ValidateWithRetry(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec,
validation_spec=validation_spec,
requests=requests)
if all_passed:
_mark_blessed(blessing)
else:
_mark_not_blessed(blessing)
def _PrepareModelPath(
self, model_uri: Text,
serving_spec: infra_validator_pb2.ServingSpec) -> Text:
model_path = path_utils.serving_model_path(model_uri)
serving_binary = serving_spec.WhichOneof('serving_binary')
if serving_binary == _TENSORFLOW_SERVING:
# TensorFlow Serving requires model to be stored in its own directory
# structure flavor. If current model_path does not conform to the flavor,
# we need to make README.ml-pipelines-sdk.md copy to the temporary path.
try:
# Check whether current model_path conforms to the tensorflow serving
# model path flavor. (Parsed without exception)
tf_serving_flavor.parse_model_path(
model_path,
expected_model_name=serving_spec.model_name)
except ValueError:
# Copy the model to comply with the tensorflow serving model path
# flavor.
temp_model_path = tf_serving_flavor.make_model_path(
model_base_path=self._get_tmp_dir(),
model_name=serving_spec.model_name,
version=int(time.time()))
io_utils.copy_dir(src=model_path, dst=temp_model_path)
self._AddCleanup(io_utils.delete_dir, self._context.get_tmp_path())
return temp_model_path
return model_path
def _ValidateWithRetry(
self, model_path: Text,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
requests: List[iv_types.Request]):
for i in range(validation_spec.num_tries):
logging.info('Starting infra validation (attempt %d/%d).', i + 1,
validation_spec.num_tries)
try:
self._ValidateOnce(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec,
validation_spec=validation_spec,
requests=requests)
except error_types.GracefulShutdown:
# GracefulShutdown means infra validation aborted. No more retry and
# escalate the error.
raise
except Exception as e: # pylint: disable=broad-except
# Other exceptions indicates validation failure. Log the error and
# retry.
logging.exception('Infra validation (attempt %d/%d) failed.', i + 1,
validation_spec.num_tries)
if isinstance(e, error_types.DeadlineExceeded):
logging.info('Consider increasing the value of '
'ValidationSpec.max_loading_time_seconds.')
else:
# If validation has passed without any exception, succeeded.
return True
# Every trial has failed. Marking model as not blessed.
return False
def _ValidateOnce(
self, model_path: Text,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
requests: List[iv_types.Request]):
deadline = time.time() + validation_spec.max_loading_time_seconds
runner = _create_model_server_runner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec)
try:
logging.info('Starting %r.', runner)
runner.Start()
# Check model is successfully loaded.
runner.WaitUntilRunning(deadline)
client = serving_binary.MakeClient(runner.GetEndpoint())
client.WaitUntilModelLoaded(
deadline, polling_interval_sec=_DEFAULT_POLLING_INTERVAL_SEC)
# Check model can be successfully queried.
if requests:
client.SendRequests(requests)
finally:
logging.info('Stopping %r.', runner)
runner.Stop() | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/components/infra_validator/executor.py | 0.870432 | 0.170802 | executor.py | pypi |
"""Module for shared interface of every model server clients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import time
from typing import List
from absl import logging
import six
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import types
class BaseModelServerClient(six.with_metaclass(abc.ABCMeta, object)):
  """Common interface for all model server clients."""

  @abc.abstractmethod
  def _GetServingStatus(self) -> types.ModelServingStatus:
    """Check whether the model is available for query or not.

    Returns:
      A ModelServingStatus.
    """
    pass

  def WaitUntilModelLoaded(self, deadline: float,
                           polling_interval_sec: int) -> None:
    """Wait until model is loaded and available.

    Polls _GetServingStatus every `polling_interval_sec` seconds until the
    model is ready, the server reports it unavailable, or `deadline` passes.

    Args:
      deadline: A deadline time in UTC timestamp (in seconds).
      polling_interval_sec: GetServingStatus() polling interval.

    Raises:
      DeadlineExceeded: When deadline exceeded before model is ready.
      ValidationFailed: If validation failed explicitly.
    """
    while time.time() < deadline:
      status = self._GetServingStatus()
      if status == types.ModelServingStatus.NOT_READY:
        # Rate-limited log so polling does not spam the output.
        logging.log_every_n_seconds(
            level=logging.INFO,
            n_seconds=10,
            msg='Waiting for model to be loaded...')
        time.sleep(polling_interval_sec)
        continue
      elif status == types.ModelServingStatus.UNAVAILABLE:
        # Server gave a definitive negative answer; no point in waiting more.
        raise error_types.ValidationFailed(
            'Model server failed to load the model.')
      else:
        logging.info('Model is successfully loaded.')
        return

    raise error_types.DeadlineExceeded(
        'Deadline exceeded while waiting the model to be loaded.')

  @abc.abstractmethod
  def _SendRequest(self, request: types.Request) -> None:
    """Send a request to the model server.

    Args:
      request: A request proto.
    """
    pass

  def SendRequests(self, requests: List[types.Request]) -> None:
    """Send requests to the model server.

    Args:
      requests: A list of request protos.

    Raises:
      ValidationFailed: If error occurred while sending requests.
    """
    for r in requests:
      try:
        self._SendRequest(r)
      except Exception as original_error:  # pylint: disable=broad-except
        # Wrap any transport/server error as a ValidationFailed, chaining the
        # original exception as the cause (py2/py3-compatible via six).
        six.raise_from(
            error_types.ValidationFailed(
                'Model server failed to respond to the request {}'.format(r)),
            original_error)
"""Module for shared interface of every model server runners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Text
import six
class BaseModelServerRunner(six.with_metaclass(abc.ABCMeta, object)):
  """Shared interface of all model server runners.

  Model server runner is responsible for managing the model server job and
  relevant resources in the serving platform. For example, model server runner
  for kubernetes will launch a Pod of model server with required resources
  allocated, and tear down all the kubernetes resources once infra validation
  is done. Note that model server runner does *not* interact with model server
  app.

  Model server job have 5 states: Initial, Scheduled, Running, Aborted, and End.
  Each state transition is depicted in the diagram below.

  ```
           +-----------+
           |  Initial  |
           +-----+-----+
                 |  Start()
           +-----v-----+
        +--+ Scheduled |
        |  +-----+-----+
        |        |  WaitUntilRunning()
        |  +-----v-----+
        +--+  Running  |
        |  +-----+-----+
        |        |
  +-----v-----+  |
  |  Aborted  +--+  Stop()
  +-----------+  |
                 |
           +-----v-----+
           |    End    |
           +-----------+
  ```

  At any step, the job can be aborted in the serving platform. Model server
  runner will NOT recover a job from failure (even if it can) and regard the
  abortion as a validation failure.

  All the infra validation logic (waiting for model loaded, sending requests,
  measuring metrics, etc.) will happen when model server job has reached Running
  state. This is not a scope of model server runner work.

  Depending on the serving platform, some of the states might be the same. For
  example, in a GCP cloud AI prediction service we have a global model server
  instance running, which makes Scheduled state and Running state
  indistinguishable. In such case, `WaitUntilRunning()` action will be a no-op.
  """

  @abc.abstractmethod
  def __repr__(self) -> Text:
    pass

  @abc.abstractmethod
  def GetEndpoint(self) -> Text:
    """Get an endpoint to the model server to connect to.

    Endpoint will be available after the model server job has reached the
    Running state.

    Raises:
      AssertionError: if runner hasn't reached the Running state.
    """

  @abc.abstractmethod
  def Start(self) -> None:
    """Start the model server in non-blocking manner.

    `Start()` will transition the job state from Initial to Scheduled. Serving
    platform will turn the job into Running state in the future.

    In `Start()`, model server runner should prepare the resources model server
    requires including config files, environment variables, volumes, proper
    authentication, computing resource allocation, etc.. Cleanup for the
    resources does not happen automatically, and you should call `Stop()` to do
    that if you have ever called `Start()`.

    It is not allowed to run `Start()` twice. If you need to restart the job,
    you should create another model server runner instance.
    """

  @abc.abstractmethod
  def WaitUntilRunning(self, deadline: float) -> None:
    """Wait until model server job is running.

    When this method is returned without error, the model server job is in the
    Running state where you can perform all the infra validation logic. It does
    not guarantee that model server job would remain in the Running state
    forever, (e.g. preemption could happen in some serving platform) and any
    kind of infra validation logic failure can be caused from model server job
    not being in the Running state. Still, it is a validation failure and we
    blame model for this.

    Args:
      deadline: A deadline time in UTC timestamp (in seconds).
    """

  @abc.abstractmethod
  def Stop(self) -> None:
    """Stop the model server in blocking manner.

    Model server job would be gracefully stopped once infra validation logic is
    done. Here is the place you need to cleanup every resources you've created
    in the `Start()`. It is recommended not to raise error during the `Stop()`
    as it will usually be called in the `finally` block.

    `Stop()` is guaranteed to be called if `Start()` is ever called, unless the
    process dies unexpectedly due to external factors (e.g. SIGKILL). `Stop()`
    can be called even when `Start()` was not completed. `Stop()` should not
    assume the completion of `Start()`.

    `Stop()` is also called when graceful shutdown for the *executor* (not
    model server) is requested. `Stop()` method should be finished within the
    graceful shutdown period, and it is perfectly fine to add a retry logic
    inside `Stop()` until the deadline is met.
    """
"""FnArgs for passing information to UDF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Text
import absl
import attr
import pyarrow as pa
import tensorflow as tf
from tfx import types
from tfx.components.util import tfxio_utils
from tfx.proto import trainer_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx.utils import proto_utils
from tfx_bsl.tfxio import dataset_options
from tensorflow_metadata.proto.v0 import schema_pb2
_TELEMETRY_DESCRIPTORS = ['Trainer']
class DataAccessor(NamedTuple):
  """Factories for accessing the examples consumed by training/tuning.

  Fields:
    tf_dataset_factory: Builds a tf.data.Dataset from file patterns, dataset
      options and an optional schema.
    record_batch_factory: Builds an iterator of Arrow RecordBatches from file
      patterns, record-batch options and an optional schema.
  """
  tf_dataset_factory: Callable[[
      List[Text],
      dataset_options.TensorFlowDatasetOptions,
      Optional[schema_pb2.Schema],
  ], tf.data.Dataset]
  record_batch_factory: Callable[[
      List[Text],
      dataset_options.RecordBatchesOptions,
      Optional[schema_pb2.Schema],
  ], Iterator[pa.RecordBatch]]
@attr.s
class FnArgs:
  """Args to pass to user defined training/tuning function(s).

  Attributes:
    working_dir: Working dir.
    train_files: A list of patterns for train files.
    eval_files: A list of patterns for eval files.
    train_steps: Number of train steps.
    eval_steps: Number of eval steps.
    schema_path: A single uri for schema file. Will be None if not specified.
    schema_file: Deprecated, use `schema_path` instead.
    transform_graph_path: An optional single uri for transform graph produced by
      TFT. Will be None if not specified.
    transform_output: Deprecated, use `transform_graph_path` instead.
    data_accessor: Contains factories that can create tf.data.Datasets or other
      means to access the train/eval data. They provide a uniform way of
      accessing data, regardless of how the data is stored on disk.
    serving_model_dir: A single uri for the output directory of the serving
      model.
    eval_model_dir: A single uri for the output directory of the eval model.
      Note that this is estimator only, Keras doesn't require it for TFMA.
    model_run_dir: A single uri for the output directory of model training
      related files.
    base_model: An optional base model path that will be used for this training.
    hyperparameters: An optional kerastuner.HyperParameters config.
    custom_config: An optional dictionary passed to the component.
  """
  working_dir = attr.ib(type=Text, default=None)
  train_files = attr.ib(type=List[Text], default=None)
  eval_files = attr.ib(type=List[Text], default=None)
  train_steps = attr.ib(type=int, default=None)
  eval_steps = attr.ib(type=int, default=None)
  schema_path = attr.ib(type=Text, default=None)
  # Deprecated alias of `schema_path` (see docstring).
  schema_file = attr.ib(type=Text, default=None)
  transform_graph_path = attr.ib(type=Text, default=None)
  # Deprecated alias of `transform_graph_path` (see docstring).
  transform_output = attr.ib(type=Text, default=None)
  data_accessor = attr.ib(type=DataAccessor, default=None)
  serving_model_dir = attr.ib(type=Text, default=None)
  eval_model_dir = attr.ib(type=Text, default=None)
  model_run_dir = attr.ib(type=Text, default=None)
  base_model = attr.ib(type=Text, default=None)
  hyperparameters = attr.ib(type=Text, default=None)
  custom_config = attr.ib(type=Dict[Text, Any], default=None)
def get_common_fn_args(input_dict: Dict[Text, List[types.Artifact]],
                       exec_properties: Dict[Text, Any],
                       working_dir: Optional[Text] = None) -> FnArgs:
  """Get common args of training and tuning.

  Args:
    input_dict: Input artifact dict. Reads the `examples` key (required), and
      the optional `transform_graph` and `schema` keys.
    exec_properties: Execution properties. Reads `train_args` and `eval_args`
      (JSON-serialized protos) and the optional `custom_config` key.
    working_dir: Optional working directory, passed through to the result.

  Returns:
    A FnArgs with train/eval file patterns, step counts, schema and transform
    graph paths, a DataAccessor, and the deserialized custom config.
  """
  if input_dict.get(standard_component_specs.TRANSFORM_GRAPH_KEY):
    transform_graph_path = artifact_utils.get_single_uri(
        input_dict[standard_component_specs.TRANSFORM_GRAPH_KEY])
  else:
    transform_graph_path = None

  if input_dict.get(standard_component_specs.SCHEMA_KEY):
    schema_path = io_utils.get_only_uri_in_dir(
        artifact_utils.get_single_uri(
            input_dict[standard_component_specs.SCHEMA_KEY]))
  else:
    schema_path = None

  train_args = trainer_pb2.TrainArgs()
  eval_args = trainer_pb2.EvalArgs()
  proto_utils.json_to_proto(
      exec_properties[standard_component_specs.TRAIN_ARGS_KEY], train_args)
  proto_utils.json_to_proto(
      exec_properties[standard_component_specs.EVAL_ARGS_KEY], eval_args)

  # Default behavior is train on `train` split (when splits is empty in train
  # args) and evaluate on `eval` split (when splits is empty in eval args).
  if not train_args.splits:
    train_args.splits.append('train')
    absl.logging.info("Train on the 'train' split when train_args.splits is "
                      'not set.')
  if not eval_args.splits:
    eval_args.splits.append('eval')
    absl.logging.info("Evaluate on the 'eval' split when eval_args.splits is "
                      'not set.')

  train_files = []
  for train_split in train_args.splits:
    train_files.extend([
        io_utils.all_files_pattern(uri)
        for uri in artifact_utils.get_split_uris(
            input_dict[standard_component_specs.EXAMPLES_KEY], train_split)
    ])

  eval_files = []
  for eval_split in eval_args.splits:
    eval_files.extend([
        io_utils.all_files_pattern(uri)
        for uri in artifact_utils.get_split_uris(
            input_dict[standard_component_specs.EXAMPLES_KEY], eval_split)
    ])

  data_accessor = DataAccessor(
      tf_dataset_factory=tfxio_utils.get_tf_dataset_factory_from_artifact(
          input_dict[standard_component_specs.EXAMPLES_KEY],
          _TELEMETRY_DESCRIPTORS),
      record_batch_factory=tfxio_utils.get_record_batch_factory_from_artifact(
          input_dict[standard_component_specs.EXAMPLES_KEY],
          _TELEMETRY_DESCRIPTORS))

  # https://github.com/tensorflow/tfx/issues/45: Replace num_steps=0 with
  # num_steps=None.  Conversion of the proto to python will set the default
  # value of an int as 0 so modify the value here.  Tensorflow will raise an
  # error if num_steps <= 0.
  train_steps = train_args.num_steps or None
  eval_steps = eval_args.num_steps or None

  # Load and deserialize custom config from execution properties.
  # Note that in the component interface the default serialization of custom
  # config is 'null' instead of '{}'. Therefore we need to default the
  # json_utils.loads to 'null' then populate it with an empty dict when
  # needed.
  custom_config = json_utils.loads(
      exec_properties.get(standard_component_specs.CUSTOM_CONFIG_KEY, 'null'))

  return FnArgs(
      working_dir=working_dir,
      train_files=train_files,
      eval_files=eval_files,
      train_steps=train_steps,
      eval_steps=eval_steps,
      schema_path=schema_path,
      transform_graph_path=transform_graph_path,
      data_accessor=data_accessor,
      custom_config=custom_config,
  )
"""TFX Trainer component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.trainer import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.proto import trainer_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TrainerSpec
from tfx.utils import json_utils
# TODO(b/147702778): update when switch generic executor as default.
class Trainer(base_component.BaseComponent):
  """A TFX component to train a TensorFlow model.

  The Trainer component is used to train and eval a model using given inputs
  and a user-supplied estimator.

  ## Providing an estimator
  The TFX executor will use the estimator provided in the `module_file` file
  to train the model.  The Trainer executor will look specifically for the
  `trainer_fn()` function within that file.  Before training, the executor
  will call that function expecting the following returned as a dictionary:

    - estimator: The
    [estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
    to be used by TensorFlow to train the model.
    - train_spec: The
    [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)
    to be used by the "train" part of the TensorFlow `train_and_evaluate()`
    call.
    - eval_spec: The
    [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)
    to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.
    - eval_input_receiver_fn: The
    [configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)
    to be used
    by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)
    component when validating the model.

  An example of `trainer_fn()` can be found in the [user-supplied
  code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))
  of the TFX Chicago Taxi pipeline example.

  *Note:* The default executor for this component trains locally.  This can be
  overridden to enable the model to be trained on other platforms.  The
  [Cloud AI Platform custom
  executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/trainer)
  provides an example how to implement this.

  Please see https://www.tensorflow.org/guide/estimators for more details.

  ## Example 1: Training locally
  ```
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=infer_schema.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(splits=['train'], num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(splits=['eval'], num_steps=5000))
  ```

  ## Example 2: Training through a cloud provider
  ```
  from tfx.extensions.google_cloud_ai_platform.trainer import executor as
  ai_platform_trainer_executor
  # Train using Google Cloud AI Platform.
  trainer = Trainer(
      custom_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_trainer_executor.Executor),
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=infer_schema.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(splits=['train'], num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(splits=['eval'], num_steps=5000))
  ```
  """

  SPEC_CLASS = TrainerSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      examples: types.Channel = None,
      transformed_examples: Optional[types.Channel] = None,
      transform_graph: Optional[types.Channel] = None,
      schema: Optional[types.Channel] = None,
      base_model: Optional[types.Channel] = None,
      hyperparameters: Optional[types.Channel] = None,
      module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      run_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      # TODO(b/147702778): deprecate trainer_fn.
      trainer_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None,
      train_args: Union[trainer_pb2.TrainArgs, Dict[Text, Any]] = None,
      eval_args: Union[trainer_pb2.EvalArgs, Dict[Text, Any]] = None,
      custom_config: Optional[Dict[Text, Any]] = None,
      custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
      model: Optional[types.Channel] = None,
      model_run: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct a Trainer component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, serving as
        the source of examples used in training (required). May be raw or
        transformed.
      transformed_examples: Deprecated field. Please set 'examples' instead.
      transform_graph: An optional Channel of type
        `standard_artifacts.TransformGraph`, serving as the input transform
        graph if present.
      schema: An optional Channel of type `standard_artifacts.Schema`, serving
        as the schema of training and eval data. Schema is optional when
        1) transform_graph is provided which contains schema.
        2) user module bypasses the usage of schema, e.g., hardcoded.
      base_model: A Channel of type `Model`, containing model that will be used
        for training. This can be used for warmstart, transfer learning or
        model ensembling.
      hyperparameters: A Channel of type `standard_artifacts.HyperParameters`,
        serving as the hyperparameters for training module. Tuner's output best
        hyperparameters can be feed into this.
      module_file: A path to python module file containing UDF model
        definition. For default executor, the module_file must implement a
        function named `trainer_fn` at its top level. The function must have
        the following signature.
          def trainer_fn(trainer.fn_args_utils.FnArgs,
                         tensorflow_metadata.proto.v0.schema_pb2) -> Dict:
            ...
          where the returned Dict has the following key-values.
            'estimator': an instance of tf.estimator.Estimator
            'train_spec': an instance of tf.estimator.TrainSpec
            'eval_spec': an instance of tf.estimator.EvalSpec
            'eval_input_receiver_fn': an instance of
              tfma.export.EvalInputReceiver. Exactly one of 'module_file' or
              'trainer_fn' must be supplied.
        For generic executor, the module_file must implement a function named
        `run_fn` at its top level with function signature:
        `def run_fn(trainer.fn_args_utils.FnArgs)`, and the trained model must
        be saved to FnArgs.serving_model_dir when execute this function.
      run_fn:  A python path to UDF model definition function for generic
        trainer. See 'module_file' for details. Exactly one of 'module_file'
        or 'run_fn' must be supplied if Trainer uses GenericExecutor.
      trainer_fn:  A python path to UDF model definition function for
        estimator based trainer. See 'module_file' for the required signature
        of the UDF. Exactly one of 'module_file' or 'trainer_fn' must be
        supplied.
      train_args: A trainer_pb2.TrainArgs instance or a dict, containing args
        used for training. Currently only splits and num_steps are available.
        If it's provided as a dict and any field is a RuntimeParameter, it
        should have the same field names as a TrainArgs proto message. Default
        behavior (when splits is empty) is train on `train` split.
      eval_args: A trainer_pb2.EvalArgs instance or a dict, containing args
        used for evaluation. Currently only splits and num_steps are
        available. If it's provided as a dict and any field is a
        RuntimeParameter, it should have the same field names as a EvalArgs
        proto message. Default behavior (when splits is empty) is evaluate on
        `eval` split.
      custom_config: A dict which contains additional training job parameters
        that will be passed into user module.
      custom_executor_spec: Optional custom executor spec.
      model: Optional `Model` channel for result of exported models.
      model_run: Optional `ModelRun` channel, as the working dir of models,
        can be used to output non-model related output (e.g., TensorBoard
        logs).
      instance_name: Optional unique instance name. Necessary iff multiple
        Trainer components are declared in the same pipeline.

    Raises:
      ValueError:
        - When both or neither of 'module_file' and user function
          (e.g., trainer_fn and run_fn) is supplied.
        - When both or neither of 'examples' and 'transformed_examples'
          is supplied.
        - When 'transformed_examples' is supplied but 'transform_graph'
          is not supplied.
    """
    if [bool(module_file), bool(run_fn), bool(trainer_fn)].count(True) != 1:
      raise ValueError(
          "Exactly one of 'module_file', 'trainer_fn', or 'run_fn' must be "
          "supplied.")

    if bool(examples) == bool(transformed_examples):
      # Bug fix: the message previously referred to 'example' and
      # 'transformed_example', which are not the actual parameter names.
      raise ValueError(
          "Exactly one of 'examples' or 'transformed_examples' must be "
          "supplied.")

    if transformed_examples and not transform_graph:
      raise ValueError("If 'transformed_examples' is supplied, "
                       "'transform_graph' must be supplied too.")

    # 'transformed_examples' is a deprecated alias for 'examples'.
    examples = examples or transformed_examples
    model = model or types.Channel(type=standard_artifacts.Model)
    model_run = model_run or types.Channel(type=standard_artifacts.ModelRun)
    spec = TrainerSpec(
        examples=examples,
        transform_graph=transform_graph,
        schema=schema,
        base_model=base_model,
        hyperparameters=hyperparameters,
        train_args=train_args,
        eval_args=eval_args,
        module_file=module_file,
        run_fn=run_fn,
        trainer_fn=trainer_fn,
        custom_config=json_utils.dumps(custom_config),
        model=model,
        model_run=model_run)
    super(Trainer, self).__init__(
        spec=spec,
        custom_executor_spec=custom_executor_spec,
        instance_name=instance_name)
"""TFX local trainer executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from typing import Any, Dict, List, Text
import absl
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.trainer import constants
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import deprecation_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
# Backwards-compatible alias: the deprecated name
# `tfx.components.trainer.executor.TrainerFnArgs` now resolves to
# `fn_args_utils.FnArgs` and emits a deprecation warning on use.
TrainerFnArgs = deprecation_utils.deprecated_alias( # pylint: disable=invalid-name
    deprecated_name='tfx.components.trainer.executor.TrainerFnArgs',
    name='tfx.components.trainer.fn_args_utils.FnArgs',
    func_or_class=fn_args_utils.FnArgs)
def _all_files_pattern(file_pattern: Text) -> Text:
  """Returns a glob pattern matching every entry directly under `file_pattern`."""
  wildcard = '*'
  return os.path.join(file_pattern, wildcard)
def _is_chief():
  """Returns true if this is run in the master (chief) of training cluster."""
  tf_config = json.loads(os.environ.get(constants.TF_CONFIG_ENV) or '{}')

  # Without a cluster spec in TF_CONFIG we are running single-process, and a
  # single process always acts as chief.
  cluster_spec = tf_config.get('cluster', {}) if tf_config else {}
  if not cluster_spec:
    return True

  task = tf_config['task']
  if task['type'] == 'chief':
    return True
  # 'master' is a legacy notation of chief node in distributed training flock;
  # only the first master task is treated as chief.
  return task['type'] == 'master' and task['index'] == 0
class GenericExecutor(base_executor.BaseExecutor):
  """Local generic trainer executor for the TFX Trainer component.

  The Trainer executor supplements TensorFlow training with a component to
  enable warm-start training of any user-specified TF model. The Trainer is a
  library built on top of TensorFlow that is expected to be integrated into a
  custom user-specified binary.

  To include Trainer in a TFX pipeline, configure your pipeline similar to
  https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L104.

  For more details on the Trainer component itself, please refer to
  https://tensorflow.org/tfx/guide/trainer. For a tutorial on Tensorflow,
  please refer to https://www.tensorflow.org/tutorials.

  How to create a trainer callback function to be used by this Trainer
  executor: A model training can be executed by TFX by first creating a run_fn
  callback method that defines, trains an TF Model and saves it to the provided
  location. This becomes the basis of the Executor for GenericTrainer. This
  Executor will then execute the run_fn with correct parameters by resolving
  the input artifacts, output artifacts and execution properties.
  """

  # Name of subdirectory which contains checkpoints from prior runs
  _CHECKPOINT_FILE_NAME = 'checkpoint'

  def _GetFnArgs(self, input_dict: Dict[Text, List[types.Artifact]],
                 output_dict: Dict[Text, List[types.Artifact]],
                 exec_properties: Dict[Text, Any]) -> fn_args_utils.FnArgs:
    """Resolves inputs/outputs/properties into a single FnArgs for the UDF.

    Args:
      input_dict: Input dict from input key to a list of ML-Metadata Artifacts.
      output_dict: Output dict from output key to a list of Artifacts.
      exec_properties: A dict of execution properties.

    Returns:
      A `fn_args_utils.FnArgs` populated with model/eval/model_run paths, the
      optional base model, and parsed hyperparameters.

    Raises:
      ValueError: If `custom_config` is present but does not parse to a dict.
    """
    # TODO(ruoyu): Make this a dict of tag -> uri instead of list.
    if input_dict.get(standard_component_specs.BASE_MODEL_KEY):
      base_model = path_utils.serving_model_path(
          artifact_utils.get_single_uri(
              input_dict[standard_component_specs.BASE_MODEL_KEY]))
    else:
      base_model = None

    if input_dict.get(standard_component_specs.HYPERPARAMETERS_KEY):
      hyperparameters_file = io_utils.get_only_uri_in_dir(
          artifact_utils.get_single_uri(
              input_dict[standard_component_specs.HYPERPARAMETERS_KEY]))
      hyperparameters_config = json.loads(
          file_io.read_file_to_string(hyperparameters_file))
    else:
      hyperparameters_config = None

    output_path = artifact_utils.get_single_uri(
        output_dict[standard_component_specs.MODEL_KEY])
    serving_model_dir = path_utils.serving_model_dir(output_path)
    eval_model_dir = path_utils.eval_model_dir(output_path)

    model_run_dir = artifact_utils.get_single_uri(
        output_dict[standard_component_specs.MODEL_RUN_KEY])

    # TODO(b/126242806) Use PipelineInputs when it is available in third_party.
    result = fn_args_utils.get_common_fn_args(input_dict, exec_properties)
    if result.custom_config and not isinstance(result.custom_config, dict):
      raise ValueError('custom_config in execution properties needs to be a '
                       'dict. Got %s instead.' % type(result.custom_config))
    result.transform_output = result.transform_graph_path
    result.serving_model_dir = serving_model_dir
    result.eval_model_dir = eval_model_dir
    result.model_run_dir = model_run_dir
    result.schema_file = result.schema_path
    result.base_model = base_model
    result.hyperparameters = hyperparameters_config
    return result

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Uses a user-supplied run_fn to train a TensorFlow model locally.

    The Trainer Executor invokes a run_fn callback function provided by the
    user via the module_file parameter. In this function, user defines the
    model and trains it, then saves the model and training related files
    (e.g, Tensorboard logs) to the provided locations.

    Args:
      input_dict: Input dict from input key to a list of ML-Metadata Artifacts.
        - examples: Examples used for training, must include 'train' and 'eval'
          if custom splits is not specified in train_args and eval_args.
        - transform_graph: Optional input transform graph.
        - transform_output: Optional input transform graph, deprecated.
        - schema: Schema of the data.
      output_dict: Output dict from output key to a list of Artifacts.
        - model: Exported model.
        - model_run: Model training related outputs (e.g., Tensorboard logs)
      exec_properties: A dict of execution properties.
        - train_args: JSON string of trainer_pb2.TrainArgs instance, providing
          args for training.
        - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing
          args for eval.
        - module_file: Python module file containing UDF model definition.
        - warm_starting: Whether or not we need to do warm starting.
        - warm_start_from: Optional. If warm_starting is True, this is the
          directory to find previous model to warm start on.
        - custom_config: Optional. JSON-serialized dict of additional
          parameters to pass to trainer function.

    Returns:
      None

    Raises:
      ValueError: When neither or both of 'module_file' and 'run_fn'
        are present in 'exec_properties'.
      RuntimeError: If run_fn failed to generate model in desired location.
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    fn_args = self._GetFnArgs(input_dict, output_dict, exec_properties)
    run_fn = udf_utils.get_fn(exec_properties, 'run_fn')

    # Train the model
    absl.logging.info('Training model.')
    run_fn(fn_args)

    # Note: If trained with multi-node distribution workers, it is the user
    # module's responsibility to export the model only once.
    if not fileio.exists(fn_args.serving_model_dir):
      raise RuntimeError('run_fn failed to generate model.')

    absl.logging.info(
        'Training complete. Model written to %s. ModelRun written to %s',
        fn_args.serving_model_dir, fn_args.model_run_dir)
class Executor(GenericExecutor):
  """Local estimator based trainer executor used by the TFX Trainer component.

  How to create a trainer callback function to be used by this Trainer
  executor: An estimator can be executed by TFX by first creating a trainer_fn
  callback method that returns an estimator and some additional parameters,
  similar to
  https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285.
  This becomes the basis of the new Executor for Trainer. This Executor will
  then train and evaluate this estimator using the
  tf.estimator.train_and_evaluate API to train locally.
  """

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Uses a user-supplied tf.estimator to train a TensorFlow model locally.

    The Trainer Executor invokes a training_fn callback function provided by
    the user via the module_file parameter. With the tf.estimator returned by
    this function, the Trainer Executor then builds a TensorFlow model using
    the user-provided tf.estimator.

    Args:
      input_dict: Input dict from input key to a list of ML-Metadata Artifacts.
        - examples: Examples used for training, must include 'train' and 'eval'
          if custom splits is not specified in train_args and eval_args.
        - transform_graph: Optional input transform graph.
        - schema: Schema of the data.
      output_dict: Output dict from output key to a list of Artifacts.
        - model: Exported model.
        - model_run: Model training related outputs (e.g., Tensorboard logs)
      exec_properties: A dict of execution properties.
        - train_args: JSON string of trainer_pb2.TrainArgs instance, providing
          args for training.
        - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing
          args for eval.
        - module_file: Python module file containing UDF model definition.
        - warm_starting: Whether or not we need to do warm starting.
        - warm_start_from: Optional. If warm_starting is True, this is the
          directory to find previous model to warm start on.
        - custom_config: Optional. JSON-serialized dict of additional
          parameters to pass to trainer function.

    Returns:
      None

    Raises:
      ValueError: When neither or both of 'module_file' and 'trainer_fn'
        are present in 'exec_properties'.
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    fn_args = self._GetFnArgs(input_dict, output_dict, exec_properties)
    trainer_fn = udf_utils.get_fn(exec_properties, 'trainer_fn')

    schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())

    # TODO(b/160795287): Deprecate estimator based executor.
    # Provide user with a modified fn_args, with model_run given as the
    # working directory. Executor will then copy user models to the model
    # artifact directory.
    serving_dest = fn_args.serving_model_dir
    eval_dest = fn_args.eval_model_dir

    working_dir = fn_args.model_run_dir
    fn_args.serving_model_dir = path_utils.serving_model_dir(working_dir)
    fn_args.eval_model_dir = path_utils.eval_model_dir(working_dir)

    training_spec = trainer_fn(fn_args, schema)

    # Train the model
    absl.logging.info('Training model.')
    tf.estimator.train_and_evaluate(training_spec['estimator'],
                                    training_spec['train_spec'],
                                    training_spec['eval_spec'])

    absl.logging.info(
        'Training complete. Model written to %s. ModelRun written to %s',
        fn_args.serving_model_dir, fn_args.model_run_dir)

    # Export an eval savedmodel for TFMA. If distributed training, it must only
    # be written by the chief worker, as would be done for serving savedmodel.
    if _is_chief():
      absl.logging.info('Exporting eval_savedmodel for TFMA.')
      tfma.export.export_eval_savedmodel(
          estimator=training_spec['estimator'],
          export_dir_base=fn_args.eval_model_dir,
          eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])

      absl.logging.info('Exported eval_savedmodel to %s.',
                        fn_args.eval_model_dir)

      # TODO(b/160795287): Deprecate estimator based executor.
      # Copy serving and eval model from model_run to model artifact directory.
      serving_source = path_utils.serving_model_path(fn_args.model_run_dir)
      io_utils.copy_dir(serving_source, serving_dest)
      absl.logging.info('Serving model copied to: %s.', serving_dest)

      eval_source = path_utils.eval_model_path(fn_args.model_run_dir)
      io_utils.copy_dir(eval_source, eval_dest)
      absl.logging.info('Eval model copied to: %s.', eval_dest)

    else:
      absl.logging.info(
          'Model export is skipped because this is not the chief worker.')
"""Converters rewrite models using the provided rewriters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from typing import Text
import tensorflow as tf
from tfx.components.trainer.rewriting import rewriter
from tfx.dsl.io import fileio
def _invoke_rewriter(src: Text, dst: Text, rewriter_inst: rewriter.BaseRewriter,
                     src_model_type: rewriter.ModelType,
                     dst_model_type: rewriter.ModelType):
  """Converts the provided model by invoking the specified rewriters.

  Args:
    src: Path to the source model.
    dst: Path where the destination model is to be written.
    rewriter_inst: instance of the rewriter to invoke.
    src_model_type: the `rewriter.ModelType` of the source model.
    dst_model_type: the `rewriter.ModelType` of the destination model.

  Raises:
    ValueError: if the source path is the same as the destination path.
  """
  # Rewriting in place is not supported: the rewriter reads from one location
  # and writes to another.
  if src == dst:
    raise ValueError('Source path and destination path cannot match.')

  rewriter_inst.perform_rewrite(
      rewriter.ModelDescription(src_model_type, src),
      rewriter.ModelDescription(dst_model_type, dst))
class RewritingExporter(tf.estimator.Exporter):
  """This class invokes the base exporter and a series of rewriters."""

  def __init__(self, base_exporter: tf.estimator.Exporter,
               rewriter_inst: rewriter.BaseRewriter):
    """Initializes the rewriting exporter.

    Args:
      base_exporter: The exporter of the original model.
      rewriter_inst: The rewriter instance to invoke. Must inherit from
        `rewriter.BaseRewriter`.
    """
    self._base_exporter = base_exporter
    self._rewriter_inst = rewriter_inst

  @property
  def name(self):
    """Name of the exporter."""
    return self._base_exporter.name

  def export(self, estimator, export_path, checkpoint_path, eval_result,
             is_the_final_export):
    """Exports the given `Estimator` to a specific format.

    Performs the export as defined by the base_exporter and invokes all of the
    specified rewriters.

    Args:
      estimator: the `Estimator` to export.
      export_path: A string containing a directory where to write the export.
      checkpoint_path: The checkpoint path to export.
      eval_result: The output of `Estimator.evaluate` on this checkpoint.
      is_the_final_export: This boolean is True when this is an export in the
        end of training. It is False for the intermediate exports during the
        training. When passing `Exporter` to `tf.estimator.train_and_evaluate`
        `is_the_final_export` is always False if `TrainSpec.max_steps` is
        `None`.

    Returns:
      The string path to the base exported directory or `None` if export is
      skipped.

    Raises:
      RuntimeError: Unable to create a temporary rewrite directory.
    """
    base_path = self._base_exporter.export(estimator, export_path,
                                           checkpoint_path, eval_result,
                                           is_the_final_export)
    if not base_path:
      return None

    # Rewrite into a unique scratch directory, then atomically swap it in for
    # the base export so readers never observe a half-rewritten model.
    tmp_rewrite_folder = 'tmp-rewrite-' + str(int(time.time()))
    tmp_rewrite_path = os.path.join(export_path, tmp_rewrite_folder)
    if fileio.exists(tmp_rewrite_path):
      raise RuntimeError('Unable to create a unique temporary rewrite path.')
    fileio.makedirs(tmp_rewrite_path)

    _invoke_rewriter(base_path, tmp_rewrite_path, self._rewriter_inst,
                     rewriter.ModelType.SAVED_MODEL,
                     rewriter.ModelType.ANY_MODEL)

    fileio.rmtree(base_path)
    fileio.rename(tmp_rewrite_path, base_path)
    return base_path
def rewrite_saved_model(
    src: Text,
    dst: Text,
    rewriter_inst: rewriter.BaseRewriter,
    dst_model_type: rewriter.ModelType = rewriter.ModelType.SAVED_MODEL):
  """Rewrites the provided SavedModel.

  Args:
    src: location of the saved_model to rewrite.
    dst: location of the rewritten saved_model.
    rewriter_inst: the rewriter instance to invoke. Must inherit from
      `rewriter.BaseRewriter`.
    dst_model_type: the `rewriter.ModelType` of the destination model.
  """
  # The source is always a SavedModel; only the destination format varies.
  _invoke_rewriter(
      src=src,
      dst=dst,
      rewriter_inst=rewriter_inst,
      src_model_type=rewriter.ModelType.SAVED_MODEL,
      dst_model_type=dst_model_type)
"""Rewriter that invokes the TFJS converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
import six
from tensorflowjs.converters import converter
from tfx.components.trainer.rewriting import rewriter
# Command-line-style flags passed verbatim to the TensorFlow.js converter CLI.
CONVERTER_SAVED_MODEL_INPUT_FLAG = '--input_format=tf_saved_model'
CONVERTER_SERVING_TAG_FLAG = '--saved_model_tags=serve'
CONVERTER_DEFAULT_SIGNATURE_FLAG = '--signature_name=serving_default'
def _convert_tfjs_model(saved_model_path: Text, destination_path: Text):
  """Invokes the TFJS converter CLI on a SavedModel.

  Args:
    saved_model_path: location of the SavedModel to convert.
    destination_path: location to write the converted TFJS model to.
  """
  # The converter expects an argv-style list: flags first, then the
  # positional source and destination paths.
  args = [
      CONVERTER_SAVED_MODEL_INPUT_FLAG,
      CONVERTER_SERVING_TAG_FLAG,
      CONVERTER_DEFAULT_SIGNATURE_FLAG,
      saved_model_path,
      destination_path,
  ]
  converter.convert(args)
class TFJSRewriter(rewriter.BaseRewriter):
  """Rewriter that converts SavedModels into the TensorFlow.js format."""

  def __init__(self, name: Text):
    """Creates a TFJSRewriter.

    Args:
      name: The name to use when identifying the rewriter.
    """
    self._name = name

  @property
  def name(self) -> Text:
    """The user-specified name of the rewriter."""
    return self._name

  def _pre_rewrite_validate(self, original_model: rewriter.ModelDescription):
    """Checks that the model to be rewritten is a SavedModel.

    Args:
      original_model: A `ModelDescription` object describing the model to be
        rewritten.

    Raises:
      ValueError: If the original model does not have the expected structure.
    """
    if original_model.model_type != rewriter.ModelType.SAVED_MODEL:
      raise ValueError('TFJSRewriter can only convert SavedModels.')

  def _rewrite(self, original_model: rewriter.ModelDescription,
               rewritten_model: rewriter.ModelDescription):
    """Converts the SavedModel into a TFJS model at the requested location.

    Args:
      original_model: A `ModelDescription` specifying the original model to be
        rewritten.
      rewritten_model: A `ModelDescription` specifying the format and location
        of the rewritten model.

    Raises:
      ValueError: If the model could not be successfully rewritten.
    """
    accepted_types = (rewriter.ModelType.TFJS_MODEL,
                      rewriter.ModelType.ANY_MODEL)
    if rewritten_model.model_type not in accepted_types:
      raise ValueError('TFJSConverter can only convert to the TFJS format.')

    src_path = six.ensure_text(original_model.path)
    dst_path = six.ensure_text(rewritten_model.path)
    _convert_tfjs_model(src_path, dst_path)

  def _post_rewrite_validate(self, rewritten_model: rewriter.ModelDescription):
    """Performs post-rewrite checks to see if the rewritten model is valid.

    Args:
      rewritten_model: A `ModelDescription` specifying the format and location
        of the rewritten model.
    """
    # TODO(dzats): Implement post-rewrite validation.
    pass
"""Base class that TFX rewriters inherit and invocation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import enum
from typing import Text
import six
# A lightweight (model_type, path) record: `model_type` identifies the model
# format and `path` is the filesystem location of the model.
ModelDescription = collections.namedtuple('ModelDescription',
                                          'model_type path')
class ModelType(enum.Enum):
  """Types of models used or created by the rewriter."""
  # Wildcard used by rewriters that accept or emit any of the formats below.
  ANY_MODEL = 1
  # TensorFlow SavedModel directory.
  SAVED_MODEL = 2
  # TensorFlow Lite model.
  TFLITE_MODEL = 3
  # TensorFlow.js model.
  TFJS_MODEL = 4
class BaseRewriter(six.with_metaclass(abc.ABCMeta, object)):
  """Base class from which all rewriters should inherit.

  Subclasses implement the three abstract hooks below; callers use only
  `perform_rewrite`, which runs them in order and wraps any `ValueError`
  with rewriter/model context.
  """

  @abc.abstractproperty
  def name(self) -> Text:
    """Name of the rewriter.

    Should not be `None` nor empty.
    """
    pass

  @abc.abstractmethod
  def _pre_rewrite_validate(self, original_model: ModelDescription):
    """Perform pre-rewrite validation to check the model has expected structure.

    Args:
      original_model: A `ModelDescription` object describing the original model.

    Raises:
      ValueError: If the original model does not have the expected structure.
    """
    pass

  @abc.abstractmethod
  def _rewrite(self, original_model: ModelDescription,
               rewritten_model: ModelDescription):
    """Perform the rewrite.

    Args:
      original_model: A `ModelDescription` object describing the original model.
      rewritten_model: A `ModelDescription` object describing the location and
        type of the rewritten output.

    Raises:
      ValueError: If the original model was not successfully rewritten.
    """
    pass

  @abc.abstractmethod
  def _post_rewrite_validate(self, rewritten_model: ModelDescription):
    """Perform post-rewrite validation.

    Args:
      rewritten_model: A `ModelDescription` object describing the location and
        type of the rewritten output.

    Raises:
      ValueError: If the rewritten model is not valid.
    """
    pass

  def perform_rewrite(self, original_model: ModelDescription,
                      rewritten_model: ModelDescription):
    """Invoke all validations and perform the rewrite.

    Args:
      original_model: A `base_rewriter.ModelDescription` object describing the
        original model.
      rewritten_model: A `base_rewriter.ModelDescription` object describing the
        location and type of the rewritten model.

    Raises:
      ValueError: if the model was not successfully rewritten.
    """
    # Each stage's ValueError is re-raised with rewriter/model context;
    # `from v` chains the original exception so its traceback is preserved.
    try:
      self._pre_rewrite_validate(original_model)
    except ValueError as v:
      raise ValueError('{} failed to perform pre-rewrite validation. Original '
                       'model: {}. Error: {}'.format(self.name,
                                                     str(original_model),
                                                     str(v))) from v

    try:
      self._rewrite(original_model, rewritten_model)
    except ValueError as v:
      raise ValueError(
          '{} failed to rewrite model. Original model: {}. Error {}'.format(
              self.name, str(original_model), str(v))) from v

    try:
      self._post_rewrite_validate(rewritten_model)
    except ValueError as v:
      raise ValueError(
          '{} failed to validate rewritten model. Rewritten model: {}. Error {}'
          .format(self.name, str(rewritten_model), str(v))) from v
"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.minio_pusher import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
from tfx.utils import json_utils
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class MinIOPusher(base_component.BaseComponent):
  """A TFX component to push validated TensorFlow models to a model serving platform.

  The `Pusher` component can be used to push an validated SavedModel from output
  of the [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) to
  [TensorFlow Serving](https://www.tensorflow.org/tfx/serving). The Pusher
  will check the validation results from the [Evaluator
  component](https://www.tensorflow.org/tfx/guide/evaluator) and [InfraValidator
  component](https://www.tensorflow.org/tfx/guide/infra_validator)
  before deploying the model. If the model has not been blessed, then the model
  will not be pushed.

  *Note:* The executor for this component can be overriden to enable the model
  to be pushed to other serving platforms than tf.serving. The [Cloud AI
  Platform custom
  executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/pusher)
  provides an example how to implement this.

  ## Example

  ```
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  ```
  """

  SPEC_CLASS = PusherSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      model: types.Channel = None,
      model_blessing: Optional[types.Channel] = None,
      infra_blessing: Optional[types.Channel] = None,
      push_destination: Optional[Union[pusher_pb2.PushDestination,
                                       Dict[Text, Any]]] = None,
      custom_config: Optional[Dict[Text, Any]] = None,
      custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
      pushed_model: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct a Pusher component.

    Args:
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: An optional Channel of type
        `standard_artifacts.ModelBlessing`, usually produced from an Evaluator
        component.
      infra_blessing: An optional Channel of type
        `standard_artifacts.InfraBlessing`, usually produced from an
        InfraValidator component.
      push_destination: A pusher_pb2.PushDestination instance, providing info
        for tensorflow serving to load models. Optional if executor_class
        doesn't require push_destination. If any field is provided as a
        RuntimeParameter, push_destination should be constructed as a dict with
        the same field names as PushDestination proto message.
      custom_config: A dict which contains the deployment job parameters to be
        passed to cloud-based training platforms. The [Kubeflow example](
        https://github.com/tensorflow/tfx/blob/6ff57e36a7b65818d4598d41e584a42584d361e6/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py#L278-L285)
        contains an example how this can be used by custom executors.
      custom_executor_spec: Optional custom executor spec.
      pushed_model: Optional output `standard_artifacts.PushedModel` channel
        with result of push.
      instance_name: Optional unique instance name. Necessary if multiple
        Pusher components are declared in the same pipeline.

    Raises:
      ValueError: If neither `push_destination` nor `custom_executor_spec` is
        supplied.
    """
    pushed_model = pushed_model or types.Channel(
        type=standard_artifacts.PushedModel)
    if push_destination is None and not custom_executor_spec:
      raise ValueError('push_destination is required unless a '
                       'custom_executor_spec is supplied that does not require '
                       'it.')
    spec = PusherSpec(
        model=model,
        model_blessing=model_blessing,
        infra_blessing=infra_blessing,
        push_destination=push_destination,
        custom_config=json_utils.dumps(custom_config),
        pushed_model=pushed_model)
    super(MinIOPusher, self).__init__(
        spec=spec,
        custom_executor_spec=custom_executor_spec,
        instance_name=instance_name)
"""TFX BulkInferrer component definition."""
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.bulk_inferrer import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import bulk_inferrer_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import BulkInferrerSpec
class BulkInferrer(base_component.BaseComponent):
  """A TFX component to do batch inference on a model with unlabelled examples.

  BulkInferrer consumes examples data and a model, and produces the inference
  results to an external location as PredictionLog proto.

  BulkInferrer will infer on validated model.

  ## Example

  ```
  # Uses BulkInferrer to inference on examples.
  bulk_inferrer = BulkInferrer(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'])
  ```
  """

  SPEC_CLASS = BulkInferrerSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      examples: types.Channel = None,
      model: Optional[types.Channel] = None,
      model_blessing: Optional[types.Channel] = None,
      data_spec: Optional[Union[bulk_inferrer_pb2.DataSpec, Dict[Text,
                                                                 Any]]] = None,
      model_spec: Optional[Union[bulk_inferrer_pb2.ModelSpec,
                                 Dict[Text, Any]]] = None,
      output_example_spec: Optional[Union[bulk_inferrer_pb2.OutputExampleSpec,
                                          Dict[Text, Any]]] = None,
      inference_result: Optional[types.Channel] = None,
      output_examples: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct an BulkInferrer component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, usually
        produced by an ExampleGen component. _required_
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: A Channel of type `standard_artifacts.ModelBlessing`,
        usually produced by a ModelValidator component.
      data_spec: bulk_inferrer_pb2.DataSpec instance that describes data
        selection. If any field is provided as a RuntimeParameter, data_spec
        should be constructed as a dict with the same field names as DataSpec
        proto message.
      model_spec: bulk_inferrer_pb2.ModelSpec instance that describes model
        specification. If any field is provided as a RuntimeParameter,
        model_spec should be constructed as a dict with the same field names as
        ModelSpec proto message.
      output_example_spec: bulk_inferrer_pb2.OutputExampleSpec instance, specify
        if you want BulkInferrer to output examples instead of inference result.
        If any field is provided as a RuntimeParameter, output_example_spec
        should be constructed as a dict with the same field names as
        OutputExampleSpec proto message.
      inference_result: Channel of type `standard_artifacts.InferenceResult`
        to store the inference results, must not be specified when
        output_example_spec is set.
      output_examples: Channel of type `standard_artifacts.Examples`
        to store the output examples, must not be specified when
        output_example_spec is unset. Check output_example_spec for details.
      instance_name: Optional name assigned to this specific instance of
        BulkInferrer. Required only if multiple BulkInferrer components are
        declared in the same pipeline.

    Raises:
      ValueError: Must not specify inference_result or output_examples depends
        on whether output_example_spec is set or not.
    """
    # Exactly one of `inference_result` / `output_examples` is produced,
    # depending on whether `output_example_spec` was supplied.
    if output_example_spec:
      if inference_result:
        raise ValueError(
            'Must not specify inference_result when output_example_spec is set.'
        )
      output_examples = output_examples or types.Channel(
          type=standard_artifacts.Examples)
    else:
      if output_examples:
        raise ValueError(
            'Must not specify output_examples when output_example_spec is unset.'
        )
      inference_result = inference_result or types.Channel(
          type=standard_artifacts.InferenceResult)

    spec = BulkInferrerSpec(
        examples=examples,
        model=model,
        model_blessing=model_blessing,
        data_spec=data_spec or bulk_inferrer_pb2.DataSpec(),
        model_spec=model_spec or bulk_inferrer_pb2.ModelSpec(),
        output_example_spec=output_example_spec,
        inference_result=inference_result,
        output_examples=output_examples)
    super(BulkInferrer, self).__init__(spec=spec, instance_name=instance_name)
"""Utils for converting prediction_log to example."""
from typing import Any, List, Tuple, Text, Union
import numpy as np
import six
import tensorflow as tf
from tfx.proto import bulk_inferrer_pb2
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import prediction_log_pb2
# Key name for the examples input.
INPUT_KEY = 'examples'
# A list of (feature_name, values) pairs; used when assembling output examples.
FEATURE_LIST_TYPE = List[Tuple[Text, List[Union[Text, bytes, float]]]]
# Typehint includes Any for compatibility reasons.
_OutputExampleSpecType = Union[bulk_inferrer_pb2.OutputExampleSpec, Any]
_PredictOutputType = Union[bulk_inferrer_pb2.PredictOutput, Any]
_ClassifyOutputType = Union[bulk_inferrer_pb2.ClassifyOutput, Any]
def convert(prediction_log: prediction_log_pb2.PredictionLog,
            output_example_spec: _OutputExampleSpecType) -> tf.train.Example:
  """Converts given `prediction_log` to a `tf.train.Example`.

  Args:
    prediction_log: The input prediction log.
    output_example_spec: The spec for how to map prediction results to columns
      in example.

  Returns:
    A `tf.train.Example` converted from the given prediction_log.

  Raises:
    ValueError: If the inference type or signature name in spec does not match
      that in prediction_log.
  """
  specs = output_example_spec.output_columns_spec
  if prediction_log.HasField('multi_inference_log'):
    example, output_features = _parse_multi_inference_log(
        prediction_log.multi_inference_log, output_example_spec)
  else:
    # A single-model inference result must be described by exactly one spec.
    if len(specs) != 1:
      raise ValueError('Got single inference result, so expect single spec in '
                       'output_example_spec: %s' % output_example_spec)
    if prediction_log.HasField('regress_log'):
      if not specs[0].HasField('regress_output'):
        raise ValueError(
            'Regression predictions require a regress_output in output_example_spec: %s'
            % output_example_spec)
      # The request example is copied and then extended with the prediction.
      example = tf.train.Example()
      example.CopyFrom(
          prediction_log.regress_log.request.input.example_list.examples[0])
      output_features = [
          (specs[0].regress_output.value_column,
           [prediction_log.regress_log.response.result.regressions[0].value])
      ]
    elif prediction_log.HasField('classify_log'):
      if not specs[0].HasField('classify_output'):
        raise ValueError(
            'Classification predictions require a classify_output in output_example_spec: %s'
            % output_example_spec)
      example, output_features = _parse_classify_log(
          prediction_log.classify_log, specs[0].classify_output)
    elif prediction_log.HasField('predict_log'):
      if not specs[0].HasField('predict_output'):
        raise ValueError(
            'Predict predictions require a predict_output in output_example_spec: %s'
            % output_example_spec)
      example, output_features = _parse_predict_log(prediction_log.predict_log,
                                                    specs[0].predict_output)
    else:
      raise ValueError('Unsupported prediction type in prediction_log: %s' %
                       prediction_log)
  return _add_columns(example, output_features)
def _parse_multi_inference_log(
    multi_inference_log: prediction_log_pb2.MultiInferenceLog,
    output_example_spec: _OutputExampleSpecType
) -> Tuple[tf.train.Example, FEATURE_LIST_TYPE]:
  """Parses MultiInferenceLog.

  Args:
    multi_inference_log: Log holding the original request example plus one
      inference result per signature.
    output_example_spec: Spec whose output_columns_spec entries are matched to
      results by signature name.

  Returns:
    A tuple of the request example and the list of (column, values) features
    parsed from all inference results.

  Raises:
    ValueError: If a result is neither a classification nor a regression
      result.
    KeyError: If a result's signature name has no matching column spec.
  """
  # Index column specs by signature name; an unset signature name stands for
  # the default serving signature.
  spec_map = {
      spec.signature_name or tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
      spec for spec in output_example_spec.output_columns_spec
  }
  example = tf.train.Example()
  example.CopyFrom(multi_inference_log.request.input.example_list.examples[0])
  output_features = []
  for result in multi_inference_log.response.results:
    spec = spec_map[result.model_spec.signature_name]
    if result.HasField('classification_result'):
      output_features += _parse_classification_result(
          result.classification_result, spec.classify_output)
    elif result.HasField('regression_result'):
      output_features.append((spec.regress_output.value_column,
                              [result.regression_result.regressions[0].value]))
    else:
      raise ValueError('Unsupported multi_inferrence_log: %s' %
                       multi_inference_log)
  return example, output_features
def _parse_classify_log(
    classify_log: prediction_log_pb2.ClassifyLog,
    classify_output_spec: _ClassifyOutputType
) -> Tuple[tf.train.Example, FEATURE_LIST_TYPE]:
  """Parses a ClassifyLog into the request example and output features."""
  request_example = tf.train.Example()
  request_example.CopyFrom(
      classify_log.request.input.example_list.examples[0])
  parsed_features = _parse_classification_result(classify_log.response.result,
                                                 classify_output_spec)
  return request_example, parsed_features
def _parse_classification_result(
classification_result: classification_pb2.ClassificationResult,
classify_output_spec: _ClassifyOutputType) -> FEATURE_LIST_TYPE:
"""Parses ClassificationResult."""
output_features = []
classes = classification_result.classifications[0].classes
if classify_output_spec.label_column:
output_features.append(
(classify_output_spec.label_column, [c.label for c in classes]))
if classify_output_spec.score_column:
output_features.append(
(classify_output_spec.score_column, [c.score for c in classes]))
return output_features
def _parse_predict_log(
    predict_log: prediction_log_pb2.PredictLog,
    predict_output_spec: _PredictOutputType
) -> Tuple[tf.train.Example, FEATURE_LIST_TYPE]:
  """Parses PredictLog.

  Args:
    predict_log: Log holding the serialized request example and the named
      output tensors of the prediction.
    predict_output_spec: Spec mapping output tensor keys to example columns.

  Returns:
    A tuple of the request example and the list of (column, values) features
    parsed from the configured output tensors.

  Raises:
    ValueError: If an output tensor cannot be squeezed to a 0-D or 1-D array.
  """
  input_tensor_proto = predict_log.request.inputs[INPUT_KEY]
  example = tf.train.Example.FromString(input_tensor_proto.string_val[0])
  outputs = predict_log.response.outputs
  output_features = []
  for col in predict_output_spec.output_columns:
    output_tensor_proto = outputs.get(col.output_key)
    output_values = np.squeeze(tf.make_ndarray(output_tensor_proto))
    if output_values.ndim > 1:
      raise ValueError(
          'All output values must be convertible to 1D arrays, but %s was '
          'not. value was %s.' % (col.output_key, output_values))
    if output_values.ndim == 1:
      # Convert the output_values to a list.
      output_values = output_values.tolist()
    else:  # output_values.ndim == 0
      # Get a scalar for output_values. `np.asscalar` was removed in
      # NumPy 1.23; `ndarray.item()` is the supported equivalent.
      output_values = [output_values.item()]
    output_features.append((col.output_column, output_values))
  return example, output_features
def _add_columns(example: tf.train.Example,
features: FEATURE_LIST_TYPE) -> tf.train.Example:
"""Add given features to `example`."""
feature_map = example.features.feature
for col, value in features:
assert col not in feature_map, ('column name %s already exists in example: '
'%s') % (col, example)
# Note: we only consider two types, bytes and float for now.
if isinstance(value[0], (six.text_type, six.binary_type)):
if isinstance(value[0], six.text_type):
bytes_value = [v.encode('utf-8') for v in value]
else:
bytes_value = value
feature_map[col].bytes_list.value[:] = bytes_value
else:
feature_map[col].float_list.value[:] = value
return example | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/components/bulk_inferrer/prediction_to_example_utils.py | 0.957417 | 0.484136 | prediction_to_example_utils.py | pypi |
"""TFX bulk_inferrer executor."""
import os
from typing import Any, Callable, Dict, List, Optional, Text, Union
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx import types
from tfx.components.bulk_inferrer import prediction_to_example_utils
from tfx.components.util import model_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import bulk_inferrer_pb2
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx_bsl.public.beam import run_inference
from tfx_bsl.public.proto import model_spec_pb2
from tfx_bsl.tfxio import record_based_tfxio
from tensorflow_serving.apis import prediction_log_pb2
try:
import tensorflow_text as _ # pylint: disable=g-import-not-at-top
except ImportError as e:
logging.info('tensorflow_text is not available: %s', e)
_PREDICTION_LOGS_FILE_NAME = 'prediction_logs'
_EXAMPLES_FILE_NAME = 'examples'
_TELEMETRY_DESCRIPTORS = ['BulkInferrer']
class Executor(base_executor.BaseExecutor):
  """TFX bulk inferer executor.

  Reads examples, runs batch inference against an exported (and optionally
  blessed) model, and writes prediction logs and/or examples annotated with
  the predictions to the output artifacts.
  """

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Runs batch inference on a given model with given input examples.

    Args:
      input_dict: Input dict from input key to a list of Artifacts.
        - examples: examples for inference.
        - model: exported model.
        - model_blessing: model blessing result, optional.
      output_dict: Output dict from output key to a list of Artifacts.
        - output: bulk inference results.
      exec_properties: A dict of execution properties.
        - model_spec: JSON string of bulk_inferrer_pb2.ModelSpec instance.
        - data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    # Both outputs are optional; which one is present depends on how the
    # component was configured (see component.py).
    if output_dict.get(standard_component_specs.INFERENCE_RESULT_KEY):
      inference_result = artifact_utils.get_single_instance(
          output_dict[standard_component_specs.INFERENCE_RESULT_KEY])
    else:
      inference_result = None
    if output_dict.get(standard_component_specs.OUTPUT_EXAMPLES_KEY):
      output_examples = artifact_utils.get_single_instance(
          output_dict[standard_component_specs.OUTPUT_EXAMPLES_KEY])
    else:
      output_examples = None
    # NOTE(review): literal keys are used below while the rest of the method
    # uses standard_component_specs constants; presumably the values match —
    # confirm before changing either.
    if 'examples' not in input_dict:
      raise ValueError('\'examples\' is missing in input dict.')
    if 'model' not in input_dict:
      raise ValueError('Input models are not valid, model '
                       'need to be specified.')
    # When a blessing artifact is provided, skip inference for unblessed
    # models; without one, the exported model is used unconditionally.
    if standard_component_specs.MODEL_BLESSING_KEY in input_dict:
      model_blessing = artifact_utils.get_single_instance(
          input_dict[standard_component_specs.MODEL_BLESSING_KEY])
      if not model_utils.is_model_blessed(model_blessing):
        logging.info('Model on %s was not blessed', model_blessing.uri)
        return
    else:
      logging.info('Model blessing is not provided, exported model will be '
                   'used.')
    model = artifact_utils.get_single_instance(
        input_dict[standard_component_specs.MODEL_KEY])
    model_path = path_utils.serving_model_path(model.uri)
    logging.info('Use exported model from %s.', model_path)
    data_spec = bulk_inferrer_pb2.DataSpec()
    proto_utils.json_to_proto(
        exec_properties[standard_component_specs.DATA_SPEC_KEY], data_spec)
    # output_example_spec stays an empty proto when not configured.
    output_example_spec = bulk_inferrer_pb2.OutputExampleSpec()
    if exec_properties.get(standard_component_specs.OUTPUT_EXAMPLE_SPEC_KEY):
      proto_utils.json_to_proto(
          exec_properties[standard_component_specs.OUTPUT_EXAMPLE_SPEC_KEY],
          output_example_spec)
    self._run_model_inference(
        data_spec, output_example_spec,
        input_dict[standard_component_specs.EXAMPLES_KEY], output_examples,
        inference_result, self._get_inference_spec(model_path, exec_properties))

  def _get_inference_spec(
      self, model_path: Text,
      exec_properties: Dict[Text, Any]) -> model_spec_pb2.InferenceSpecType:
    """Builds the saved-model inference spec from execution properties.

    Args:
      model_path: Path of the exported serving model.
      exec_properties: Execution properties holding the JSON-encoded
        bulk_inferrer_pb2.ModelSpec under MODEL_SPEC_KEY.

    Returns:
      A model_spec_pb2.InferenceSpecType pointing at the saved model.
    """
    model_spec = bulk_inferrer_pb2.ModelSpec()
    proto_utils.json_to_proto(
        exec_properties[standard_component_specs.MODEL_SPEC_KEY], model_spec)
    saved_model_spec = model_spec_pb2.SavedModelSpec(
        model_path=model_path,
        tag=model_spec.tag,
        signature_name=model_spec.model_signature_name)
    result = model_spec_pb2.InferenceSpecType()
    result.saved_model_spec.CopyFrom(saved_model_spec)
    return result

  def _run_model_inference(
      self,
      data_spec: bulk_inferrer_pb2.DataSpec,
      output_example_spec: bulk_inferrer_pb2.OutputExampleSpec,
      examples: List[types.Artifact],
      output_examples: Optional[types.Artifact],
      inference_result: Optional[types.Artifact],
      inference_endpoint: model_spec_pb2.InferenceSpecType,
  ) -> None:
    """Runs model inference on given examples data.

    Args:
      data_spec: bulk_inferrer_pb2.DataSpec instance.
      output_example_spec: bulk_inferrer_pb2.OutputExampleSpec instance.
      examples: List of `standard_artifacts.Examples` artifacts.
      output_examples: Optional output `standard_artifacts.Examples` artifact.
      inference_result: Optional output `standard_artifacts.InferenceResult`
        artifact.
      inference_endpoint: Model inference endpoint.
    """
    # Collect the URI of every split to process; an empty
    # data_spec.example_splits means all splits are used.
    example_uris = {}
    for example_artifact in examples:
      for split in artifact_utils.decode_split_names(
          example_artifact.split_names):
        if data_spec.example_splits:
          if split in data_spec.example_splits:
            example_uris[split] = artifact_utils.get_split_uri(
                [example_artifact], split)
        else:
          example_uris[split] = artifact_utils.get_split_uri([example_artifact],
                                                             split)
    payload_format, _ = tfxio_utils.resolve_payload_format_and_data_view_uri(
        examples)
    tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
        examples,
        _TELEMETRY_DESCRIPTORS,
        schema=None,
        read_as_raw_records=True,
        # We have to specify this parameter in order to create a RawRecord TFXIO
        # but we won't use the RecordBatches so the column name of the raw
        # records does not matter.
        raw_record_column_name='unused')
    if output_examples:
      output_examples.split_names = artifact_utils.encode_split_names(
          sorted(example_uris.keys()))
    with self._make_beam_pipeline() as pipeline:
      data_list = []
      for split, example_uri in example_uris.items():
        tfxio = tfxio_factory([io_utils.all_files_pattern(example_uri)])
        assert isinstance(tfxio, record_based_tfxio.RecordBasedTFXIO), (
            'Unable to use TFXIO {} as it does not support reading raw records.'
            .format(type(tfxio)))
        # pylint: disable=no-value-for-parameter
        data = (pipeline
                | 'ReadData[{}]'.format(split) >> tfxio.RawRecordBeamSource()
                | 'RunInference[{}]'.format(split) >> _RunInference(
                    payload_format, inference_endpoint))
        if output_examples:
          output_examples_split_uri = artifact_utils.get_split_uri(
              [output_examples], split)
          logging.info('Path of output examples split `%s` is %s.', split,
                       output_examples_split_uri)
          _ = (
              data
              | 'WriteExamples[{}]'.format(split) >> _WriteExamples(
                  output_example_spec, output_examples_split_uri))
          # pylint: enable=no-value-for-parameter
        data_list.append(data)
      if inference_result:
        # All per-split results are flattened into one PredictionLog TFRecord.
        _ = (
            data_list
            | 'FlattenInferenceResult' >> beam.Flatten(pipeline=pipeline)
            | 'WritePredictionLogs' >> beam.io.WriteToTFRecord(
                os.path.join(inference_result.uri, _PREDICTION_LOGS_FILE_NAME),
                file_name_suffix='.gz',
                coder=beam.coders.ProtoCoder(prediction_log_pb2.PredictionLog)))
    if output_examples:
      logging.info('Output examples written to %s.', output_examples.uri)
    if inference_result:
      logging.info('Inference result written to %s.', inference_result.uri)
def _MakeParseFn(
    payload_format: int
) -> Union[Callable[[bytes], tf.train.Example], Callable[
    [bytes], tf.train.SequenceExample]]:
  """Returns a function to parse bytes to payload."""
  parse_fn_by_format = {
      example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE:
          tf.train.Example.FromString,
      example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE:
          tf.train.SequenceExample.FromString,
  }
  if payload_format not in parse_fn_by_format:
    raise NotImplementedError(
        'Payload format %s is not supported.' %
        example_gen_pb2.PayloadFormat.Name(payload_format))
  return parse_fn_by_format[payload_format]
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(prediction_log_pb2.PredictionLog)
def _RunInference(
    pipeline: beam.Pipeline,
    payload_format: int,
    inference_endpoint: model_spec_pb2.InferenceSpecType
) -> beam.pvalue.PCollection:
  """Runs model inference on given examples data.

  Args:
    pipeline: PCollection of serialized example records.
    payload_format: One of example_gen_pb2.PayloadFormat; selects how raw
      records are parsed before inference.
    inference_endpoint: Model inference endpoint.

  Returns:
    PCollection of prediction_log_pb2.PredictionLog.
  """
  return (
      pipeline
      | 'ParseExamples' >> beam.Map(_MakeParseFn(payload_format))
      | 'RunInference' >> run_inference.RunInference(inference_endpoint))
@beam.ptransform_fn
@beam.typehints.with_input_types(prediction_log_pb2.PredictionLog)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def _WriteExamples(prediction_log: beam.pvalue.PCollection,
                   output_example_spec: bulk_inferrer_pb2.OutputExampleSpec,
                   output_path: Text) -> beam.pvalue.PDone:
  """Converts `prediction_log` to `tf.train.Example` and materializes.

  Args:
    prediction_log: PCollection of prediction_log_pb2.PredictionLog.
    output_example_spec: Spec mapping prediction results to example columns.
    output_path: Directory to write the gzipped TFRecord shards to.

  Returns:
    beam.pvalue.PDone marking the end of the write.
  """
  return (prediction_log
          | 'ConvertToExamples' >> beam.Map(
              prediction_to_example_utils.convert,
              output_example_spec=output_example_spec)
          | 'WriteExamples' >> beam.io.WriteToTFRecord(
              os.path.join(output_path, _EXAMPLES_FILE_NAME),
              file_name_suffix='.gz',
              coder=beam.coders.ProtoCoder(tf.train.Example)))
"""TFX ExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.example_gen import driver
from tfx.components.example_gen import utils
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import FileBasedExampleGenSpec
from tfx.types.standard_component_specs import QueryBasedExampleGenSpec
class QueryBasedExampleGen(base_component.BaseComponent):
  """A TFX component to ingest examples from query based systems.

  The QueryBasedExampleGen component can be extended to ingest examples from
  query based systems such as Presto or Bigquery. The component will also
  convert the input data into
  [tf.record](https://www.tensorflow.org/tutorials/load_data/tf_records)
  and generate train and eval example splits for downstream components.

  ## Example
  ```
  _query = "SELECT * FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`"
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = BigQueryExampleGen(query=_query)
  ```
  """

  SPEC_CLASS = QueryBasedExampleGenSpec
  # EXECUTOR_SPEC should be overridden by subclasses.
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(base_executor.BaseExecutor)

  def __init__(
      self,
      input_config: Union[example_gen_pb2.Input, Dict[Text, Any]],
      output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
                                                                 Any]]] = None,
      custom_config: Optional[Union[example_gen_pb2.CustomConfig,
                                    Dict[Text, Any]]] = None,
      output_data_format: Optional[int] = example_gen_pb2.FORMAT_TF_EXAMPLE,
      example_artifacts: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct a QueryBasedExampleGen component.

    Args:
      input_config: An
        [example_gen_pb2.Input](https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto)
        instance, providing input configuration. If any field is provided as a
        RuntimeParameter, input_config should be constructed as a dict with
        the same field names as Input proto message. _required_
      output_config: An
        [example_gen_pb2.Output](https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto)
        instance, providing output configuration. If unset, the default splits
        will be labeled as 'train' and 'eval' with a distribution ratio of 2:1.
        If any field is provided as a RuntimeParameter, output_config should
        be constructed as a dict with the same field names as Output proto
        message.
      custom_config: An
        [example_gen_pb2.CustomConfig](https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto)
        instance, providing custom configuration for ExampleGen. If any field
        is provided as a RuntimeParameter, output_config should be constructed
        as a dict.
      output_data_format: Payload format of generated data in output artifact,
        one of example_gen_pb2.PayloadFormat enum.
      example_artifacts: Channel of `standard_artifacts.Examples` for output
        train and eval examples.
      instance_name: Optional unique instance name. Required only if multiple
        ExampleGen components are declared in the same pipeline.

    Raises:
      ValueError: The output_data_format value must be defined in the
        example_gen_pb2.PayloadFormat proto.
    """
    # Configure outputs.
    output_config = output_config or utils.make_default_output_config(
        input_config)
    if not example_artifacts:
      example_artifacts = types.Channel(type=standard_artifacts.Examples)
    if output_data_format not in example_gen_pb2.PayloadFormat.values():
      raise ValueError('The value of output_data_format must be defined in'
                       'the example_gen_pb2.PayloadFormat proto.')
    spec = QueryBasedExampleGenSpec(
        input_config=input_config,
        output_config=output_config,
        output_data_format=output_data_format,
        custom_config=custom_config,
        examples=example_artifacts)
    super(QueryBasedExampleGen, self).__init__(
        spec=spec, instance_name=instance_name)
class FileBasedExampleGen(base_component.BaseComponent):
  """A TFX component to ingest examples from a file system.

  The FileBasedExampleGen component is an API for getting file-based records
  into TFX pipelines. It consumes external files to generate examples which will
  be used by other internal components like StatisticsGen or Trainers. The
  component will also convert the input data into
  [tf.record](https://www.tensorflow.org/tutorials/load_data/tf_records)
  and generate train and eval example splits for downstream components.

  ## Example
  ```
  _taxi_root = os.path.join(os.environ['HOME'], 'taxi')
  _data_root = os.path.join(_taxi_root, 'data', 'simple')
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = FileBasedExampleGen(input_base=_data_root)
  ```
  """

  SPEC_CLASS = FileBasedExampleGenSpec
  # EXECUTOR_SPEC should be overridden by subclasses.
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(base_executor.BaseExecutor)
  DRIVER_CLASS = driver.Driver

  def __init__(
      self,
      # TODO(b/159467778): deprecate this, use input_base instead.
      input: Optional[types.Channel] = None,  # pylint: disable=redefined-builtin
      input_base: Optional[Text] = None,
      input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
                                                               Any]]] = None,
      output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
                                                                 Any]]] = None,
      custom_config: Optional[Union[example_gen_pb2.CustomConfig,
                                    Dict[Text, Any]]] = None,
      range_config: Optional[Union[range_config_pb2.RangeConfig,
                                   Dict[Text, Any]]] = None,
      output_data_format: Optional[int] = example_gen_pb2.FORMAT_TF_EXAMPLE,
      example_artifacts: Optional[types.Channel] = None,
      custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
      instance_name: Optional[Text] = None):
    """Construct a FileBasedExampleGen component.

    Args:
      input: A Channel of type `standard_artifacts.ExternalArtifact`, which
        includes one artifact whose uri is an external directory containing the
        data files. (Deprecated by input_base)
      input_base: an external directory containing the data files.
      input_config: An
        [`example_gen_pb2.Input`](https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto)
        instance, providing input configuration. If unset, input files will be
        treated as a single split.
      output_config: An example_gen_pb2.Output instance, providing the output
        configuration. If unset, default splits will be 'train' and
        'eval' with size 2:1.
      custom_config: An optional example_gen_pb2.CustomConfig instance,
        providing custom configuration for executor.
      range_config: An optional range_config_pb2.RangeConfig instance,
        specifying the range of span values to consider. If unset, driver will
        default to searching for latest span with no restrictions.
      output_data_format: Payload format of generated data in output artifact,
        one of example_gen_pb2.PayloadFormat enum.
      example_artifacts: Channel of 'ExamplesPath' for output train and eval
        examples.
      custom_executor_spec: Optional custom executor spec overriding the default
        executor spec specified in the component attribute.
      instance_name: Optional unique instance name. Required only if multiple
        ExampleGen components are declared in the same pipeline.
    """
    if input:
      logging.warning(
          'The "input" argument to the ExampleGen component has been '
          'deprecated by "input_base". Please update your usage as support for '
          'this argument will be removed soon.')
      input_base = artifact_utils.get_single_uri(list(input.get()))
    # Configure inputs and outputs.
    input_config = input_config or utils.make_default_input_config()
    output_config = output_config or utils.make_default_output_config(
        input_config)
    if not example_artifacts:
      example_artifacts = types.Channel(type=standard_artifacts.Examples)
    spec = FileBasedExampleGenSpec(
        input_base=input_base,
        input_config=input_config,
        output_config=output_config,
        custom_config=custom_config,
        range_config=range_config,
        output_data_format=output_data_format,
        examples=example_artifacts)
    super(FileBasedExampleGen, self).__init__(
        spec=spec,
        custom_executor_spec=custom_executor_spec,
        instance_name=instance_name)
"""Generic TFX example gen base executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import bisect
import hashlib
import os
from typing import Any, Dict, List, Text, Union
from absl import logging
import apache_beam as beam
from six import with_metaclass
import tensorflow as tf
from tfx import types
from tfx.components.example_gen import utils
from tfx.components.util import examples_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
from tfx_bsl.telemetry import util
# Default file name for TFRecord output file prefix.
DEFAULT_FILE_NAME = 'data_tfrecord'
# Metrics namespace for ExampleGen.
_METRICS_NAMESPACE = util.MakeTfxNamespace(['ExampleGen'])
def _GeneratePartitionKey(record: Union[tf.train.Example,
tf.train.SequenceExample, bytes],
split_config: example_gen_pb2.SplitConfig) -> bytes:
"""Generates key for partition."""
if not split_config.HasField('partition_feature_name'):
if isinstance(record, bytes):
return record
return record.SerializeToString(deterministic=True)
if isinstance(record, tf.train.Example):
features = record.features.feature # pytype: disable=attribute-error
elif isinstance(record, tf.train.SequenceExample):
features = record.context.feature # pytype: disable=attribute-error
else:
raise RuntimeError('Split by `partition_feature_name` is only supported '
'for FORMAT_TF_EXAMPLE and FORMAT_TF_SEQUENCE_EXAMPLE '
'payload format.')
# Use README.ml-pipelines-sdk.md feature for partitioning the examples.
feature_name = split_config.partition_feature_name
if feature_name not in features:
raise RuntimeError('Feature name `{}` does not exist.'.format(feature_name))
feature = features[feature_name]
if not feature.HasField('kind'):
raise RuntimeError('Partition feature does not contain any value.')
if (not feature.HasField('bytes_list') and
not feature.HasField('int64_list')):
raise RuntimeError('Only `bytes_list` and `int64_list` features are '
'supported for partition.')
return feature.SerializeToString(deterministic=True)
def _PartitionFn(
    record: Union[tf.train.Example, tf.train.SequenceExample, bytes],
    num_partitions: int,
    buckets: List[int],
    split_config: example_gen_pb2.SplitConfig,
) -> int:
  """Partition function for the ExampleGen's output splits."""
  assert num_partitions == len(
      buckets), 'Partitions do not match bucket number.'
  partition_key = _GeneratePartitionKey(record, split_config)
  digest = int(hashlib.sha256(partition_key).hexdigest(), 16)
  bucket = digest % buckets[-1]
  # With cumulative buckets [10, 50, 80]:
  #   bucket in [0, 10)  -> split 0
  #   bucket in [10, 50) -> split 1
  #   bucket in [50, 80) -> split 2
  return bisect.bisect(buckets, bucket)
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[tf.train.Example,
                                       tf.train.SequenceExample, bytes])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def _WriteSplit(
    example_split: beam.pvalue.PCollection,
    output_split_path: Text,
) -> beam.pvalue.PDone:
  """Shuffles and writes output split as serialized records in TFRecord.

  Args:
    example_split: PCollection of examples (protos or already-serialized
      bytes) belonging to one output split.
    output_split_path: Directory to write the gzipped TFRecord shards to.

  Returns:
    beam.pvalue.PDone marking the end of the write.
  """

  class _MaybeSerialize(beam.DoFn):
    """Serializes the proto if needed."""

    def __init__(self):
      # Counts every written instance, reported via Beam metrics telemetry.
      self._num_instances = beam.metrics.Metrics.counter(
          _METRICS_NAMESPACE, 'num_instances')

    def process(self, e):
      self._num_instances.inc(1)
      if isinstance(e, (tf.train.Example, tf.train.SequenceExample)):
        yield e.SerializeToString()
      else:
        # Already serialized bytes; pass through unchanged.
        yield e

  return (example_split
          # TODO(jyzhao): make shuffle optional.
          | 'MaybeSerialize' >> beam.ParDo(_MaybeSerialize())
          | 'Shuffle' >> beam.transforms.Reshuffle()
          # TODO(jyzhao): multiple output format.
          | 'Write' >> beam.io.WriteToTFRecord(
              os.path.join(output_split_path, DEFAULT_FILE_NAME),
              file_name_suffix='.gz'))
class BaseExampleGenExecutor(
with_metaclass(abc.ABCMeta, base_executor.BaseExecutor)):
"""Generic TFX example gen base executor.
  The base ExampleGen executor takes a configuration and converts external data
sources to TensorFlow Examples (tf.train.Example, tf.train.SequenceExample),
or any other protocol buffer as subclass defines.
The common configuration (defined in
https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto#L44.)
describes the general properties of input data and shared instructions when
producing output data.
  The conversion is done in `GenerateExamplesByBeam` as a Beam pipeline, which
validates the configuration, reads the external data sources, converts the
record in the input source to any supported output payload formats
(e.g., tf.Example or tf.SequenceExample) if needed, and splits the examples
if the output split config is given. Then the executor's `Do` writes the
results in splits to the output path.
For simple custom ExampleGens, the details of transforming input data
  record(s) to a specific output payload format (e.g., tf.Example or
  tf.SequenceExample) is expected to be given in
  `GetInputSourceToExamplePTransform`, which returns a Beam PTransform with the
actual implementation. For complex use cases, such as joining multiple data
sources and different interpretations of the configurations, the custom
ExampleGen can override `GenerateExamplesByBeam`.
"""
  @abc.abstractmethod
  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for converting input source to records.

    The record is by default assumed to be tf.train.Example protos, subclasses
    can serialize any protocol buffer into bytes as output PCollection,
    so long as the downstream component can consume it.

    Note that each input split will be transformed by this function separately.
    For complex use cases, consider overriding 'GenerateExamplesByBeam' instead.

    Here is an example PTransform:
      @beam.ptransform_fn
      @beam.typehints.with_input_types(beam.Pipeline)
      @beam.typehints.with_output_types(Union[tf.train.Example,
                                              tf.train.SequenceExample,
                                              bytes])
      def ExamplePTransform(
          pipeline: beam.Pipeline,
          exec_properties: Dict[Text, Any],
          split_pattern: Text) -> beam.pvalue.PCollection
    """
    pass
  def GenerateExamplesByBeam(
      self,
      pipeline: beam.Pipeline,
      exec_properties: Dict[Text, Any],
  ) -> Dict[Text, beam.pvalue.PCollection]:
    """Converts input source to serialized record splits based on configs.

    Custom ExampleGen executor should provide GetInputSourceToExamplePTransform
    for converting input split to serialized records. Override this
    'GenerateExamplesByBeam' method instead if complex logic is needed, e.g.,
    custom splitting logic.

    Args:
      pipeline: Beam pipeline.
      exec_properties: A dict of execution properties. Depends on detailed
        example gen implementation.
        - input_base: an external directory containing the data files.
        - input_config: JSON string of example_gen_pb2.Input instance, providing
          input configuration.
        - output_config: JSON string of example_gen_pb2.Output instance,
          providing output configuration.
        - output_data_format: Payload format of generated data in output
          artifact, one of example_gen_pb2.PayloadFormat enum.

    Returns:
      Dict of beam PCollection with split name as key, each PCollection is a
      single output split that contains serialized records.
    """
    # Get input split information.
    input_config = example_gen_pb2.Input()
    proto_utils.json_to_proto(
        exec_properties[standard_component_specs.INPUT_CONFIG_KEY],
        input_config)
    # Get output split information.
    output_config = example_gen_pb2.Output()
    proto_utils.json_to_proto(
        exec_properties[standard_component_specs.OUTPUT_CONFIG_KEY],
        output_config)
    # Get output split names.
    split_names = utils.generate_output_split_names(input_config, output_config)
    # Make beam_pipeline_args available in exec_properties since certain
    # example_gen executors need this information.
    exec_properties['_beam_pipeline_args'] = self._beam_pipeline_args or []
    example_splits = []
    input_to_record = self.GetInputSourceToExamplePTransform()
    if output_config.split_config.splits:
      # Use output splits, input must have only one split.
      assert len(
          input_config.splits
      ) == 1, 'input must have only one split when output split is specified.'
      # Calculate split buckets (cumulative hash-bucket boundaries used by
      # _PartitionFn to assign each record to a split).
      buckets = []
      total_buckets = 0
      for split in output_config.split_config.splits:
        total_buckets += split.hash_buckets
        buckets.append(total_buckets)
      example_splits = (
          pipeline
          | 'InputToRecord' >>
          # pylint: disable=no-value-for-parameter
          input_to_record(exec_properties, input_config.splits[0].pattern)
          | 'SplitData' >> beam.Partition(_PartitionFn, len(buckets), buckets,
                                          output_config.split_config))
    else:
      # Use input splits: each input pattern becomes one output split as-is.
      for split in input_config.splits:
        examples = (
            pipeline
            | 'InputToRecord[{}]'.format(split.name) >>
            # pylint: disable=no-value-for-parameter
            input_to_record(exec_properties, split.pattern))
        example_splits.append(examples)
    result = {}
    for index, example_split in enumerate(example_splits):
      result[split_names[index]] = example_split
    return result
def Do(
    self,
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any],
) -> None:
  """Take input data source and generates serialized data splits.

  The output is intended to be serialized tf.train.Examples or
  tf.train.SequenceExamples protocol buffer in gzipped TFRecord format,
  but subclasses can choose to override to write to any serialized records
  payload into gzipped TFRecord as specified, so long as downstream
  component can consume it. The format of payload is added to
  `payload_format` custom property of the output Example artifact.

  Args:
    input_dict: Input dict from input key to a list of Artifacts. Depends on
      detailed example gen implementation.
    output_dict: Output dict from output key to a list of Artifacts.
      - examples: splits of serialized records.
    exec_properties: A dict of execution properties. Depends on detailed
      example gen implementation.
      - input_base: an external directory containing the data files.
      - input_config: JSON string of example_gen_pb2.Input instance,
        providing input configuration.
      - output_config: JSON string of example_gen_pb2.Output instance,
        providing output configuration.
      - output_data_format: Payload format of generated data in output
        artifact, one of example_gen_pb2.PayloadFormat enum.

  Returns:
    None
  """
  self._log_startup(input_dict, output_dict, exec_properties)

  # Deserialize the input/output configurations from their JSON
  # execution-property encodings into protos.
  input_config = example_gen_pb2.Input()
  proto_utils.json_to_proto(
      exec_properties[standard_component_specs.INPUT_CONFIG_KEY],
      input_config)
  output_config = example_gen_pb2.Output()
  proto_utils.json_to_proto(
      exec_properties[standard_component_specs.OUTPUT_CONFIG_KEY],
      output_config)

  # Record the resolved split names on the output Examples artifact so
  # downstream components can discover which splits were produced.
  examples_artifact = artifact_utils.get_single_instance(
      output_dict[standard_component_specs.EXAMPLES_KEY])
  examples_artifact.split_names = artifact_utils.encode_split_names(
      utils.generate_output_split_names(input_config, output_config))

  logging.info('Generating examples.')
  with self._make_beam_pipeline() as pipeline:
    example_splits = self.GenerateExamplesByBeam(pipeline, exec_properties)
    # Each split's PCollection is written to its own URI under the output
    # Examples artifact.
    # pylint: disable=expression-not-assigned, no-value-for-parameter
    for split_name, example_split in example_splits.items():
      (example_split
       | 'WriteSplit[{}]'.format(split_name) >> _WriteSplit(
           artifact_utils.get_split_uri(
               output_dict[standard_component_specs.EXAMPLES_KEY],
               split_name)))
    # pylint: enable=expression-not-assigned, no-value-for-parameter

  # If a payload format was declared, stamp it onto every output Examples
  # artifact (stored as the `payload_format` custom property).
  output_payload_format = exec_properties.get(
      standard_component_specs.OUTPUT_DATA_FORMAT_KEY)
  if output_payload_format:
    for output_examples_artifact in output_dict[
        standard_component_specs.EXAMPLES_KEY]:
      examples_utils.set_payload_format(output_examples_artifact,
                                        output_payload_format)
  logging.info('Examples generated.')
"""Generic TFX ExampleGen custom driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from typing import Any, Dict, List, Text
from absl import logging
from tfx import types
from tfx.components.example_gen import utils
from tfx.dsl.components.base import base_driver
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import base_driver as ir_base_driver
from tfx.orchestration.portable import data_types as portable_data_types
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.proto.orchestration import driver_output_pb2
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
from ml_metadata.proto import metadata_store_pb2
def update_output_artifact(
    exec_properties: Dict[Text, Any],
    output_artifact: metadata_store_pb2.Artifact) -> None:
  """Updates output_artifact for FileBasedExampleGen.

  Copies the resolved fingerprint, span and (optionally) version from the
  execution properties onto the output example artifact, updating existing
  custom-property entries or creating them if they do not already exist.

  Args:
    exec_properties: execution properties passed to the example gen.
    output_artifact: the example artifact to be output.
  """
  custom_props = output_artifact.custom_properties
  custom_props[utils.FINGERPRINT_PROPERTY_NAME].string_value = (
      exec_properties[utils.FINGERPRINT_PROPERTY_NAME])
  custom_props[utils.SPAN_PROPERTY_NAME].string_value = str(
      exec_properties[utils.SPAN_PROPERTY_NAME])
  # TODO(b/162622803): add default behavior for when version spec not present.
  version = exec_properties[utils.VERSION_PROPERTY_NAME]
  if version is not None:
    custom_props[utils.VERSION_PROPERTY_NAME].string_value = str(version)
class Driver(base_driver.BaseDriver, ir_base_driver.BaseDriver):
  """Custom driver for ExampleGen.

  This driver supports file based ExampleGen, e.g., for CsvExampleGen and
  ImportExampleGen.
  """

  def __init__(self, metadata_handler: metadata.Metadata):
    # Explicitly initialize both bases: this class bridges the legacy driver
    # API (base_driver) and the portable/IR driver API (ir_base_driver).
    base_driver.BaseDriver.__init__(self, metadata_handler)
    ir_base_driver.BaseDriver.__init__(self, metadata_handler)

  def resolve_exec_properties(
      self,
      exec_properties: Dict[Text, Any],
      pipeline_info: data_types.PipelineInfo,
      component_info: data_types.ComponentInfo,
  ) -> Dict[Text, Any]:
    """Overrides BaseDriver.resolve_exec_properties()."""
    del pipeline_info, component_info

    # Deserialize the input config from its JSON execution-property encoding.
    input_config = example_gen_pb2.Input()
    proto_utils.json_to_proto(
        exec_properties[standard_component_specs.INPUT_CONFIG_KEY],
        input_config)
    input_base = exec_properties[standard_component_specs.INPUT_BASE_KEY]
    logging.debug('Processing input %s.', input_base)

    # An optional RangeConfig restricts which span is processed.
    range_config = None
    range_config_entry = exec_properties.get(
        standard_component_specs.RANGE_CONFIG_KEY)
    if range_config_entry:
      range_config = range_config_pb2.RangeConfig()
      proto_utils.json_to_proto(range_config_entry, range_config)
      if range_config.HasField('static_range'):
        # For ExampleGen, StaticRange must specify an exact span to look for,
        # since only one span is processed at a time.
        start_span_number = range_config.static_range.start_span_number
        end_span_number = range_config.static_range.end_span_number
        if start_span_number != end_span_number:
          raise ValueError(
              'Start and end span numbers for RangeConfig.static_range must '
              'be equal: (%s, %s)' % (start_span_number, end_span_number))

    # Note that this function updates the input_config.splits.pattern.
    fingerprint, span, version = utils.calculate_splits_fingerprint_span_and_version(
        input_base, input_config.splits, range_config)

    # Write back the (possibly pattern-rewritten) input config and the
    # resolved span/version/fingerprint so the executor sees them.
    exec_properties[standard_component_specs
                    .INPUT_CONFIG_KEY] = proto_utils.proto_to_json(input_config)
    exec_properties[utils.SPAN_PROPERTY_NAME] = span
    exec_properties[utils.VERSION_PROPERTY_NAME] = version
    exec_properties[utils.FINGERPRINT_PROPERTY_NAME] = fingerprint
    return exec_properties

  def _prepare_output_artifacts(
      self,
      input_artifacts: Dict[Text, List[types.Artifact]],
      output_dict: Dict[Text, types.Channel],
      exec_properties: Dict[Text, Any],
      execution_id: int,
      pipeline_info: data_types.PipelineInfo,
      component_info: data_types.ComponentInfo,
  ) -> Dict[Text, List[types.Artifact]]:
    """Overrides BaseDriver._prepare_output_artifacts()."""
    del input_artifacts
    example_artifact = output_dict[standard_component_specs.EXAMPLES_KEY].type()
    base_output_dir = os.path.join(pipeline_info.pipeline_root,
                                   component_info.component_id)
    example_artifact.uri = base_driver._generate_output_uri(  # pylint: disable=protected-access
        base_output_dir, standard_component_specs.EXAMPLES_KEY, execution_id)
    # Stamp fingerprint/span/version custom properties onto the artifact.
    update_output_artifact(exec_properties, example_artifact.mlmd_artifact)
    base_driver._prepare_output_paths(example_artifact)  # pylint: disable=protected-access
    return {standard_component_specs.EXAMPLES_KEY: [example_artifact]}

  def run(
      self, execution_info: portable_data_types.ExecutionInfo
  ) -> driver_output_pb2.DriverOutput:
    # Populate exec_properties
    result = driver_output_pb2.DriverOutput()
    # PipelineInfo and ComponentInfo are not actually used, two fake ones are
    # created just to be compatible with the old API.
    pipeline_info = data_types.PipelineInfo('', '')
    component_info = data_types.ComponentInfo('', '', pipeline_info)
    exec_properties = self.resolve_exec_properties(
        execution_info.exec_properties, pipeline_info, component_info)
    for k, v in exec_properties.items():
      if v is not None:
        data_types_utils.set_metadata_value(result.exec_properties[k], v)
    # Populate output_dict: copy the pre-declared output Examples artifact,
    # stamp the resolved properties on it, and report it in the driver output.
    output_example = copy.deepcopy(execution_info.output_dict[
        standard_component_specs.EXAMPLES_KEY][0].mlmd_artifact)
    update_output_artifact(exec_properties, output_example)
    result.output_artifacts[
        standard_component_specs.EXAMPLES_KEY].artifacts.append(output_example)
    return result
"""Parquet based TFX example gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.types import standard_component_specs
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _ParquetToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Reads Parquet files and transforms them into TF examples.

  Each input split is handled by a separate invocation of this transform.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties.
      - input_base: input dir that contains Parquet data.
    split_pattern: Split.pattern in Input config, glob relative file pattern
      that maps to input files with root directory given by input_base.

  Returns:
    PCollection of TF examples.
  """
  root_dir = exec_properties[standard_component_specs.INPUT_BASE_KEY]
  file_pattern = os.path.join(root_dir, split_pattern)
  logging.info('Processing input parquet data %s to TFExample.',
               file_pattern)
  # TODO(jyzhao): support per column read by input_config.
  rows = pipeline | 'ReadFromParquet' >> beam.io.ReadFromParquet(file_pattern)
  return rows | 'ToTFExample' >> beam.Map(utils.dict_to_example)
class Executor(BaseExampleGenExecutor):
  """TFX example gen executor for processing parquet format.

  Data type conversion:
    integer types will be converted to tf.train.Feature with tf.train.Int64List.
    float types will be converted to tf.train.Feature with tf.train.FloatList.
    string types will be converted to tf.train.Feature with tf.train.BytesList
      and utf-8 encoding.

  Note that,
    Single value will be converted to a list of that single value.
    Missing value will be converted to empty tf.train.Feature().
    Parquet data might lose precision, e.g., int96.

  For details, check the dict_to_example function in example_gen.utils.

  Example usage:

    from tfx.components.base import executor_spec
    from tfx.components.example_gen.component import FileBasedExampleGen
    from tfx.components.example_gen.custom_executors import parquet_executor
    from tfx.utils.dsl_utils import external_input

    example_gen = FileBasedExampleGen(
        input=external_input(parquet_dir_path),
        custom_executor_spec=executor_spec.ExecutorClassSpec(
            parquet_executor.Executor))
  """

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for parquet to TF examples."""
    return _ParquetToExample
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.