"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.minio_pusher import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
from tfx.utils import json_utils
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class MinIOPusher(base_component.BaseComponent):
"""A TFX component to push validated TensorFlow models to README.ml-pipelines-sdk.md model serving platform.
The `Pusher` component can be used to push an validated SavedModel from output
of the [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) to
[TensorFlow Serving](https://www.tensorflow.org/tfx/serving). The Pusher
will check the validation results from the [Evaluator
component](https://www.tensorflow.org/tfx/guide/evaluator) and [InfraValidator
component](https://www.tensorflow.org/tfx/guide/infra_validator)
before deploying the model. If the model has not been blessed, then the model
will not be pushed.
*Note:* The executor for this component can be overridden to enable the model
to be pushed to serving platforms other than tf.serving. The [Cloud AI
Platform custom
executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/pusher)
provides an example of how to implement this.
## Example
```
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if the check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
```
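If any `push_destination` field is supplied at runtime, the destination can
instead be passed as a dict whose keys mirror the `PushDestination` proto
fields (see Args below). A sketch, where `serving_dir_param` stands for a
hypothetical RuntimeParameter defined elsewhere in the pipeline:
```
pusher = Pusher(
    model=trainer.outputs['model'],
    model_blessing=evaluator.outputs['blessing'],
    push_destination={
        'filesystem': {
            'base_directory': serving_dir_param
        }
    })
```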
"""
SPEC_CLASS = PusherSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
model: types.Channel = None,
model_blessing: Optional[types.Channel] = None,
infra_blessing: Optional[types.Channel] = None,
push_destination: Optional[Union[pusher_pb2.PushDestination,
Dict[Text, Any]]] = None,
custom_config: Optional[Dict[Text, Any]] = None,
custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
pushed_model: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct README.ml-pipelines-sdk.md Pusher component.
Args:
model: A Channel of type `standard_artifacts.Model`, usually produced by
a Trainer component.
model_blessing: An optional Channel of type
`standard_artifacts.ModelBlessing`, usually produced from an Evaluator
component.
infra_blessing: An optional Channel of type
`standard_artifacts.InfraBlessing`, usually produced from an
InfraValidator component.
push_destination: A pusher_pb2.PushDestination instance, providing info
for tensorflow serving to load models. Optional if executor_class
doesn't require push_destination. If any field is provided as a
RuntimeParameter, push_destination should be constructed as a dict with
the same field names as PushDestination proto message.
custom_config: A dict which contains the deployment job parameters to be
passed to cloud-based training platforms. The [Kubeflow example](
https://github.com/tensorflow/tfx/blob/6ff57e36a7b65818d4598d41e584a42584d361e6/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py#L278-L285)
contains an example of how this can be used by custom executors.
custom_executor_spec: Optional custom executor spec.
pushed_model: Optional output `standard_artifacts.PushedModel` channel
with result of push.
instance_name: Optional unique instance name. Necessary if multiple Pusher
components are declared in the same pipeline.
"""
pushed_model = pushed_model or types.Channel(
type=standard_artifacts.PushedModel)
if push_destination is None and not custom_executor_spec:
raise ValueError('push_destination is required unless a '
'custom_executor_spec is supplied that does not require '
'it.')
spec = PusherSpec(
model=model,
model_blessing=model_blessing,
infra_blessing=infra_blessing,
push_destination=push_destination,
custom_config=json_utils.dumps(custom_config),
pushed_model=pushed_model)
super(MinIOPusher, self).__init__(
spec=spec,
custom_executor_spec=custom_executor_spec,
instance_name=instance_name)


# Source: /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/minio_pusher/component.py

"""TFX BulkInferrer component definition."""
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.bulk_inferrer import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import bulk_inferrer_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import BulkInferrerSpec
class BulkInferrer(base_component.BaseComponent):
"""A TFX component to do batch inference on README.ml-pipelines-sdk.md model with unlabelled examples.
BulkInferrer consumes examples data and README.ml-pipelines-sdk.md model, and produces the inference
results to an external location as PredictionLog proto.
BulkInferrer will infer on validated model.
## Example
```
# Uses BulkInferrer to run inference on examples.
bulk_inferrer = BulkInferrer(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'])
```
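To materialize prediction-annotated examples instead of raw inference
results, set `output_example_spec`. A sketch; the nested message names
(`OutputColumnsSpec`, `PredictOutput`, `PredictOutputCol`) are assumed from
bulk_inferrer_pb2, and the field names match those the executor reads:
```
bulk_inferrer = BulkInferrer(
    examples=example_gen.outputs['examples'],
    model=trainer.outputs['model'],
    output_example_spec=bulk_inferrer_pb2.OutputExampleSpec(
        output_columns_spec=[
            bulk_inferrer_pb2.OutputColumnsSpec(
                predict_output=bulk_inferrer_pb2.PredictOutput(
                    output_columns=[
                        bulk_inferrer_pb2.PredictOutputCol(
                            output_key='scores',
                            output_column='prediction')
                    ]))
        ]))
```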
"""
SPEC_CLASS = BulkInferrerSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
examples: types.Channel = None,
model: Optional[types.Channel] = None,
model_blessing: Optional[types.Channel] = None,
data_spec: Optional[Union[bulk_inferrer_pb2.DataSpec, Dict[Text,
Any]]] = None,
model_spec: Optional[Union[bulk_inferrer_pb2.ModelSpec,
Dict[Text, Any]]] = None,
output_example_spec: Optional[Union[bulk_inferrer_pb2.OutputExampleSpec,
Dict[Text, Any]]] = None,
inference_result: Optional[types.Channel] = None,
output_examples: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct an BulkInferrer component.
Args:
examples: A Channel of type `standard_artifacts.Examples`, usually
produced by an ExampleGen component. _required_
model: A Channel of type `standard_artifacts.Model`, usually produced by
a Trainer component.
model_blessing: A Channel of type `standard_artifacts.ModelBlessing`,
usually produced by a ModelValidator component.
data_spec: bulk_inferrer_pb2.DataSpec instance that describes data
selection. If any field is provided as a RuntimeParameter, data_spec
should be constructed as a dict with the same field names as DataSpec
proto message.
model_spec: bulk_inferrer_pb2.ModelSpec instance that describes model
specification. If any field is provided as a RuntimeParameter,
model_spec should be constructed as a dict with the same field names as
ModelSpec proto message.
output_example_spec: bulk_inferrer_pb2.OutputExampleSpec instance. Specify
this if you want BulkInferrer to output examples instead of inference
results. If any field is provided as a RuntimeParameter, output_example_spec
should be constructed as a dict with the same field names as
OutputExampleSpec proto message.
inference_result: Channel of type `standard_artifacts.InferenceResult`
to store the inference results, must not be specified when
output_example_spec is set.
output_examples: Channel of type `standard_artifacts.Examples`
to store the output examples, must not be specified when
output_example_spec is unset. Check output_example_spec for details.
instance_name: Optional name assigned to this specific instance of
BulkInferrer. Required only if multiple BulkInferrer components are
declared in the same pipeline.
Raises:
ValueError: If inference_result is specified when output_example_spec is
set, or if output_examples is specified when output_example_spec is
unset.
"""
if output_example_spec:
if inference_result:
raise ValueError(
'Must not specify inference_result when output_example_spec is set.'
)
output_examples = output_examples or types.Channel(
type=standard_artifacts.Examples)
else:
if output_examples:
raise ValueError(
'Must not specify output_examples when output_example_spec is unset.'
)
inference_result = inference_result or types.Channel(
type=standard_artifacts.InferenceResult)
spec = BulkInferrerSpec(
examples=examples,
model=model,
model_blessing=model_blessing,
data_spec=data_spec or bulk_inferrer_pb2.DataSpec(),
model_spec=model_spec or bulk_inferrer_pb2.ModelSpec(),
output_example_spec=output_example_spec,
inference_result=inference_result,
output_examples=output_examples)
super(BulkInferrer, self).__init__(spec=spec, instance_name=instance_name)


# Source: /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/bulk_inferrer/component.py

"""Utils for converting prediction_log to example."""
from typing import Any, List, Tuple, Text, Union
import numpy as np
import six
import tensorflow as tf
from tfx.proto import bulk_inferrer_pb2
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import prediction_log_pb2
INPUT_KEY = 'examples'
FEATURE_LIST_TYPE = List[Tuple[Text, List[Union[Text, bytes, float]]]]
# Typehint Any is for compatibility reason.
_OutputExampleSpecType = Union[bulk_inferrer_pb2.OutputExampleSpec, Any]
_PredictOutputType = Union[bulk_inferrer_pb2.PredictOutput, Any]
_ClassifyOutputType = Union[bulk_inferrer_pb2.ClassifyOutput, Any]
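# Usage sketch (illustrative, not part of the original module): append a
# regression result to the request's input example as a float feature named
# 'pred'. Field names follow bulk_inferrer_pb2.OutputExampleSpec as consumed
# by convert() below.
#
#   spec = bulk_inferrer_pb2.OutputExampleSpec()
#   spec.output_columns_spec.add().regress_output.value_column = 'pred'
#   example = convert(prediction_log, spec)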
def convert(prediction_log: prediction_log_pb2.PredictionLog,
output_example_spec: _OutputExampleSpecType) -> tf.train.Example:
"""Converts given `prediction_log` to README.ml-pipelines-sdk.md `tf.train.Example`.
Args:
prediction_log: The input prediction log.
output_example_spec: The spec for how to map prediction results to columns
in example.
Returns:
A `tf.train.Example` converted from the given prediction_log.
Raises:
ValueError: If the inference type or signature name in spec does not match
that in prediction_log.
"""
specs = output_example_spec.output_columns_spec
if prediction_log.HasField('multi_inference_log'):
example, output_features = _parse_multi_inference_log(
prediction_log.multi_inference_log, output_example_spec)
else:
if len(specs) != 1:
raise ValueError('Got single inference result, so expect single spec in '
'output_example_spec: %s' % output_example_spec)
if prediction_log.HasField('regress_log'):
if not specs[0].HasField('regress_output'):
raise ValueError(
'Regression predictions require a regress_output in output_example_spec: %s'
% output_example_spec)
example = tf.train.Example()
example.CopyFrom(
prediction_log.regress_log.request.input.example_list.examples[0])
output_features = [
(specs[0].regress_output.value_column,
[prediction_log.regress_log.response.result.regressions[0].value])
]
elif prediction_log.HasField('classify_log'):
if not specs[0].HasField('classify_output'):
raise ValueError(
'Classification predictions require a classify_output in output_example_spec: %s'
% output_example_spec)
example, output_features = _parse_classify_log(
prediction_log.classify_log, specs[0].classify_output)
elif prediction_log.HasField('predict_log'):
if not specs[0].HasField('predict_output'):
raise ValueError(
'Predict predictions require a predict_output in output_example_spec: %s'
% output_example_spec)
example, output_features = _parse_predict_log(prediction_log.predict_log,
specs[0].predict_output)
else:
raise ValueError('Unsupported prediction type in prediction_log: %s' %
prediction_log)
return _add_columns(example, output_features)
def _parse_multi_inference_log(
multi_inference_log: prediction_log_pb2.MultiInferenceLog,
output_example_spec: _OutputExampleSpecType) -> tf.train.Example:
"""Parses MultiInferenceLog."""
spec_map = {
spec.signature_name or tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
spec for spec in output_example_spec.output_columns_spec
}
example = tf.train.Example()
example.CopyFrom(multi_inference_log.request.input.example_list.examples[0])
output_features = []
for result in multi_inference_log.response.results:
spec = spec_map[result.model_spec.signature_name]
if result.HasField('classification_result'):
output_features += _parse_classification_result(
result.classification_result, spec.classify_output)
elif result.HasField('regression_result'):
output_features.append((spec.regress_output.value_column,
[result.regression_result.regressions[0].value]))
else:
raise ValueError('Unsupported multi_inference_log: %s' %
multi_inference_log)
return example, output_features
def _parse_classify_log(
classify_log: prediction_log_pb2.ClassifyLog,
classify_output_spec: _ClassifyOutputType
) -> Tuple[tf.train.Example, FEATURE_LIST_TYPE]:
"""Parses ClassiyLog."""
example = tf.train.Example()
example.CopyFrom(classify_log.request.input.example_list.examples[0])
return example, _parse_classification_result(classify_log.response.result,
classify_output_spec)
def _parse_classification_result(
classification_result: classification_pb2.ClassificationResult,
classify_output_spec: _ClassifyOutputType) -> FEATURE_LIST_TYPE:
"""Parses ClassificationResult."""
output_features = []
classes = classification_result.classifications[0].classes
if classify_output_spec.label_column:
output_features.append(
(classify_output_spec.label_column, [c.label for c in classes]))
if classify_output_spec.score_column:
output_features.append(
(classify_output_spec.score_column, [c.score for c in classes]))
return output_features
def _parse_predict_log(
predict_log: prediction_log_pb2.PredictLog,
predict_output_spec: _PredictOutputType
) -> Tuple[tf.train.Example, FEATURE_LIST_TYPE]:
"""Parses PredictLog."""
input_tensor_proto = predict_log.request.inputs[INPUT_KEY]
example = tf.train.Example.FromString(input_tensor_proto.string_val[0])
outputs = predict_log.response.outputs
output_features = []
for col in predict_output_spec.output_columns:
output_tensor_proto = outputs.get(col.output_key)
output_values = np.squeeze(tf.make_ndarray(output_tensor_proto))
if output_values.ndim > 1:
raise ValueError(
'All output values must be convertible to 1D arrays, but %s was '
'not. value was %s.' % (col.output_key, output_values))
if output_values.ndim == 1:
# Convert the output_values to a list.
output_values = output_values.tolist()
else: # output_values.ndim == 0
# Get a scalar for output_values.
output_values = [output_values.item()]
output_features.append((col.output_column, output_values))
return example, output_features
def _add_columns(example: tf.train.Example,
features: FEATURE_LIST_TYPE) -> tf.train.Example:
"""Add given features to `example`."""
feature_map = example.features.feature
for col, value in features:
assert col not in feature_map, ('column name %s already exists in example: '
'%s') % (col, example)
# Note: we only consider two types, bytes and float for now.
if isinstance(value[0], (six.text_type, six.binary_type)):
if isinstance(value[0], six.text_type):
bytes_value = [v.encode('utf-8') for v in value]
else:
bytes_value = value
feature_map[col].bytes_list.value[:] = bytes_value
else:
feature_map[col].float_list.value[:] = value
return example


# Source: /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/bulk_inferrer/prediction_to_example_utils.py

"""TFX bulk_inferrer executor."""
import os
from typing import Any, Callable, Dict, List, Optional, Text, Union
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx import types
from tfx.components.bulk_inferrer import prediction_to_example_utils
from tfx.components.util import model_utils
from tfx.components.util import tfxio_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import bulk_inferrer_pb2
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx_bsl.public.beam import run_inference
from tfx_bsl.public.proto import model_spec_pb2
from tfx_bsl.tfxio import record_based_tfxio
from tensorflow_serving.apis import prediction_log_pb2
try:
import tensorflow_text as _ # pylint: disable=g-import-not-at-top
except ImportError as e:
logging.info('tensorflow_text is not available: %s', e)
_PREDICTION_LOGS_FILE_NAME = 'prediction_logs'
_EXAMPLES_FILE_NAME = 'examples'
_TELEMETRY_DESCRIPTORS = ['BulkInferrer']
class Executor(base_executor.BaseExecutor):
"""TFX bulk inferer executor."""
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Runs batch inference on README.ml-pipelines-sdk.md given model with given input examples.
Args:
input_dict: Input dict from input key to README.ml-pipelines-sdk.md list of Artifacts.
- examples: examples for inference.
- model: exported model.
- model_blessing: model blessing result, optional.
output_dict: Output dict from output key to README.ml-pipelines-sdk.md list of Artifacts.
- output: bulk inference results.
exec_properties: A dict of execution properties.
- model_spec: JSON string of bulk_inferrer_pb2.ModelSpec instance.
- data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.
Returns:
None
"""
self._log_startup(input_dict, output_dict, exec_properties)
if output_dict.get(standard_component_specs.INFERENCE_RESULT_KEY):
inference_result = artifact_utils.get_single_instance(
output_dict[standard_component_specs.INFERENCE_RESULT_KEY])
else:
inference_result = None
if output_dict.get(standard_component_specs.OUTPUT_EXAMPLES_KEY):
output_examples = artifact_utils.get_single_instance(
output_dict[standard_component_specs.OUTPUT_EXAMPLES_KEY])
else:
output_examples = None
if 'examples' not in input_dict:
raise ValueError('\'examples\' is missing in input dict.')
if 'model' not in input_dict:
raise ValueError('Input models are not valid; a model '
'needs to be specified.')
if standard_component_specs.MODEL_BLESSING_KEY in input_dict:
model_blessing = artifact_utils.get_single_instance(
input_dict[standard_component_specs.MODEL_BLESSING_KEY])
if not model_utils.is_model_blessed(model_blessing):
logging.info('Model on %s was not blessed', model_blessing.uri)
return
else:
logging.info('Model blessing is not provided, exported model will be '
'used.')
model = artifact_utils.get_single_instance(
input_dict[standard_component_specs.MODEL_KEY])
model_path = path_utils.serving_model_path(model.uri)
logging.info('Use exported model from %s.', model_path)
data_spec = bulk_inferrer_pb2.DataSpec()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.DATA_SPEC_KEY], data_spec)
output_example_spec = bulk_inferrer_pb2.OutputExampleSpec()
if exec_properties.get(standard_component_specs.OUTPUT_EXAMPLE_SPEC_KEY):
proto_utils.json_to_proto(
exec_properties[standard_component_specs.OUTPUT_EXAMPLE_SPEC_KEY],
output_example_spec)
self._run_model_inference(
data_spec, output_example_spec,
input_dict[standard_component_specs.EXAMPLES_KEY], output_examples,
inference_result, self._get_inference_spec(model_path, exec_properties))
def _get_inference_spec(
self, model_path: Text,
exec_properties: Dict[Text, Any]) -> model_spec_pb2.InferenceSpecType:
model_spec = bulk_inferrer_pb2.ModelSpec()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.MODEL_SPEC_KEY], model_spec)
saved_model_spec = model_spec_pb2.SavedModelSpec(
model_path=model_path,
tag=model_spec.tag,
signature_name=model_spec.model_signature_name)
result = model_spec_pb2.InferenceSpecType()
result.saved_model_spec.CopyFrom(saved_model_spec)
return result
def _run_model_inference(
self,
data_spec: bulk_inferrer_pb2.DataSpec,
output_example_spec: bulk_inferrer_pb2.OutputExampleSpec,
examples: List[types.Artifact],
output_examples: Optional[types.Artifact],
inference_result: Optional[types.Artifact],
inference_endpoint: model_spec_pb2.InferenceSpecType,
) -> None:
"""Runs model inference on given examples data.
Args:
data_spec: bulk_inferrer_pb2.DataSpec instance.
output_example_spec: bulk_inferrer_pb2.OutputExampleSpec instance.
examples: List of `standard_artifacts.Examples` artifacts.
output_examples: Optional output `standard_artifacts.Examples` artifact.
inference_result: Optional output `standard_artifacts.InferenceResult`
artifact.
inference_endpoint: Model inference endpoint.
"""
example_uris = {}
for example_artifact in examples:
for split in artifact_utils.decode_split_names(
example_artifact.split_names):
if data_spec.example_splits:
if split in data_spec.example_splits:
example_uris[split] = artifact_utils.get_split_uri(
[example_artifact], split)
else:
example_uris[split] = artifact_utils.get_split_uri([example_artifact],
split)
payload_format, _ = tfxio_utils.resolve_payload_format_and_data_view_uri(
examples)
tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
examples,
_TELEMETRY_DESCRIPTORS,
schema=None,
read_as_raw_records=True,
# We have to specify this parameter in order to create a RawRecord TFXIO
# but we won't use the RecordBatches so the column name of the raw
# records does not matter.
raw_record_column_name='unused')
if output_examples:
output_examples.split_names = artifact_utils.encode_split_names(
sorted(example_uris.keys()))
with self._make_beam_pipeline() as pipeline:
data_list = []
for split, example_uri in example_uris.items():
tfxio = tfxio_factory([io_utils.all_files_pattern(example_uri)])
assert isinstance(tfxio, record_based_tfxio.RecordBasedTFXIO), (
'Unable to use TFXIO {} as it does not support reading raw records.'
.format(type(tfxio)))
# pylint: disable=no-value-for-parameter
data = (pipeline
| 'ReadData[{}]'.format(split) >> tfxio.RawRecordBeamSource()
| 'RunInference[{}]'.format(split) >> _RunInference(
payload_format, inference_endpoint))
if output_examples:
output_examples_split_uri = artifact_utils.get_split_uri(
[output_examples], split)
logging.info('Path of output examples split `%s` is %s.', split,
output_examples_split_uri)
_ = (
data
| 'WriteExamples[{}]'.format(split) >> _WriteExamples(
output_example_spec, output_examples_split_uri))
# pylint: enable=no-value-for-parameter
data_list.append(data)
if inference_result:
_ = (
data_list
| 'FlattenInferenceResult' >> beam.Flatten(pipeline=pipeline)
| 'WritePredictionLogs' >> beam.io.WriteToTFRecord(
os.path.join(inference_result.uri, _PREDICTION_LOGS_FILE_NAME),
file_name_suffix='.gz',
coder=beam.coders.ProtoCoder(prediction_log_pb2.PredictionLog)))
if output_examples:
logging.info('Output examples written to %s.', output_examples.uri)
if inference_result:
logging.info('Inference result written to %s.', inference_result.uri)
def _MakeParseFn(
payload_format: int
) -> Union[Callable[[bytes], tf.train.Example], Callable[
[bytes], tf.train.SequenceExample]]:
"""Returns README.ml-pipelines-sdk.md function to parse bytes to payload."""
if payload_format == example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE:
return tf.train.Example.FromString
elif (payload_format ==
example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE):
return tf.train.SequenceExample.FromString
else:
raise NotImplementedError(
'Payload format %s is not supported.' %
example_gen_pb2.PayloadFormat.Name(payload_format))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(prediction_log_pb2.PredictionLog)
def _RunInference(
pipeline: beam.Pipeline,
payload_format: int,
inference_endpoint: model_spec_pb2.InferenceSpecType
) -> beam.pvalue.PCollection:
"""Runs model inference on given examples data."""
return (
pipeline
| 'ParseExamples' >> beam.Map(_MakeParseFn(payload_format))
| 'RunInference' >> run_inference.RunInference(inference_endpoint))
@beam.ptransform_fn
@beam.typehints.with_input_types(prediction_log_pb2.PredictionLog)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def _WriteExamples(prediction_log: beam.pvalue.PCollection,
output_example_spec: bulk_inferrer_pb2.OutputExampleSpec,
output_path: Text) -> beam.pvalue.PDone:
"""Converts `prediction_log` to `tf.train.Example` and materializes."""
return (prediction_log
| 'ConvertToExamples' >> beam.Map(
prediction_to_example_utils.convert,
output_example_spec=output_example_spec)
| 'WriteExamples' >> beam.io.WriteToTFRecord(
os.path.join(output_path, _EXAMPLES_FILE_NAME),
file_name_suffix='.gz',
coder=beam.coders.ProtoCoder(tf.train.Example)))


# Source: /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/bulk_inferrer/executor.py

"""TFX ExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.example_gen import driver
from tfx.components.example_gen import utils
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import FileBasedExampleGenSpec
from tfx.types.standard_component_specs import QueryBasedExampleGenSpec
class QueryBasedExampleGen(base_component.BaseComponent):
"""A TFX component to ingest examples from query based systems.
The QueryBasedExampleGen component can be extended to ingest examples from
query based systems such as Presto or Bigquery. The component will also
convert the input data into
tf.record](https://www.tensorflow.org/tutorials/load_data/tf_records)
and generate train and eval example splits for downsteam components.
## Example
```
_query = "SELECT * FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`"
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = BigQueryExampleGen(query=_query)
```
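A sketch of overriding the default 2:1 train/eval split with an explicit
three-way split; message and field names follow the example_gen_pb2.Output
proto, and the 8:1:1 ratio is illustrative:
```
output = example_gen_pb2.Output(
    split_config=example_gen_pb2.SplitConfig(splits=[
        example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=8),
        example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1),
        example_gen_pb2.SplitConfig.Split(name='test', hash_buckets=1),
    ]))
example_gen = BigQueryExampleGen(query=_query, output_config=output)
```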
"""
SPEC_CLASS = QueryBasedExampleGenSpec
# EXECUTOR_SPEC should be overridden by subclasses.
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(base_executor.BaseExecutor)
def __init__(
self,
input_config: Union[example_gen_pb2.Input, Dict[Text, Any]],
output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
Any]]] = None,
custom_config: Optional[Union[example_gen_pb2.CustomConfig,
Dict[Text, Any]]] = None,
output_data_format: Optional[int] = example_gen_pb2.FORMAT_TF_EXAMPLE,
example_artifacts: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct README.ml-pipelines-sdk.md QueryBasedExampleGen component.
Args:
input_config: An
[example_gen_pb2.Input](https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto)
instance, providing input configuration. If any field is provided as a
RuntimeParameter, input_config should be constructed as a dict with
the same field names as Input proto message. _required_
output_config: An
[example_gen_pb2.Output](https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto)
instance, providing output configuration. If unset, the default splits
will be labeled as 'train' and 'eval' with a distribution ratio of 2:1.
If any field is provided as a RuntimeParameter, output_config should
be constructed as a dict with the same field names as Output proto
message.
custom_config: An
[example_gen_pb2.CustomConfig](https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto)
instance, providing custom configuration for ExampleGen. If any field
is provided as a RuntimeParameter, custom_config should be constructed
as a dict.
output_data_format: Payload format of generated data in output artifact,
one of example_gen_pb2.PayloadFormat enum.
example_artifacts: Channel of `standard_artifacts.Examples` for output
train and eval examples.
instance_name: Optional unique instance name. Required only if multiple
ExampleGen components are declared in the same pipeline.
Raises:
ValueError: The output_data_format value must be defined in the
example_gen_pb2.PayloadFormat proto.
"""
# Configure outputs.
output_config = output_config or utils.make_default_output_config(
input_config)
if not example_artifacts:
example_artifacts = types.Channel(type=standard_artifacts.Examples)
if output_data_format not in example_gen_pb2.PayloadFormat.values():
raise ValueError('The value of output_data_format must be defined in '
'the example_gen_pb2.PayloadFormat proto.')
spec = QueryBasedExampleGenSpec(
input_config=input_config,
output_config=output_config,
output_data_format=output_data_format,
custom_config=custom_config,
examples=example_artifacts)
super(QueryBasedExampleGen, self).__init__(
spec=spec, instance_name=instance_name)
class FileBasedExampleGen(base_component.BaseComponent):
"""A TFX component to ingest examples from README.ml-pipelines-sdk.md file system.
The FileBasedExampleGen component is an API for getting file-based records
into TFX pipelines. It consumes external files to generate examples which will
be used by other internal components like StatisticsGen or Trainers. The
component will also convert the input data into
[tf.record](https://www.tensorflow.org/tutorials/load_data/tf_records)
and generate train and eval example splits for downstream components.
## Example
```
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = FileBasedExampleGen(input_base=_data_root)
```
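A sketch of ingesting pre-split data where train and eval files live in
separate subdirectories; message and field names follow the
example_gen_pb2.Input proto, and the glob patterns are illustrative:
```
input_config = example_gen_pb2.Input(splits=[
    example_gen_pb2.Input.Split(name='train', pattern='train/*'),
    example_gen_pb2.Input.Split(name='eval', pattern='eval/*'),
])
example_gen = FileBasedExampleGen(
    input_base=_data_root, input_config=input_config)
```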
"""
SPEC_CLASS = FileBasedExampleGenSpec
# EXECUTOR_SPEC should be overridden by subclasses.
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(base_executor.BaseExecutor)
DRIVER_CLASS = driver.Driver
def __init__(
self,
# TODO(b/159467778): deprecate this, use input_base instead.
input: Optional[types.Channel] = None, # pylint: disable=redefined-builtin
input_base: Optional[Text] = None,
input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
Any]]] = None,
output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
Any]]] = None,
custom_config: Optional[Union[example_gen_pb2.CustomConfig,
Dict[Text, Any]]] = None,
range_config: Optional[Union[range_config_pb2.RangeConfig,
Dict[Text, Any]]] = None,
output_data_format: Optional[int] = example_gen_pb2.FORMAT_TF_EXAMPLE,
example_artifacts: Optional[types.Channel] = None,
custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
instance_name: Optional[Text] = None):
"""Construct README.ml-pipelines-sdk.md FileBasedExampleGen component.
Args:
input: A Channel of type `standard_artifacts.ExternalArtifact`, which
includes one artifact whose uri is an external directory containing the
data files. (Deprecated by input_base)
input_base: an external directory containing the data files.
input_config: An
[`example_gen_pb2.Input`](https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto)
instance, providing input configuration. If unset, input files will be
treated as a single split.
output_config: An example_gen_pb2.Output instance, providing the output
configuration. If unset, default splits will be 'train' and
'eval' with a size ratio of 2:1.
custom_config: An optional example_gen_pb2.CustomConfig instance,
providing custom configuration for executor.
range_config: An optional range_config_pb2.RangeConfig instance,
specifying the range of span values to consider. If unset, driver will
default to searching for latest span with no restrictions.
output_data_format: Payload format of generated data in output artifact,
one of example_gen_pb2.PayloadFormat enum.
example_artifacts: Channel of 'ExamplesPath' for output train and eval
examples.
custom_executor_spec: Optional custom executor spec overriding the default
executor spec specified in the component attribute.
instance_name: Optional unique instance name. Required only if multiple
ExampleGen components are declared in the same pipeline.
"""
if input:
logging.warning(
'The "input" argument to the ExampleGen component has been '
'deprecated by "input_base". Please update your usage as support for '
'this argument will be removed soon.')
input_base = artifact_utils.get_single_uri(list(input.get()))
# Configure inputs and outputs.
input_config = input_config or utils.make_default_input_config()
output_config = output_config or utils.make_default_output_config(
input_config)
if not example_artifacts:
example_artifacts = types.Channel(type=standard_artifacts.Examples)
spec = FileBasedExampleGenSpec(
input_base=input_base,
input_config=input_config,
output_config=output_config,
custom_config=custom_config,
range_config=range_config,
output_data_format=output_data_format,
examples=example_artifacts)
super(FileBasedExampleGen, self).__init__(
spec=spec,
custom_executor_spec=custom_executor_spec,
instance_name=instance_name)


# Source: /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/component.py

"""Generic TFX example gen base executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import bisect
import hashlib
import os
from typing import Any, Dict, List, Text, Union
from absl import logging
import apache_beam as beam
from six import with_metaclass
import tensorflow as tf
from tfx import types
from tfx.components.example_gen import utils
from tfx.components.util import examples_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
from tfx_bsl.telemetry import util
# Default file name for TFRecord output file prefix.
DEFAULT_FILE_NAME = 'data_tfrecord'
# Metrics namespace for ExampleGen.
_METRICS_NAMESPACE = util.MakeTfxNamespace(['ExampleGen'])
def _GeneratePartitionKey(record: Union[tf.train.Example,
tf.train.SequenceExample, bytes],
split_config: example_gen_pb2.SplitConfig) -> bytes:
"""Generates key for partition."""
if not split_config.HasField('partition_feature_name'):
if isinstance(record, bytes):
return record
return record.SerializeToString(deterministic=True)
if isinstance(record, tf.train.Example):
features = record.features.feature # pytype: disable=attribute-error
elif isinstance(record, tf.train.SequenceExample):
features = record.context.feature # pytype: disable=attribute-error
else:
raise RuntimeError('Split by `partition_feature_name` is only supported '
'for FORMAT_TF_EXAMPLE and FORMAT_TF_SEQUENCE_EXAMPLE '
'payload format.')
# Use a feature for partitioning the examples.
feature_name = split_config.partition_feature_name
if feature_name not in features:
raise RuntimeError('Feature name `{}` does not exist.'.format(feature_name))
feature = features[feature_name]
if not feature.HasField('kind'):
raise RuntimeError('Partition feature does not contain any value.')
if (not feature.HasField('bytes_list') and
not feature.HasField('int64_list')):
raise RuntimeError('Only `bytes_list` and `int64_list` features are '
'supported for partition.')
return feature.SerializeToString(deterministic=True)
def _PartitionFn(
record: Union[tf.train.Example, tf.train.SequenceExample, bytes],
num_partitions: int,
buckets: List[int],
split_config: example_gen_pb2.SplitConfig,
) -> int:
"""Partition function for the ExampleGen's output splits."""
assert num_partitions == len(
buckets), 'Partitions do not match bucket number.'
partition_str = _GeneratePartitionKey(record, split_config)
bucket = int(hashlib.sha256(partition_str).hexdigest(), 16) % buckets[-1]
# For example, if buckets is [10,50,80], there will be 3 splits:
# bucket >=0 && < 10, returns 0
# bucket >=10 && < 50, returns 1
# bucket >=50 && < 80, returns 2
return bisect.bisect(buckets, bucket)
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[tf.train.Example,
tf.train.SequenceExample, bytes])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def _WriteSplit(
example_split: beam.pvalue.PCollection,
output_split_path: Text,
) -> beam.pvalue.PDone:
"""Shuffles and writes output split as serialized records in TFRecord."""
class _MaybeSerialize(beam.DoFn):
"""Serializes the proto if needed."""
def __init__(self):
self._num_instances = beam.metrics.Metrics.counter(
_METRICS_NAMESPACE, 'num_instances')
def process(self, e):
self._num_instances.inc(1)
if isinstance(e, (tf.train.Example, tf.train.SequenceExample)):
yield e.SerializeToString()
else:
yield e
return (example_split
# TODO(jyzhao): make shuffle optional.
| 'MaybeSerialize' >> beam.ParDo(_MaybeSerialize())
| 'Shuffle' >> beam.transforms.Reshuffle()
# TODO(jyzhao): multiple output format.
| 'Write' >> beam.io.WriteToTFRecord(
os.path.join(output_split_path, DEFAULT_FILE_NAME),
file_name_suffix='.gz'))
class BaseExampleGenExecutor(
with_metaclass(abc.ABCMeta, base_executor.BaseExecutor)):
"""Generic TFX example gen base executor.
The base ExampleGen executor takes a configuration and converts external data
sources to TensorFlow Examples (tf.train.Example, tf.train.SequenceExample),
or any other protocol buffer that a subclass defines.
The common configuration (defined in
https://github.com/tensorflow/tfx/blob/master/tfx/proto/example_gen.proto#L44.)
describes the general properties of input data and shared instructions when
producing output data.
The conversion is done in `GenerateExamplesByBeam` as a Beam pipeline, which
validates the configuration, reads the external data sources, converts the
record in the input source to any supported output payload formats
(e.g., tf.Example or tf.SequenceExample) if needed, and splits the examples
if the output split config is given. Then the executor's `Do` writes the
results in splits to the output path.
For simple custom ExampleGens, the details of transforming input data
record(s) to a specific output payload format (e.g., tf.Example or
tf.SequenceExample) are expected to be given in
`GetInputSourceToExamplePTransform`, which returns a Beam PTransform with the
actual implementation. For complex use cases, such as joining multiple data
sources and different interpretations of the configurations, the custom
ExampleGen can override `GenerateExamplesByBeam`.
"""
@abc.abstractmethod
def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
"""Returns PTransform for converting input source to records.
The records are by default assumed to be tf.train.Example protos; subclasses
can serialize any protocol buffer into bytes as the output PCollection,
so long as the downstream component can consume it.
Note that each input split will be transformed by this function separately.
For complex use cases, consider overriding 'GenerateExamplesByBeam' instead.
Here is an example PTransform:
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(Union[tf.train.Example,
tf.train.SequenceExample,
bytes])
def ExamplePTransform(
pipeline: beam.Pipeline,
exec_properties: Dict[Text, Any],
split_pattern: Text) -> beam.pvalue.PCollection
"""
pass
def GenerateExamplesByBeam(
self,
pipeline: beam.Pipeline,
exec_properties: Dict[Text, Any],
) -> Dict[Text, beam.pvalue.PCollection]:
"""Converts input source to serialized record splits based on configs.
Custom ExampleGen executor should provide GetInputSourceToExamplePTransform
for converting an input split to serialized records. Override this
'GenerateExamplesByBeam' method instead if complex logic is needed, e.g.,
custom splitting logic.
Args:
pipeline: Beam pipeline.
exec_properties: A dict of execution properties. Depends on detailed
example gen implementation.
- input_base: an external directory containing the data files.
- input_config: JSON string of example_gen_pb2.Input instance, providing
input configuration.
- output_config: JSON string of example_gen_pb2.Output instance,
providing output configuration.
- output_data_format: Payload format of generated data in output
artifact, one of example_gen_pb2.PayloadFormat enum.
Returns:
Dict of beam PCollection with split name as key; each PCollection is a
single output split that contains serialized records.
"""
# Get input split information.
input_config = example_gen_pb2.Input()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.INPUT_CONFIG_KEY],
input_config)
# Get output split information.
output_config = example_gen_pb2.Output()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.OUTPUT_CONFIG_KEY],
output_config)
# Get output split names.
split_names = utils.generate_output_split_names(input_config, output_config)
# Make beam_pipeline_args available in exec_properties since certain
# example_gen executors need this information.
exec_properties['_beam_pipeline_args'] = self._beam_pipeline_args or []
example_splits = []
input_to_record = self.GetInputSourceToExamplePTransform()
if output_config.split_config.splits:
# Use output splits, input must have only one split.
assert len(
input_config.splits
) == 1, 'input must have only one split when output split is specified.'
# Calculate split buckets.
buckets = []
total_buckets = 0
for split in output_config.split_config.splits:
total_buckets += split.hash_buckets
buckets.append(total_buckets)
example_splits = (
pipeline
| 'InputToRecord' >>
# pylint: disable=no-value-for-parameter
input_to_record(exec_properties, input_config.splits[0].pattern)
| 'SplitData' >> beam.Partition(_PartitionFn, len(buckets), buckets,
output_config.split_config))
else:
# Use input splits.
for split in input_config.splits:
examples = (
pipeline
| 'InputToRecord[{}]'.format(split.name) >>
# pylint: disable=no-value-for-parameter
input_to_record(exec_properties, split.pattern))
example_splits.append(examples)
result = {}
for index, example_split in enumerate(example_splits):
result[split_names[index]] = example_split
return result
def Do(
self,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any],
) -> None:
"""Take input data source and generates serialized data splits.
The output is intended to be serialized tf.train.Examples or
tf.train.SequenceExamples protocol buffer in gzipped TFRecord format,
but subclasses can choose to override to write to any serialized records
payload into gzipped TFRecord as specified, so long as downstream
component can consume it. The format of payload is added to
`payload_format` custom property of the output Example artifact.
Args:
input_dict: Input dict from input key to a list of Artifacts. Depends on
detailed example gen implementation.
output_dict: Output dict from output key to a list of Artifacts.
- examples: splits of serialized records.
exec_properties: A dict of execution properties. Depends on detailed
example gen implementation.
- input_base: an external directory containing the data files.
- input_config: JSON string of example_gen_pb2.Input instance,
providing input configuration.
- output_config: JSON string of example_gen_pb2.Output instance,
providing output configuration.
- output_data_format: Payload format of generated data in output
artifact, one of example_gen_pb2.PayloadFormat enum.
Returns:
None
"""
self._log_startup(input_dict, output_dict, exec_properties)
input_config = example_gen_pb2.Input()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.INPUT_CONFIG_KEY],
input_config)
output_config = example_gen_pb2.Output()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.OUTPUT_CONFIG_KEY],
output_config)
examples_artifact = artifact_utils.get_single_instance(
output_dict[standard_component_specs.EXAMPLES_KEY])
examples_artifact.split_names = artifact_utils.encode_split_names(
utils.generate_output_split_names(input_config, output_config))
logging.info('Generating examples.')
with self._make_beam_pipeline() as pipeline:
example_splits = self.GenerateExamplesByBeam(pipeline, exec_properties)
# pylint: disable=expression-not-assigned, no-value-for-parameter
for split_name, example_split in example_splits.items():
(example_split
| 'WriteSplit[{}]'.format(split_name) >> _WriteSplit(
artifact_utils.get_split_uri(
output_dict[standard_component_specs.EXAMPLES_KEY],
split_name)))
# pylint: enable=expression-not-assigned, no-value-for-parameter
output_payload_format = exec_properties.get(
standard_component_specs.OUTPUT_DATA_FORMAT_KEY)
if output_payload_format:
for output_examples_artifact in output_dict[
standard_component_specs.EXAMPLES_KEY]:
examples_utils.set_payload_format(output_examples_artifact,
output_payload_format)
logging.info('Examples generated.')


# Source: /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/base_example_gen_executor.py

"""Generic TFX ExampleGen custom driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from typing import Any, Dict, List, Text
from absl import logging
from tfx import types
from tfx.components.example_gen import utils
from tfx.dsl.components.base import base_driver
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import base_driver as ir_base_driver
from tfx.orchestration.portable import data_types as portable_data_types
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.proto.orchestration import driver_output_pb2
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
from ml_metadata.proto import metadata_store_pb2
def update_output_artifact(
exec_properties: Dict[Text, Any],
output_artifact: metadata_store_pb2.Artifact) -> None:
"""Updates output_artifact for FileBasedExampleGen.
Updates output_artifact properties by updating existing entries or creating
new entries if they do not already exist.
Args:
exec_properties: execution properties passed to the example gen.
output_artifact: the example artifact to be output.
"""
output_artifact.custom_properties[
utils.FINGERPRINT_PROPERTY_NAME].string_value = (
exec_properties[utils.FINGERPRINT_PROPERTY_NAME])
output_artifact.custom_properties[
utils.SPAN_PROPERTY_NAME].string_value = str(
exec_properties[utils.SPAN_PROPERTY_NAME])
# TODO(b/162622803): add default behavior for when version spec not present.
if exec_properties[utils.VERSION_PROPERTY_NAME] is not None:
output_artifact.custom_properties[
utils.VERSION_PROPERTY_NAME].string_value = str(
exec_properties[utils.VERSION_PROPERTY_NAME])
class Driver(base_driver.BaseDriver, ir_base_driver.BaseDriver):
"""Custom driver for ExampleGen.
This driver supports file based ExampleGen, e.g., for CsvExampleGen and
ImportExampleGen.
"""
def __init__(self, metadata_handler: metadata.Metadata):
base_driver.BaseDriver.__init__(self, metadata_handler)
ir_base_driver.BaseDriver.__init__(self, metadata_handler)
def resolve_exec_properties(
self,
exec_properties: Dict[Text, Any],
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> Dict[Text, Any]:
"""Overrides BaseDriver.resolve_exec_properties()."""
del pipeline_info, component_info
input_config = example_gen_pb2.Input()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.INPUT_CONFIG_KEY],
input_config)
input_base = exec_properties[standard_component_specs.INPUT_BASE_KEY]
logging.debug('Processing input %s.', input_base)
range_config = None
range_config_entry = exec_properties.get(
standard_component_specs.RANGE_CONFIG_KEY)
if range_config_entry:
range_config = range_config_pb2.RangeConfig()
proto_utils.json_to_proto(range_config_entry, range_config)
if range_config.HasField('static_range'):
# For ExampleGen, StaticRange must specify an exact span to look for,
# since only one span is processed at a time.
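# A sketch of such a config (the StaticRange message name is assumed
# from range_config_pb2; span number 2 is illustrative):
#   range_config_pb2.RangeConfig(
#       static_range=range_config_pb2.StaticRange(
#           start_span_number=2, end_span_number=2))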
start_span_number = range_config.static_range.start_span_number
end_span_number = range_config.static_range.end_span_number
if start_span_number != end_span_number:
raise ValueError(
'Start and end span numbers for RangeConfig.static_range must '
'be equal: (%s, %s)' % (start_span_number, end_span_number))
# Note that this function updates the input_config.splits.pattern.
fingerprint, span, version = utils.calculate_splits_fingerprint_span_and_version(
input_base, input_config.splits, range_config)
exec_properties[standard_component_specs
.INPUT_CONFIG_KEY] = proto_utils.proto_to_json(input_config)
exec_properties[utils.SPAN_PROPERTY_NAME] = span
exec_properties[utils.VERSION_PROPERTY_NAME] = version
exec_properties[utils.FINGERPRINT_PROPERTY_NAME] = fingerprint
return exec_properties
def _prepare_output_artifacts(
self,
input_artifacts: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, types.Channel],
exec_properties: Dict[Text, Any],
execution_id: int,
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> Dict[Text, List[types.Artifact]]:
"""Overrides BaseDriver._prepare_output_artifacts()."""
del input_artifacts
example_artifact = output_dict[standard_component_specs.EXAMPLES_KEY].type()
base_output_dir = os.path.join(pipeline_info.pipeline_root,
component_info.component_id)
example_artifact.uri = base_driver._generate_output_uri( # pylint: disable=protected-access
base_output_dir, standard_component_specs.EXAMPLES_KEY, execution_id)
update_output_artifact(exec_properties, example_artifact.mlmd_artifact)
base_driver._prepare_output_paths(example_artifact) # pylint: disable=protected-access
return {standard_component_specs.EXAMPLES_KEY: [example_artifact]}
def run(
self, execution_info: portable_data_types.ExecutionInfo
) -> driver_output_pb2.DriverOutput:
# Populate exec_properties
result = driver_output_pb2.DriverOutput()
# PipelineInfo and ComponentInfo are not actually used; two fake ones are
# created just to be compatible with the old API.
pipeline_info = data_types.PipelineInfo('', '')
component_info = data_types.ComponentInfo('', '', pipeline_info)
exec_properties = self.resolve_exec_properties(
execution_info.exec_properties, pipeline_info, component_info)
for k, v in exec_properties.items():
if v is not None:
data_types_utils.set_metadata_value(result.exec_properties[k], v)
# Populate output_dict
output_example = copy.deepcopy(execution_info.output_dict[
standard_component_specs.EXAMPLES_KEY][0].mlmd_artifact)
update_output_artifact(exec_properties, output_example)
result.output_artifacts[
standard_component_specs.EXAMPLES_KEY].artifacts.append(output_example)
return result


# Source: /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/driver.py

"""Parquet based TFX example gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.types import standard_component_specs
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _ParquetToExample( # pylint: disable=invalid-name
pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
split_pattern: Text) -> beam.pvalue.PCollection:
"""Read Parquet files and transform to TF examples.
Note that each input split will be transformed by this function separately.
Args:
pipeline: beam pipeline.
exec_properties: A dict of execution properties.
- input_base: input dir that contains Parquet data.
split_pattern: Split.pattern in Input config, glob relative file pattern
that maps to input files with root directory given by input_base.
Returns:
PCollection of TF examples.
"""
input_base_uri = exec_properties[standard_component_specs.INPUT_BASE_KEY]
parquet_pattern = os.path.join(input_base_uri, split_pattern)
logging.info('Processing input parquet data %s to TFExample.',
parquet_pattern)
return (pipeline
# TODO(jyzhao): support per column read by input_config.
| 'ReadFromParquet' >> beam.io.ReadFromParquet(parquet_pattern)
| 'ToTFExample' >> beam.Map(utils.dict_to_example))
class Executor(BaseExampleGenExecutor):
"""TFX example gen executor for processing parquet format.
Data type conversion:
integer types will be converted to tf.train.Feature with tf.train.Int64List.
float types will be converted to tf.train.Feature with tf.train.FloatList.
string types will be converted to tf.train.Feature with tf.train.BytesList
and utf-8 encoding.
Note that,
A single value will be converted to a list containing that single value.
A missing value will be converted to an empty tf.train.Feature().
Parquet data might lose precision, e.g., int96.
For details, check the dict_to_example function in example_gen.utils.
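For example (an illustrative sketch of the rules above), a row
{'f1': 3, 'f2': 0.5, 'f3': 'x', 'f4': None} maps to int64_list([3]),
float_list([0.5]), bytes_list([b'x']), and an empty Feature, respectively.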
Example usage:
from tfx.components.base import executor_spec
from tfx.components.example_gen.component import
FileBasedExampleGen
from tfx.components.example_gen.custom_executors import
parquet_executor
from tfx.utils.dsl_utils import external_input
example_gen = FileBasedExampleGen(
input=external_input(parquet_dir_path),
custom_executor_spec=executor_spec.ExecutorClassSpec(
parquet_executor.Executor))
"""
def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
"""Returns PTransform for parquet to TF examples."""
return _ParquetToExample


# Source: /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/custom_executors/parquet_executor.py

"""Avro based TFX example gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.types import standard_component_specs
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _AvroToExample( # pylint: disable=invalid-name
pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
split_pattern: Text) -> beam.pvalue.PCollection:
"""Read Avro files and transform to TF examples.
Note that each input split will be transformed by this function separately.
Args:
pipeline: beam pipeline.
exec_properties: A dict of execution properties.
- input_base: input dir that contains Avro data.
split_pattern: Split.pattern in Input config, glob relative file pattern
that maps to input files with root directory given by input_base.
Returns:
PCollection of TF examples.
"""
input_base_uri = exec_properties[standard_component_specs.INPUT_BASE_KEY]
avro_pattern = os.path.join(input_base_uri, split_pattern)
logging.info('Processing input avro data %s to TFExample.', avro_pattern)
return (pipeline
| 'ReadFromAvro' >> beam.io.ReadFromAvro(avro_pattern)
| 'ToTFExample' >> beam.Map(utils.dict_to_example))
class Executor(BaseExampleGenExecutor):
"""TFX example gen executor for processing avro format.
Data type conversion:
integer types will be converted to tf.train.Feature with tf.train.Int64List.
float types will be converted to tf.train.Feature with tf.train.FloatList.
string types will be converted to tf.train.Feature with tf.train.BytesList
and utf-8 encoding.
Note that,
A single value will be converted to a list containing that single value.
A missing value will be converted to an empty tf.train.Feature().
For details, check the dict_to_example function in example_gen.utils.
Example usage:
    from tfx.components.base import executor_spec
    from tfx.components.example_gen.component import FileBasedExampleGen
    from tfx.components.example_gen.custom_executors import avro_executor
    from tfx.utils.dsl_utils import external_input
    example_gen = FileBasedExampleGen(
        input=external_input(avro_dir_path),
        custom_executor_spec=executor_spec.ExecutorClassSpec(
            avro_executor.Executor))
"""
def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
"""Returns PTransform for avro to TF examples."""
return _AvroToExample | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/custom_executors/avro_executor.py | 0.847684 | 0.334318 | avro_executor.py | pypi |
"""TFX ImportExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.example_gen import component
from tfx.components.example_gen.import_example_gen import executor
from tfx.dsl.components.base import executor_spec
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
class ImportExampleGen(component.FileBasedExampleGen): # pylint: disable=protected-access
"""Official TFX ImportExampleGen component.
  The ImportExampleGen component takes TFRecord files with TF Example data
  format, and generates train and eval examples for downstream components.
  This component provides consistent and configurable partitioning, and it
  also shuffles the dataset for ML best practice.
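
  Example usage (a minimal sketch; `tfrecord_dir_path` stands for a
  user-supplied directory containing the TFRecord files):

    example_gen = ImportExampleGen(input_base=tfrecord_dir_path)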
"""
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
# TODO(b/159467778): deprecate this, use input_base instead.
input: Optional[types.Channel] = None, # pylint: disable=redefined-builtin
input_base: Optional[Text] = None,
input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
Any]]] = None,
output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
Any]]] = None,
range_config: Optional[Union[range_config_pb2.RangeConfig,
Dict[Text, Any]]] = None,
payload_format: Optional[int] = example_gen_pb2.FORMAT_TF_EXAMPLE,
example_artifacts: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct an ImportExampleGen component.
Args:
input: A Channel of type `standard_artifacts.ExternalArtifact`, which
includes one artifact whose uri is an external directory containing the
TFRecord files. (Deprecated by input_base)
input_base: an external directory containing the TFRecord files.
input_config: An example_gen_pb2.Input instance, providing input
        configuration. If unset, the files under input_base will be treated as a
        single split. If any field is provided as a RuntimeParameter,
        input_config should be constructed as a dict with the same field names
as Input proto message.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
        size 2:1. If any field is provided as a RuntimeParameter,
        output_config should be constructed as a dict with the same field names
as Output proto message.
range_config: An optional range_config_pb2.RangeConfig instance,
specifying the range of span values to consider. If unset, driver will
default to searching for latest span with no restrictions.
payload_format: Payload format of input data. Should be one of
example_gen_pb2.PayloadFormat enum. Note that payload format of output
data is the same as input.
example_artifacts: Optional channel of 'ExamplesPath' for output train and
eval examples.
instance_name: Optional unique instance name. Necessary if multiple
ImportExampleGen components are declared in the same pipeline.
"""
if input:
logging.warning(
'The "input" argument to the ImportExampleGen component has been '
'deprecated by "input_base". Please update your usage as support for '
'this argument will be removed soon.')
input_base = artifact_utils.get_single_uri(list(input.get()))
super(ImportExampleGen, self).__init__(
input_base=input_base,
input_config=input_config,
output_config=output_config,
range_config=range_config,
example_artifacts=example_artifacts,
output_data_format=payload_format,
instance_name=instance_name) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/import_example_gen/component.py | 0.772788 | 0.309115 | component.py | pypi |
"""Generic TFX ImportExampleGen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text, Union
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(bytes)
def _ImportSerializedRecord( # pylint: disable=invalid-name
pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
split_pattern: Text) -> beam.pvalue.PCollection:
"""Read TFRecord files to PCollection of records.
Note that each input split will be transformed by this function separately.
Args:
pipeline: Beam pipeline.
exec_properties: A dict of execution properties.
- input_base: input dir that contains input data.
split_pattern: Split.pattern in Input config, glob relative file pattern
that maps to input files with root directory given by input_base.
Returns:
PCollection of records (tf.Example, tf.SequenceExample, or bytes).
"""
input_base_uri = exec_properties[standard_component_specs.INPUT_BASE_KEY]
input_split_pattern = os.path.join(input_base_uri, split_pattern)
logging.info('Reading input TFRecord data %s.', input_split_pattern)
# TODO(jyzhao): profile input examples.
return (pipeline
# TODO(jyzhao): support multiple input container format.
| 'ReadFromTFRecord' >>
beam.io.ReadFromTFRecord(file_pattern=input_split_pattern))
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
"""Generic TFX import example gen executor."""
def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
"""Returns PTransform for importing records."""
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(Union[tf.train.Example,
tf.train.SequenceExample, bytes])
def ImportRecord(pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
split_pattern: Text) -> beam.pvalue.PCollection:
"""PTransform to import records.
The records are tf.train.Example, tf.train.SequenceExample,
or serialized proto.
Args:
pipeline: Beam pipeline.
exec_properties: A dict of execution properties.
- input_base: input dir that contains input data.
split_pattern: Split.pattern in Input config, glob relative file pattern
that maps to input files with root directory given by input_base.
Returns:
PCollection of records (tf.Example, tf.SequenceExample, or bytes).
"""
output_payload_format = exec_properties.get(
standard_component_specs.OUTPUT_DATA_FORMAT_KEY)
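      # The output payload format (an example_gen_pb2.PayloadFormat enum value)
      # decides below whether records stay as raw serialized bytes or are
      # parsed into tf.train.Example / tf.train.SequenceExample protos.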
serialized_records = (
pipeline
# pylint: disable=no-value-for-parameter
| _ImportSerializedRecord(exec_properties, split_pattern))
if output_payload_format == example_gen_pb2.PayloadFormat.FORMAT_PROTO:
return serialized_records
elif (output_payload_format ==
example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE):
return (serialized_records
| 'ToTFExample' >> beam.Map(tf.train.Example.FromString))
elif (output_payload_format ==
example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE):
return (serialized_records
| 'ToTFSequenceExample' >> beam.Map(
tf.train.SequenceExample.FromString))
raise ValueError('output_payload_format must be one of FORMAT_TF_EXAMPLE,'
' FORMAT_TF_SEQUENCE_EXAMPLE or FORMAT_PROTO')
return ImportRecord | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/import_example_gen/executor.py | 0.821546 | 0.351283 | executor.py | pypi |
"""TFX CsvExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.example_gen import component
from tfx.components.example_gen.csv_example_gen import executor
from tfx.dsl.components.base import executor_spec
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
class CsvExampleGen(component.FileBasedExampleGen): # pylint: disable=protected-access
"""Official TFX CsvExampleGen component.
  The csv examplegen component takes csv data, and generates train
  and eval examples for downstream components.
The csv examplegen encodes column values to tf.Example int/float/byte feature.
  For the case when there are missing cells, the csv examplegen uses:
-- tf.train.Feature(`type`_list=tf.train.`type`List(value=[])), when the
`type` can be inferred.
-- tf.train.Feature() when it cannot infer the `type` from the column.
  Note that the type inferring will be per input split. If input isn't a single
  split, users need to ensure the column types align in each pre-split.
  For example, given the following csv rows of a split:
header:A,B,C,D
row1: 1,,x,0.1
row2: 2,,y,0.2
row3: 3,,,0.3
row4:
The output example will be
example1: 1(int), empty feature(no type), x(string), 0.1(float)
  example2: 2(int), empty feature(no type), y(string), 0.2(float)
example3: 3(int), empty feature(no type), empty list(string), 0.3(float)
Note that the empty feature is `tf.train.Feature()` while empty list string
feature is `tf.train.Feature(bytes_list=tf.train.BytesList(value=[]))`.
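
  Example usage (a minimal sketch; `csv_dir_path` stands for a user-supplied
  directory containing the CSV files):

    example_gen = CsvExampleGen(input_base=csv_dir_path)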
"""
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
# TODO(b/159467778): deprecate this, use input_base instead.
input: Optional[types.Channel] = None, # pylint: disable=redefined-builtin
input_base: Optional[Text] = None,
input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
Any]]] = None,
output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
Any]]] = None,
range_config: Optional[Union[range_config_pb2.RangeConfig,
Dict[Text, Any]]] = None,
example_artifacts: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct README.ml-pipelines-sdk.md CsvExampleGen component.
Args:
input: A Channel of type `standard_artifacts.ExternalArtifact`, which
includes one artifact whose uri is an external directory containing the
CSV files. (Deprecated by input_base)
input_base: an external directory containing the CSV files.
input_config: An example_gen_pb2.Input instance, providing input
        configuration. If unset, the files under input_base will be treated as a
        single split. If any field is provided as a RuntimeParameter,
        input_config should be constructed as a dict with the same field names
as Input proto message.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
        size 2:1. If any field is provided as a RuntimeParameter,
        output_config should be constructed as a dict with the same field names
as Output proto message.
range_config: An optional range_config_pb2.RangeConfig instance,
specifying the range of span values to consider. If unset, driver will
default to searching for latest span with no restrictions.
example_artifacts: Optional channel of 'ExamplesPath' for output train and
eval examples.
instance_name: Optional unique instance name. Necessary if multiple
CsvExampleGen components are declared in the same pipeline.
"""
if input:
logging.warning(
'The "input" argument to the CsvExampleGen component has been '
'deprecated by "input_base". Please update your usage as support for '
'this argument will be removed soon.')
input_base = artifact_utils.get_single_uri(list(input.get()))
super(CsvExampleGen, self).__init__(
input_base=input_base,
input_config=input_config,
output_config=output_config,
range_config=range_config,
example_artifacts=example_artifacts,
instance_name=instance_name) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/csv_example_gen/component.py | 0.877451 | 0.387806 | component.py | pypi |
"""Generic TFX CSV example gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Iterable, List, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.dsl.io import fileio
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx_bsl.coders import csv_decoder
def _int_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
value_list = []
if cell:
value_list.append(int(cell))
return tf.train.Feature(int64_list=tf.train.Int64List(value=value_list))
def _float_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
value_list = []
if cell:
value_list.append(float(cell))
return tf.train.Feature(float_list=tf.train.FloatList(value=value_list))
def _bytes_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
value_list = []
if cell:
value_list.append(cell)
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value_list))
@beam.typehints.with_input_types(List[csv_decoder.CSVCell],
List[csv_decoder.ColumnInfo])
@beam.typehints.with_output_types(tf.train.Example)
class _ParsedCsvToTfExample(beam.DoFn):
"""A beam.DoFn to convert README.ml-pipelines-sdk.md parsed CSV line to README.ml-pipelines-sdk.md tf.Example."""
def __init__(self):
self._column_handlers = None
def _make_column_handlers(self, column_infos: List[csv_decoder.ColumnInfo]):
result = []
for column_info in column_infos:
# pylint: disable=g-long-lambda
if column_info.type == csv_decoder.ColumnType.INT:
handler_fn = _int_handler
elif column_info.type == csv_decoder.ColumnType.FLOAT:
handler_fn = _float_handler
elif column_info.type == csv_decoder.ColumnType.STRING:
handler_fn = _bytes_handler
else:
handler_fn = None
result.append((column_info.name, handler_fn))
return result
def process(
self, csv_cells: List[csv_decoder.CSVCell],
column_infos: List[csv_decoder.ColumnInfo]) -> Iterable[tf.train.Example]:
if not self._column_handlers:
self._column_handlers = self._make_column_handlers(column_infos)
# skip blank lines.
if not csv_cells:
return
if len(csv_cells) != len(self._column_handlers):
raise ValueError('Invalid CSV line: {}'.format(csv_cells))
feature = {}
for csv_cell, (column_name, handler_fn) in zip(csv_cells,
self._column_handlers):
feature[column_name] = (
handler_fn(csv_cell) if handler_fn else tf.train.Feature())
yield tf.train.Example(features=tf.train.Features(feature=feature))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _CsvToExample( # pylint: disable=invalid-name
pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
split_pattern: Text) -> beam.pvalue.PCollection:
"""Read CSV files and transform to TF examples.
Note that each input split will be transformed by this function separately.
Args:
pipeline: beam pipeline.
exec_properties: A dict of execution properties.
- input_base: input dir that contains CSV data. CSV must have header line.
split_pattern: Split.pattern in Input config, glob relative file pattern
that maps to input files with root directory given by input_base.
Returns:
PCollection of TF examples.
Raises:
RuntimeError: if split is empty or csv headers are not equal.
"""
input_base_uri = exec_properties[standard_component_specs.INPUT_BASE_KEY]
csv_pattern = os.path.join(input_base_uri, split_pattern)
logging.info('Processing input csv data %s to TFExample.', csv_pattern)
csv_files = fileio.glob(csv_pattern)
if not csv_files:
raise RuntimeError(
'Split pattern {} does not match any files.'.format(csv_pattern))
column_names = io_utils.load_csv_column_names(csv_files[0])
for csv_file in csv_files[1:]:
if io_utils.load_csv_column_names(csv_file) != column_names:
raise RuntimeError(
'Files in same split {} have different header.'.format(csv_pattern))
parsed_csv_lines = (
pipeline
| 'ReadFromText' >> beam.io.ReadFromText(
file_pattern=csv_pattern, skip_header_lines=1)
| 'ParseCSVLine' >> beam.ParDo(csv_decoder.ParseCSVLine(delimiter=','))
| 'ExtractParsedCSVLines' >> beam.Keys())
column_infos = beam.pvalue.AsSingleton(
parsed_csv_lines
| 'InferColumnTypes' >> beam.CombineGlobally(
csv_decoder.ColumnTypeInferrer(column_names, skip_blank_lines=True)))
return (parsed_csv_lines
| 'ToTFExample' >> beam.ParDo(_ParsedCsvToTfExample(), column_infos))
class Executor(BaseExampleGenExecutor):
"""Generic TFX CSV example gen executor."""
def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
"""Returns PTransform for CSV to TF examples."""
return _CsvToExample | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/example_gen/csv_example_gen/executor.py | 0.847432 | 0.237897 | executor.py | pypi |
"""TFX Tuner component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, NamedTuple, Optional, Text
from kerastuner.engine import base_tuner
from tfx import types
from tfx.components.tuner import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import trainer_pb2
from tfx.proto import tuner_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TunerSpec
from tfx.utils import json_utils
# tuner: A BaseTuner that will be used for tuning.
# fit_kwargs: Args to pass to tuner's run_trial function for fitting the
#             model, e.g., the training and validation dataset. Required
#             args depend on the tuner's implementation.
TunerFnResult = NamedTuple('TunerFnResult', [('tuner', base_tuner.BaseTuner),
('fit_kwargs', Dict[Text, Any])])
TunerFnResult.__doc__ = """
tuner_fn returns a TunerFnResult that contains:
  - tuner: A BaseTuner that will be used for tuning.
  - fit_kwargs: Args to pass to tuner's run_trial function for fitting the
    model, e.g., the training and validation dataset. Required
    args depend on the tuner's implementation.
"""
class Tuner(base_component.BaseComponent):
"""A TFX component for model hyperparameter tuning."""
SPEC_CLASS = TunerSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(self,
examples: types.Channel = None,
schema: Optional[types.Channel] = None,
transform_graph: Optional[types.Channel] = None,
module_file: Optional[Text] = None,
tuner_fn: Optional[Text] = None,
train_args: trainer_pb2.TrainArgs = None,
eval_args: trainer_pb2.EvalArgs = None,
tune_args: Optional[tuner_pb2.TuneArgs] = None,
custom_config: Optional[Dict[Text, Any]] = None,
best_hyperparameters: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct README.ml-pipelines-sdk.md Tuner component.
Args:
examples: A Channel of type `standard_artifacts.Examples`, serving as the
source of examples that are used in tuning (required).
schema: An optional Channel of type `standard_artifacts.Schema`, serving
as the schema of training and eval data. This is used when raw examples
are provided.
transform_graph: An optional Channel of type
`standard_artifacts.TransformGraph`, serving as the input transform
graph if present. This is used when transformed examples are provided.
module_file: A path to python module file containing UDF tuner definition.
        The module_file must implement a function named `tuner_fn` at its top
level. The function must have the following signature.
def tuner_fn(fn_args: FnArgs) -> TunerFnResult:
Exactly one of 'module_file' or 'tuner_fn' must be supplied.
tuner_fn: A python path to UDF model definition function. See
'module_file' for the required signature of the UDF. Exactly one of
'module_file' or 'tuner_fn' must be supplied.
train_args: A trainer_pb2.TrainArgs instance, containing args used for
training. Currently only splits and num_steps are available. Default
behavior (when splits is empty) is train on `train` split.
eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
Currently only splits and num_steps are available. Default behavior
(when splits is empty) is evaluate on `eval` split.
tune_args: A tuner_pb2.TuneArgs instance, containing args used for tuning.
Currently only num_parallel_trials is available.
      custom_config: A dict which contains additional training job parameters
that will be passed into user module.
best_hyperparameters: Optional Channel of type
`standard_artifacts.HyperParameters` for result of the best hparams.
instance_name: Optional unique instance name. Necessary if multiple Tuner
components are declared in the same pipeline.
"""
if bool(module_file) == bool(tuner_fn):
raise ValueError(
"Exactly one of 'module_file' or 'tuner_fn' must be supplied")
best_hyperparameters = best_hyperparameters or types.Channel(
type=standard_artifacts.HyperParameters)
spec = TunerSpec(
examples=examples,
schema=schema,
transform_graph=transform_graph,
module_file=module_file,
tuner_fn=tuner_fn,
train_args=train_args,
eval_args=eval_args,
tune_args=tune_args,
best_hyperparameters=best_hyperparameters,
custom_config=json_utils.dumps(custom_config),
)
super(Tuner, self).__init__(spec=spec, instance_name=instance_name) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/tuner/component.py | 0.930261 | 0.328664 | component.py | pypi |
"""Generic TFX tuner executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from typing import Any, Callable, Dict, List, Optional, Text
from absl import logging
from kerastuner.engine import base_tuner
from tfx import types
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import tuner_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import proto_utils
# Default file name for generated best hyperparameters file.
_DEFAULT_FILE_NAME = 'best_hyperparameters.txt'
# TODO(b/160253334): Establish a separation of practice between this 'default'
# module and the ones in 'extensions'.
def _get_tuner_fn(exec_properties: Dict[Text, Any]) -> Callable[..., Any]:
"""Returns tuner_fn from execution properties."""
return udf_utils.get_fn(exec_properties, 'tuner_fn')
def get_tune_args(
exec_properties: Dict[Text, Any]) -> Optional[tuner_pb2.TuneArgs]:
"""Returns TuneArgs protos from execution properties, if present."""
tune_args = exec_properties.get(standard_component_specs.TUNE_ARGS_KEY)
if not tune_args:
return None
result = tuner_pb2.TuneArgs()
proto_utils.json_to_proto(tune_args, result)
return result
def write_best_hyperparameters(
tuner: base_tuner.BaseTuner,
output_dict: Dict[Text, List[types.Artifact]]) -> None:
"""Write out best hyperpeameters known to the given Tuner instance."""
best_hparams_config = tuner.get_best_hyperparameters()[0].get_config()
logging.info('Best HyperParameters: %s', best_hparams_config)
best_hparams_path = os.path.join(
artifact_utils.get_single_uri(
output_dict[standard_component_specs.BEST_HYPERPARAMETERS_KEY]),
_DEFAULT_FILE_NAME)
io_utils.write_string_file(best_hparams_path, json.dumps(best_hparams_config))
logging.info('Best Hyperparameters are written to %s.', best_hparams_path)
def search(input_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any],
working_dir: Text) -> base_tuner.BaseTuner:
"""Conduct README.ml-pipelines-sdk.md single hyperparameter search loop, and return the Tuner."""
tuner_fn = _get_tuner_fn(exec_properties)
fn_args = fn_args_utils.get_common_fn_args(input_dict, exec_properties,
working_dir)
tuner_fn_result = tuner_fn(fn_args)
result = tuner_fn_result.tuner
# TODO(b/156966497): set logger for printing.
result.search_space_summary()
logging.info('Start tuning... Tuner ID: %s', result.tuner_id)
result.search(**tuner_fn_result.fit_kwargs)
logging.info('Finished tuning... Tuner ID: %s', result.tuner_id)
result.results_summary()
return result
class Executor(base_executor.BaseExecutor):
"""TFX Tuner component executor."""
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
if get_tune_args(exec_properties):
raise ValueError("TuneArgs is not supported by this Tuner's Executor.")
tuner = search(input_dict, exec_properties, self._get_tmp_dir())
write_best_hyperparameters(tuner, output_dict) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/tuner/executor.py | 0.696268 | 0.183813 | executor.py | pypi |
"""Common functionalities used in transform executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import sys
from typing import Any, Callable, Text, Sequence, Mapping
def GetValues(inputs: Mapping[Text, Sequence[Any]],
label: Text) -> Sequence[Any]:
"""Retrieves the value of the given labeled input.
Args:
    inputs: Dict from label to a value list.
label: Label of the value to retrieve.
Returns:
A list of values, or empty list if there's no value.
Raises:
ValueError: If label is not one of the valid input labels.
"""
if label not in inputs:
return []
values = inputs.get(label)
if not isinstance(values, list):
return [values]
return values
def GetSoleValue(inputs: Mapping[Text, Sequence[Any]], label: Text,
strict=True) -> Any:
"""Helper method for retrieving README.ml-pipelines-sdk.md sole labeled input.
Args:
    inputs: Dict from label to a value list.
label: Label of the value to retrieve.
strict: If true, exactly one value should exist for label.
Returns:
A sole labeled value.
Raises:
ValueError: If there is no/multiple input associated with the label.
"""
values = GetValues(inputs, label)
if len(values) > 1:
raise ValueError(
'There should not be more than one value for label {}'.format(label))
if strict:
if len(values) != 1:
raise ValueError(
'There should be one and only one value for label {}'.format(label))
else:
if not values:
return None
return values[0]
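
# Example (illustrative; `model_artifact` is a hypothetical artifact):
#   GetSoleValue({'model': [model_artifact]}, 'model')  # -> model_artifact
#   GetSoleValue({}, 'model', strict=False)             # -> None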
def FunctionHasArg(fn: Callable, arg_name: Text) -> bool: # pylint: disable=g-bare-generic
"""Test at runtime if README.ml-pipelines-sdk.md function's signature contains README.ml-pipelines-sdk.md certain argument.
Args:
fn: function to be tested.
arg_name: Name of the argument to be tested.
Returns:
True if the function signature contains that argument.
"""
if sys.version_info.major == 2:
return arg_name in inspect.getargspec(fn).args # pylint: disable=deprecated-method
else:
return arg_name in inspect.signature(fn).parameters | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/util/value_utils.py | 0.837121 | 0.435481 | value_utils.py | pypi |
"""Utility functions related to Examples artifact shared by components."""
# TODO(b/149535307): Remove __future__ imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
from absl import logging
from tfx import types
from tfx.components.example_gen import utils as example_gen_utils
from tfx.proto import example_gen_pb2
from tfx.types import standard_artifacts
_DEFAULT_PAYLOAD_FORMAT = example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE
def get_payload_format(examples: types.Artifact) -> int:
"""Returns the payload format of Examples artifact.
  If the Examples artifact does not contain the "payload_format" custom
  property, it was created before TFX supported multiple payload formats, and
  can be regarded as tf.Example format.
Args:
examples: A standard_artifacts.Examples artifact.
Returns:
payload_format: One of the enums in example_gen_pb2.PayloadFormat.
"""
assert examples.type_name == standard_artifacts.Examples.TYPE_NAME, (
'examples must be of type standard_artifacts.Examples')
if examples.has_custom_property(
example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME):
return example_gen_pb2.PayloadFormat.Value(
examples.get_string_custom_property(
example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME))
else:
logging.warning('Examples artifact does not have %s custom property. '
'Falling back to %s',
example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME,
example_gen_pb2.PayloadFormat.Name(_DEFAULT_PAYLOAD_FORMAT))
return _DEFAULT_PAYLOAD_FORMAT
def get_payload_format_string(examples: types.Artifact) -> Text:
"""Returns the payload format as README.ml-pipelines-sdk.md string."""
return example_gen_pb2.PayloadFormat.Name(get_payload_format(examples))
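
# Example (illustrative; `examples` is a standard_artifacts.Examples artifact):
#   set_payload_format(examples, example_gen_pb2.PayloadFormat.FORMAT_PROTO)
#   get_payload_format_string(examples)  # -> 'FORMAT_PROTO'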
def set_payload_format(examples: types.Artifact, payload_format: int):
"""Sets the payload format custom property for `examples`.
Args:
examples: A standard_artifacts.Examples artifact.
payload_format: One of the enums in example_gen_pb2.PayloadFormat.
"""
assert examples.type_name == standard_artifacts.Examples.TYPE_NAME, (
'examples must be of type standard_artifacts.Examples')
examples.set_string_custom_property(
example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME,
example_gen_pb2.PayloadFormat.Name(payload_format)) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/util/examples_utils.py | 0.754553 | 0.381104 | examples_utils.py | pypi |
"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.yulong_pusher import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
from tfx.utils import json_utils
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class YulongPusher(base_component.BaseComponent):
"""A TFX component to push validated TensorFlow models to README.ml-pipelines-sdk.md model serving platform.
The `Pusher` component can be used to push an validated SavedModel from output
of the [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) to
[TensorFlow Serving](https://www.tensorflow.org/tfx/serving). The Pusher
will check the validation results from the [Evaluator
component](https://www.tensorflow.org/tfx/guide/evaluator) and [InfraValidator
component](https://www.tensorflow.org/tfx/guide/infra_validator)
before deploying the model. If the model has not been blessed, then the model
will not be pushed.
  *Note:* The executor for this component can be overridden to enable the model
  to be pushed to serving platforms other than tf.serving. The [Cloud AI
  Platform custom
  executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/pusher)
  provides an example of how to implement this.
## Example
```
# Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = YulongPusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
```
"""
SPEC_CLASS = PusherSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
model: types.Channel = None,
model_blessing: Optional[types.Channel] = None,
infra_blessing: Optional[types.Channel] = None,
push_destination: Optional[Union[pusher_pb2.PushDestination,
Dict[Text, Any]]] = None,
custom_config: Optional[Dict[Text, Any]] = None,
custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
pushed_model: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct README.ml-pipelines-sdk.md Pusher component.
Args:
model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
model_blessing: An optional Channel of type
`standard_artifacts.ModelBlessing`, usually produced from an Evaluator
component.
infra_blessing: An optional Channel of type
`standard_artifacts.InfraBlessing`, usually produced from an
InfraValidator component.
push_destination: A pusher_pb2.PushDestination instance, providing info
for tensorflow serving to load models. Optional if executor_class
        doesn't require push_destination. If any field is provided as a
        RuntimeParameter, push_destination should be constructed as a dict with
the same field names as PushDestination proto message.
custom_config: A dict which contains the deployment job parameters to be
passed to cloud-based training platforms. The [Kubeflow example](
https://github.com/tensorflow/tfx/blob/6ff57e36a7b65818d4598d41e584a42584d361e6/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py#L278-L285)
contains an example how this can be used by custom executors.
custom_executor_spec: Optional custom executor spec.
pushed_model: Optional output `standard_artifacts.PushedModel` channel
with result of push.
instance_name: Optional unique instance name. Necessary if multiple Pusher
components are declared in the same pipeline.
"""
pushed_model = pushed_model or types.Channel(
type=standard_artifacts.PushedModel)
if push_destination is None and not custom_executor_spec:
      raise ValueError('push_destination is required unless a '
'custom_executor_spec is supplied that does not require '
'it.')
spec = PusherSpec(
model=model,
model_blessing=model_blessing,
infra_blessing=infra_blessing,
push_destination=push_destination,
custom_config=json_utils.dumps(custom_config),
pushed_model=pushed_model)
super(YulongPusher, self).__init__(
spec=spec,
custom_executor_spec=custom_executor_spec,
instance_name=instance_name) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/yulong_pusher/component.py | 0.861626 | 0.711067 | component.py | pypi |
"""TFX Evaluator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, Union
from absl import logging
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.evaluator import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.proto import evaluator_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import EvaluatorSpec
from tfx.utils import json_utils
class Evaluator(base_component.BaseComponent):
"""A TFX component to evaluate models trained by README.ml-pipelines-sdk.md TFX Trainer component.
See [Evaluator](https://www.tensorflow.org/tfx/guide/evaluator) for more
information on what this component's required inputs are, how to configure it,
and what outputs it produces.
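
  Example usage (a minimal sketch; `eval_config` stands for a user-built
  tfma.EvalConfig, and `example_gen` and `trainer` for upstream components in
  the same pipeline):

    evaluator = Evaluator(
        examples=example_gen.outputs['examples'],
        model=trainer.outputs['model'],
        eval_config=eval_config)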
"""
SPEC_CLASS = EvaluatorSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
examples: types.Channel = None,
model: types.Channel = None,
baseline_model: Optional[types.Channel] = None,
# TODO(b/148618405): deprecate feature_slicing_spec.
feature_slicing_spec: Optional[Union[evaluator_pb2.FeatureSlicingSpec,
Dict[Text, Any]]] = None,
fairness_indicator_thresholds: Optional[List[Union[
float, data_types.RuntimeParameter]]] = None,
example_splits: Optional[List[Text]] = None,
evaluation: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
eval_config: Optional[tfma.EvalConfig] = None,
blessing: Optional[types.Channel] = None,
schema: Optional[types.Channel] = None,
module_file: Optional[Text] = None,
module_path: Optional[Text] = None):
"""Construct an Evaluator component.
Args:
examples: A Channel of type `standard_artifacts.Examples`, usually
produced by an ExampleGen component. _required_
model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
baseline_model: An optional channel of type 'standard_artifacts.Model' as
the baseline model for model diff and model validation purpose.
feature_slicing_spec:
Deprecated, please use eval_config instead. Only support estimator.
[evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
instance that describes how Evaluator should slice the data. If any
        field is provided as a RuntimeParameter, feature_slicing_spec should
        be constructed as a dict with the same field names as
FeatureSlicingSpec proto message.
fairness_indicator_thresholds: Optional list of float (or
RuntimeParameter) threshold values for use with TFMA fairness
indicators. Experimental functionality: this interface and
        functionality may change at any time. TODO(b/142653905): add a link
to additional documentation for TFMA fairness indicators here.
example_splits: Names of splits on which the metrics are computed.
Default behavior (when example_splits is set to None or Empty) is using
the 'eval' split.
evaluation: Channel of `ModelEvaluation` to store the evaluation results.
instance_name: Optional name assigned to this specific instance of
Evaluator. Required only if multiple Evaluator components are declared
in the same pipeline. Either `model_exports` or `model` must be present
in the input arguments.
      eval_config: Instance of tfma.EvalConfig containing configuration settings
for running the evaluation. This config has options for both estimator
and Keras.
blessing: Output channel of 'ModelBlessing' that contains the
blessing result.
schema: A `Schema` channel to use for TFXIO.
module_file: A path to python module file containing UDFs for Evaluator
customization. The module_file can implement following functions at its
top level.
def custom_eval_shared_model(
eval_saved_model_path, model_name, eval_config, **kwargs,
) -> tfma.EvalSharedModel:
def custom_extractors(
eval_shared_model, eval_config, tensor_adapter_config,
) -> List[tfma.extractors.Extractor]:
module_path: A python path to the custom module that contains the UDFs.
See 'module_file' for the required signature of UDFs. Note this can
not be set together with module_file.
"""
if bool(module_file) and bool(module_path):
raise ValueError(
'Python module path can not be set together with module file path.')
if eval_config is not None and feature_slicing_spec is not None:
raise ValueError("Exactly one of 'eval_config' or 'feature_slicing_spec' "
"must be supplied.")
if eval_config is None and feature_slicing_spec is None:
feature_slicing_spec = evaluator_pb2.FeatureSlicingSpec()
logging.info('Neither eval_config nor feature_slicing_spec is passed, '
'the model is treated as estimator.')
if feature_slicing_spec:
logging.warning('feature_slicing_spec is deprecated, please use '
'eval_config instead.')
blessing = blessing or types.Channel(type=standard_artifacts.ModelBlessing)
    evaluation = evaluation or types.Channel(
        type=standard_artifacts.ModelEvaluation)
spec = EvaluatorSpec(
examples=examples,
model=model,
baseline_model=baseline_model,
feature_slicing_spec=feature_slicing_spec,
fairness_indicator_thresholds=fairness_indicator_thresholds,
example_splits=json_utils.dumps(example_splits),
evaluation=evaluation,
eval_config=eval_config,
blessing=blessing,
schema=schema,
module_file=module_file,
module_path=module_path)
super(Evaluator, self).__init__(spec=spec, instance_name=instance_name) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/evaluator/component.py | 0.807537 | 0.350421 | component.py | pypi |
"""Generic TFX model evaluator executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Text
from absl import logging
import apache_beam as beam
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import constants as tfma_constants
# Need to import the following module so that the fairness indicator post-export
# metric is registered.
import tensorflow_model_analysis.addons.fairness.post_export_metrics.fairness_indicators # pylint: disable=unused-import
from tfx import types
from tfx.components.evaluator import constants
from tfx.components.util import tfxio_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import evaluator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BASELINE_MODEL_KEY
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EVAL_CONFIG_KEY
from tfx.types.standard_component_specs import EVALUATION_KEY
from tfx.types.standard_component_specs import EXAMPLE_SPLITS_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import FEATURE_SLICING_SPEC_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import SCHEMA_KEY
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx_bsl.tfxio import tensor_adapter
_TELEMETRY_DESCRIPTORS = ['Evaluator']
class Executor(base_executor.BaseExecutor):
"""Executor for [Evaluator](https://www.tensorflow.org/tfx/guide/evaluator)."""
def _get_slice_spec_from_feature_slicing_spec(
self, spec: evaluator_pb2.FeatureSlicingSpec
) -> List[tfma.slicer.SingleSliceSpec]:
"""Given README.ml-pipelines-sdk.md feature slicing spec, returns README.ml-pipelines-sdk.md List of SingleSliceSpecs.
Args:
spec: slice specification.
Returns:
List of corresponding SingleSliceSpecs. Always includes the overall slice,
even if it was not specified in the given spec.
"""
result = []
for single_spec in spec.specs:
columns = single_spec.column_for_slicing
result.append(tfma.slicer.SingleSliceSpec(columns=columns))
# Always include the overall slice.
if tfma.slicer.SingleSliceSpec() not in result:
result.append(tfma.slicer.SingleSliceSpec())
return result
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Runs README.ml-pipelines-sdk.md batch job to evaluate the eval_model against the given input.
Args:
      input_dict: Input dict from input key to a list of Artifacts.
        - model: exported model.
        - examples: examples for evaluating the model.
      output_dict: Output dict from output key to a list of Artifacts.
- evaluation: model evaluation results.
exec_properties: A dict of execution properties.
- eval_config: JSON string of tfma.EvalConfig.
- feature_slicing_spec: JSON string of evaluator_pb2.FeatureSlicingSpec
instance, providing the way to slice the data. Deprecated, use
eval_config.slicing_specs instead.
- example_splits: JSON-serialized list of names of splits on which the
metrics are computed. Default behavior (when example_splits is set to
None) is using the 'eval' split.
Returns:
None
"""
if EXAMPLES_KEY not in input_dict:
raise ValueError('EXAMPLES_KEY is missing from input dict.')
if EVALUATION_KEY not in output_dict:
raise ValueError('EVALUATION_KEY is missing from output dict.')
if MODEL_KEY in input_dict and len(input_dict[MODEL_KEY]) > 1:
raise ValueError('There can be only one candidate model, there are %d.' %
(len(input_dict[MODEL_KEY])))
if BASELINE_MODEL_KEY in input_dict and len(
input_dict[BASELINE_MODEL_KEY]) > 1:
raise ValueError('There can be only one baseline model, there are %d.' %
(len(input_dict[BASELINE_MODEL_KEY])))
self._log_startup(input_dict, output_dict, exec_properties)
# Add fairness indicator metric callback if necessary.
fairness_indicator_thresholds = exec_properties.get(
'fairness_indicator_thresholds', None)
add_metrics_callbacks = None
if fairness_indicator_thresholds:
add_metrics_callbacks = [
tfma.post_export_metrics.fairness_indicators( # pytype: disable=module-attr
thresholds=fairness_indicator_thresholds),
]
output_uri = artifact_utils.get_single_uri(
output_dict[constants.EVALUATION_KEY])
eval_shared_model_fn = udf_utils.try_get_fn(
exec_properties=exec_properties,
fn_name='custom_eval_shared_model') or tfma.default_eval_shared_model
run_validation = False
models = []
if EVAL_CONFIG_KEY in exec_properties and exec_properties[EVAL_CONFIG_KEY]:
slice_spec = None
has_baseline = bool(input_dict.get(BASELINE_MODEL_KEY))
eval_config = tfma.EvalConfig()
proto_utils.json_to_proto(exec_properties[EVAL_CONFIG_KEY], eval_config)
eval_config = tfma.update_eval_config_with_defaults(
eval_config, has_baseline=has_baseline)
tfma.verify_eval_config(eval_config)
# Do not validate model when there is no thresholds configured. This is to
# avoid accidentally blessing models when users forget to set thresholds.
run_validation = bool(
tfma.metrics.metric_thresholds_from_metrics_specs(
eval_config.metrics_specs))
if len(eval_config.model_specs) > 2:
raise ValueError(
"""Cannot support more than two models. There are %d models in this
eval_config.""" % (len(eval_config.model_specs)))
# Extract model artifacts.
for model_spec in eval_config.model_specs:
if MODEL_KEY not in input_dict:
if not model_spec.prediction_key:
raise ValueError(
'model_spec.prediction_key required if model not provided')
continue
if model_spec.is_baseline:
model_uri = artifact_utils.get_single_uri(
input_dict[BASELINE_MODEL_KEY])
else:
model_uri = artifact_utils.get_single_uri(input_dict[MODEL_KEY])
if tfma.get_model_type(model_spec) == tfma.TF_ESTIMATOR:
model_path = path_utils.eval_model_path(model_uri)
else:
model_path = path_utils.serving_model_path(model_uri)
logging.info('Using %s as %s model.', model_path, model_spec.name)
models.append(
eval_shared_model_fn(
eval_saved_model_path=model_path,
model_name=model_spec.name,
eval_config=eval_config,
add_metrics_callbacks=add_metrics_callbacks))
else:
eval_config = None
assert (FEATURE_SLICING_SPEC_KEY in exec_properties and
exec_properties[FEATURE_SLICING_SPEC_KEY]
), 'both eval_config and feature_slicing_spec are unset.'
feature_slicing_spec = evaluator_pb2.FeatureSlicingSpec()
proto_utils.json_to_proto(exec_properties[FEATURE_SLICING_SPEC_KEY],
feature_slicing_spec)
slice_spec = self._get_slice_spec_from_feature_slicing_spec(
feature_slicing_spec)
model_uri = artifact_utils.get_single_uri(input_dict[MODEL_KEY])
model_path = path_utils.eval_model_path(model_uri)
logging.info('Using %s for model eval.', model_path)
models.append(
eval_shared_model_fn(
eval_saved_model_path=model_path,
model_name='',
eval_config=None,
add_metrics_callbacks=add_metrics_callbacks))
eval_shared_model = models[0] if len(models) == 1 else models
schema = None
if SCHEMA_KEY in input_dict:
schema = io_utils.SchemaReader().read(
io_utils.get_only_uri_in_dir(
artifact_utils.get_single_uri(input_dict[SCHEMA_KEY])))
# Load and deserialize example splits from execution properties.
example_splits = json_utils.loads(
exec_properties.get(EXAMPLE_SPLITS_KEY, 'null'))
if not example_splits:
example_splits = ['eval']
logging.info("The 'example_splits' parameter is not set, using 'eval' "
'split.')
logging.info('Evaluating model.')
with self._make_beam_pipeline() as pipeline:
examples_list = []
tensor_adapter_config = None
# pylint: disable=expression-not-assigned
if tfma.is_batched_input(eval_shared_model, eval_config):
tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
examples=[
artifact_utils.get_single_instance(input_dict[EXAMPLES_KEY])
],
telemetry_descriptors=_TELEMETRY_DESCRIPTORS,
schema=schema,
raw_record_column_name=tfma_constants.ARROW_INPUT_COLUMN)
# TODO(b/161935932): refactor after TFXIO supports multiple patterns.
for split in example_splits:
file_pattern = io_utils.all_files_pattern(
artifact_utils.get_split_uri(input_dict[EXAMPLES_KEY], split))
tfxio = tfxio_factory(file_pattern)
data = (
pipeline
| 'ReadFromTFRecordToArrow[%s]' % split >> tfxio.BeamSource())
examples_list.append(data)
if schema is not None:
# Use last tfxio as TensorRepresentations and ArrowSchema are fixed.
tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=tfxio.ArrowSchema(),
tensor_representations=tfxio.TensorRepresentations())
else:
for split in example_splits:
file_pattern = io_utils.all_files_pattern(
artifact_utils.get_split_uri(input_dict[EXAMPLES_KEY], split))
data = (
pipeline
| 'ReadFromTFRecord[%s]' % split >>
beam.io.ReadFromTFRecord(file_pattern=file_pattern))
examples_list.append(data)
custom_extractors = udf_utils.try_get_fn(
exec_properties=exec_properties, fn_name='custom_extractors')
extractors = None
if custom_extractors:
extractors = custom_extractors(
eval_shared_model=eval_shared_model,
eval_config=eval_config,
tensor_adapter_config=tensor_adapter_config)
(examples_list | 'FlattenExamples' >> beam.Flatten()
|
'ExtractEvaluateAndWriteResults' >> tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=models[0] if len(models) == 1 else models,
eval_config=eval_config,
extractors=extractors,
output_path=output_uri,
slice_spec=slice_spec,
tensor_adapter_config=tensor_adapter_config))
logging.info('Evaluation complete. Results written to %s.', output_uri)
if not run_validation:
# TODO(jinhuang): delete the BLESSING_KEY from output_dict when supported.
logging.info('No threshold configured, will not validate model.')
return
# Set up blessing artifact
blessing = artifact_utils.get_single_instance(output_dict[BLESSING_KEY])
blessing.set_string_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_URI_KEY,
artifact_utils.get_single_uri(input_dict[MODEL_KEY]))
blessing.set_int_custom_property(
constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY,
input_dict[MODEL_KEY][0].id)
if input_dict.get(BASELINE_MODEL_KEY):
baseline_model = input_dict[BASELINE_MODEL_KEY][0]
blessing.set_string_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_URI_KEY,
baseline_model.uri)
blessing.set_int_custom_property(
constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY, baseline_model.id)
if 'current_component_id' in exec_properties:
blessing.set_string_custom_property(
'component_id', exec_properties['current_component_id'])
# Check validation result and write BLESSED file accordingly.
logging.info('Checking validation results.')
validation_result = tfma.load_validation_result(output_uri)
if validation_result.validation_ok:
io_utils.write_string_file(
os.path.join(blessing.uri, constants.BLESSED_FILE_NAME), '')
blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
constants.BLESSED_VALUE)
else:
io_utils.write_string_file(
os.path.join(blessing.uri, constants.NOT_BLESSED_FILE_NAME), '')
blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
constants.NOT_BLESSED_VALUE)
logging.info('Blessing result %s written to %s.',
validation_result.validation_ok, blessing.uri) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/components/evaluator/executor.py | 0.924043 | 0.203401 | executor.py | pypi |
# TODO(b/149535307): Remove __future__ imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from typing import List, Text
from tfx.dsl.components.base import base_executor
from tfx.types import channel_utils
from tfx.utils import import_utils
from tfx.utils import proto_utils
from google.protobuf import message
def run_component(
full_component_class_name: Text,
temp_directory_path: Text = None,
beam_pipeline_args: List[Text] = None,
**arguments
):
r"""Loads README.ml-pipelines-sdk.md component, instantiates it with arguments and runs its executor.
The component class is instantiated, so the component code is executed,
not just the executor code.
To pass artifact URI, use <input_name>_uri argument name.
To pass artifact property, use <input_name>_<property> argument name.
Protobuf property values can be passed as JSON-serialized protobufs.
# pylint: disable=line-too-long
Example::
    # When run as a script:
python3 scripts/run_component.py \
--full-component-class-name tfx.components.StatisticsGen \
--examples-uri gs://my_bucket/chicago_taxi_simple/CsvExamplesGen/examples/1/ \
--examples-split-names '["train", "eval"]' \
--output-uri gs://my_bucket/chicago_taxi_simple/StatisticsGen/output/1/
    # When run as a function:
run_component(
full_component_class_name='tfx.components.StatisticsGen',
        examples_uri='gs://my_bucket/chicago_taxi_simple/CsvExamplesGen/examples/1/',
examples_split_names='["train", "eval"]',
output_uri='gs://my_bucket/chicago_taxi_simple/StatisticsGen/output/1/',
)
Args:
full_component_class_name: The component class name including module name.
temp_directory_path: Optional. Temporary directory path for the executor.
beam_pipeline_args: Optional. Arguments to pass to the Beam pipeline.
**arguments: Key-value pairs with component arguments.
"""
component_class = import_utils.import_class_by_path(full_component_class_name)
component_arguments = {}
for name, execution_param in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
param_type = execution_param.type
if (isinstance(param_type, type) and
issubclass(param_type, message.Message)):
argument_value_obj = param_type()
proto_utils.json_to_proto(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_arguments[name] = argument_value_obj
for input_name, channel_param in component_class.SPEC_CLASS.INPUTS.items():
uri = (arguments.get(input_name + '_uri') or
arguments.get(input_name + '_path'))
if uri:
artifact = channel_param.type()
artifact.uri = uri
# Setting the artifact properties
for property_name in channel_param.type.PROPERTIES:
property_arg_name = input_name + '_' + property_name
if property_arg_name in arguments:
setattr(artifact, property_name, arguments[property_arg_name])
component_arguments[input_name] = channel_utils.as_channel([artifact])
component_instance = component_class(**component_arguments)
input_dict = channel_utils.unwrap_channel_dict(
component_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(
component_instance.outputs.get_all())
exec_properties = component_instance.exec_properties
# Generating paths for output artifacts
for output_name, channel_param in component_class.SPEC_CLASS.OUTPUTS.items():
uri = (arguments.get('output_' + output_name + '_uri') or
arguments.get(output_name + '_uri') or
arguments.get(output_name + '_path'))
if uri:
artifacts = output_dict[output_name]
if not artifacts:
artifacts.append(channel_param.type())
for artifact in artifacts:
artifact.uri = uri
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=temp_directory_path,
unique_id='',
)
executor = component_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
# Writing out the output artifact properties
for output_name, channel_param in component_class.SPEC_CLASS.OUTPUTS.items():
for property_name in channel_param.type.PROPERTIES:
property_path_arg_name = output_name + '_' + property_name + '_path'
property_path = arguments.get(property_path_arg_name)
if property_path:
artifacts = output_dict[output_name]
for artifact in artifacts:
property_value = getattr(artifact, property_name)
os.makedirs(os.path.dirname(property_path), exist_ok=True)
with open(property_path, 'w') as f:
f.write(str(property_value))
if __name__ == '__main__':
params = sys.argv[1::2]
values = sys.argv[2::2]
args = {
param.lstrip('-').replace('-', '_'): value
for param, value in zip(params, values)
}
run_component(**args) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/scripts/run_component.py | 0.441914 | 0.192198 | run_component.py | pypi |
"""Common script to invoke TFX executors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import base64
import json
import absl
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.utils import import_utils
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
def _run_executor(args, pipeline_args) -> None:
r"""Select README.ml-pipelines-sdk.md particular executor and run it based on name.
# pylint: disable=line-too-long
  _run_executor() is used to invoke a class subclassing
tfx.dsl.components.base.base_executor.BaseExecutor. This function can be used for
both invoking the executor on remote environments as well as for unit testing
of executors.
How to invoke an executor as standalone:
# TODO(b/132958430): Create utility script to generate arguments for run_executor.py
First, the input data needs to be prepared. An easy way to generate the test
data is to fully run the pipeline once. This will generate the data to be
used for testing as well as log the artifacts to be used as input parameters.
In each executed component, three log entries will be generated similar to the
below:
```
[2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,116] {base_executor.py:72} INFO - Starting Executor execution.
[2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:74} INFO - Inputs for Executor is: {"input_base": [{"artifact": {"id": "1", "typeId": "1", "uri": "/usr/local/google/home/khaas/taxi/data/simple", "properties": {"split": {"stringValue": ""}, "state": {"stringValue": "published"}, "span": {"intValue": "1"}, "type_name": {"stringValue": "ExternalPath"}}}, "artifact_type": {"id": "1", "name": "ExternalPath", "properties": {"span": "INT", "name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING"}}}]}
[2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:76} INFO - Outputs for Executor is: {"examples": [{"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/train/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "train"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}, {"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/eval/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "eval"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}]}
[2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:78} INFO - Execution properties for Executor is: {"output": "{ \"splitConfig\": {\"splits\": [{\"name\": \"train\", \"hashBuckets\": 2}, {\"name\": \"eval\",\"hashBuckets\": 1}]}}"}
```
  Each of these maps directly to the input parameters expected by run_executor():
```
python scripts/run_executor.py \
--executor_class_path=tfx.components.example_gen.csv_example_gen.executor.Executor \
--inputs={"input_base": [{"artifact": {"id": "1", "typeId": "1", "uri": "/usr/local/google/home/khaas/taxi/data/simple", "properties": {"split": {"stringValue": ""}, "state": {"stringValue": "published"}, "span": {"intValue": "1"}, "type_name": {"stringValue": "ExternalPath"}}}, "artifact_type": {"id": "1", "name": "ExternalPath", "properties": {"span": "INT", "name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING"}}}]} \
--outputs={"examples": [{"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/train/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "train"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}, {"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/eval/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "eval"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}]} \
--exec-properties={"output": "{ \"splitConfig\": {\"splits\": [{\"name\": \"train\", \"hashBuckets\": 2}, {\"name\": \"eval\",\"hashBuckets\": 1}]}}"}
```
# pylint: disable=line-too-long
Args:
args:
- inputs: The input artifacts for this execution, serialized as JSON.
- outputs: The output artifacts to be generated by this execution,
serialized as JSON.
- exec_properties: The execution properties to be used by this execution,
serialized as JSON.
pipeline_args: Optional parameter that maps to the optional_pipeline_args
parameter in the pipeline, which provides additional configuration options
for apache-beam and tensorflow.logging.
Returns:
None
Raises:
None
"""
absl.logging.set_verbosity(absl.logging.INFO)
(inputs_str, outputs_str,
exec_properties_str) = (args.inputs or base64.b64decode(args.inputs_base64),
args.outputs or
base64.b64decode(args.outputs_base64),
args.exec_properties or
base64.b64decode(args.exec_properties_base64))
inputs = artifact_utils.parse_artifact_dict(inputs_str)
outputs = artifact_utils.parse_artifact_dict(outputs_str)
exec_properties = json.loads(exec_properties_str)
absl.logging.info(
'Executor {} do: inputs: {}, outputs: {}, exec_properties: {}'.format(
args.executor_class_path, inputs, outputs, exec_properties))
executor_cls = import_utils.import_class_by_path(args.executor_class_path)
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=pipeline_args,
tmp_dir=args.temp_directory_path,
unique_id='')
executor = executor_cls(executor_context)
absl.logging.info('Starting executor')
executor.Do(inputs, outputs, exec_properties)
# The last line of stdout will be pushed to xcom by Airflow.
if args.write_outputs_stdout:
print(artifact_utils.jsonify_artifact_dict(outputs))
def main(argv):
"""Parses the arguments for _run_executor() then invokes it.
# pylint: disable=line-too-long
Args:
argv: Unparsed arguments for run_executor.py
--executor_class_path: Python class of executor in format of <module>.<class>.
--temp_directory_path: Common temp directory path for executors.
--inputs: JSON serialized dict of input artifacts. If the input needs to be base64-encoded, use --inputs-base64 instead.
--inputs-base64: base64-encoded JSON serialized dict of input artifacts. If the input is not base64-encoded, use --inputs instead.
--outputs: JSON serialized dict of output artifacts. If the output needs to be base64-encoded, use --outputs-base64 instead.
--outputs-base64: base64-encoded JSON serialized dict of output artifacts. If the output is not base64-encoded, use --outputs instead.
    --exec-properties: JSON serialized dict of (non artifact) execution properties. If the execution properties need to be base64-encoded, use --exec-properties-base64 instead.
    --exec-properties-base64: base64-encoded JSON serialized dict of (non artifact) execution properties. If the execution properties are not base64-encoded, use --exec-properties instead.
    --write-outputs-stdout: Write outputs to the last line of stdout, which will be pushed to xcom in Airflow. Other users and orchestrators should ignore this flag.
# pylint: disable=line-too-long
Returns:
None
Raises:
None
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--executor_class_path',
type=str,
required=True,
help='Python class of executor in format of <module>.<class>.')
parser.add_argument(
'--temp_directory_path',
type=str,
help='common temp directory path for executors')
inputs_group = parser.add_mutually_exclusive_group(required=True)
inputs_group.add_argument(
'--inputs',
type=str,
help='json serialized dict of input artifacts.')
inputs_group.add_argument(
'--inputs-base64',
type=str,
help='base64 encoded json serialized dict of input artifacts.')
outputs_group = parser.add_mutually_exclusive_group(required=True)
outputs_group.add_argument(
'--outputs',
type=str,
help='json serialized dict of output artifacts.')
outputs_group.add_argument(
'--outputs-base64',
type=str,
help='base64 encoded json serialized dict of output artifacts.')
execution_group = parser.add_mutually_exclusive_group(required=True)
execution_group.add_argument(
'--exec-properties',
type=str,
help='json serialized dict of (non artifact) execution properties.')
execution_group.add_argument(
'--exec-properties-base64',
type=str,
      help='base64 encoded json serialized dict of (non artifact) execution properties.')
parser.add_argument(
'--write-outputs-stdout',
dest='write_outputs_stdout',
action='store_true',
      help='Write outputs to last line of stdout, which will '
      'be pushed to xcom in Airflow. Other users and orchestrators '
      'should ignore this flag.')
args, beam_pipeline_args = parser.parse_known_args(argv)
_run_executor(args, beam_pipeline_args)
if __name__ == '__main__':
app.run(main=main) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/scripts/run_executor.py | 0.651022 | 0.650523 | run_executor.py | pypi |
"""TFX Channel definition."""
import inspect
import json
import textwrap
from typing import Any, Dict, Iterable, Optional, Text, Type, Union
from tfx.types import artifact_utils
from tfx.types.artifact import Artifact
from tfx.utils import json_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
# Property type for artifacts, executions and contexts.
Property = Union[int, float, str]
class Channel(json_utils.Jsonable):
"""Tfx Channel.
TFX Channel is an abstract concept that connects data producers and data
consumers. It contains restriction of the artifact type that should be fed
into or read from it.
Attributes:
type: The artifact type class that the Channel takes.
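  Example (a minimal sketch, assuming the `standard_artifacts` module from
  this package):
    from tfx.types import standard_artifacts
    examples_channel = Channel(type=standard_artifacts.Examples)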
"""
# TODO(b/125348988): Add support for real Channel in addition to static ones.
def __init__(
self,
type: Type[Artifact], # pylint: disable=redefined-builtin
additional_properties: Optional[Dict[str, Property]] = None,
additional_custom_properties: Optional[Dict[str, Property]] = None,
# TODO(b/161490287): deprecate static artifact.
artifacts: Optional[Iterable[Artifact]] = None,
matching_channel_name: Optional[str] = None,
producer_component_id: Optional[str] = None,
output_key: Optional[Text] = None):
"""Initialization of Channel.
Args:
type: Subclass of Artifact that represents the type of this Channel.
additional_properties: (Optional) A mapping of properties which will be
added to artifacts when this channel is used as an output of components.
additional_custom_properties: (Optional) A mapping of custom_properties
which will be added to artifacts when this channel is used as an output
of components.
artifacts: (Optional) A collection of artifacts as the values that can be
        read from the Channel. This is used to construct a static Channel.
      matching_channel_name: This targets the key of an input Channel dict
        in a Component. The artifact count of this channel will be decided at
        runtime by the Driver, based on the artifact count of the target channel.
Only one of `artifacts` and `matching_channel_name` should be set.
producer_component_id: (Optional) Producer component id of the Channel.
output_key: (Optional) The output key when producer component produces
the artifacts in this Channel.
"""
if not (inspect.isclass(type) and issubclass(type, Artifact)): # pytype: disable=wrong-arg-types
raise ValueError(
'Argument "type" of Channel constructor must be README.ml-pipelines-sdk.md subclass of '
'tfx.Artifact (got %r).' % (type,))
self.type = type
self._artifacts = artifacts or []
self.matching_channel_name = matching_channel_name
if self.matching_channel_name and self._artifacts:
raise ValueError(
'Only one of `artifacts` and `matching_channel_name` should be set.')
self._validate_type()
self.additional_properties = additional_properties or {}
self.additional_custom_properties = additional_custom_properties or {}
# The following fields will be populated during compilation time.
self.producer_component_id = producer_component_id
self.output_key = output_key
@property
def type_name(self):
return self.type.TYPE_NAME
def __repr__(self):
artifacts_str = '\n '.join(repr(a) for a in self._artifacts)
return textwrap.dedent("""\
Channel(
type_name: {}
artifacts: [{}]
additional_properties: {}
additional_custom_properties: {}
)""").format(self.type_name, artifacts_str, self.additional_properties,
self.additional_custom_properties)
def _validate_type(self) -> None:
for artifact in self._artifacts:
if artifact.type_name != self.type_name:
raise ValueError(
"Artifacts provided do not match Channel's artifact type {}".format(
self.type_name))
def get(self) -> Iterable[Artifact]:
"""Returns all artifacts that can be get from this Channel.
Returns:
An artifact collection.
"""
    # TODO(b/125037186): We should support dynamic query against a Channel
    # instead of a static Artifact collection.
return self._artifacts
def to_json_dict(self) -> Dict[Text, Any]:
return {
'type':
json.loads(
json_format.MessageToJson(
message=self.type._get_artifact_type(), # pylint: disable=protected-access
preserving_proto_field_name=True)),
'artifacts':
list(a.to_json_dict() for a in self._artifacts),
'additional_properties': self.additional_properties,
'additional_custom_properties': self.additional_custom_properties,
'producer_component_id':
(self.producer_component_id if self.producer_component_id else None
),
'output_key': (self.output_key if self.output_key else None),
}
@classmethod
def from_json_dict(cls, dict_data: Dict[Text, Any]) -> Any:
artifact_type = metadata_store_pb2.ArtifactType()
json_format.Parse(json.dumps(dict_data['type']), artifact_type)
type_cls = artifact_utils.get_artifact_type_class(artifact_type)
artifacts = list(Artifact.from_json_dict(a) for a in dict_data['artifacts'])
additional_properties = dict_data['additional_properties']
additional_custom_properties = dict_data['additional_custom_properties']
producer_component_id = dict_data.get('producer_component_id', None)
output_key = dict_data.get('output_key', None)
return Channel(
type=type_cls,
artifacts=artifacts,
additional_properties=additional_properties,
additional_custom_properties=additional_custom_properties,
producer_component_id=producer_component_id,
output_key=output_key) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/types/channel.py | 0.784814 | 0.24947 | channel.py | pypi |
"""Utilities for retrieving paths for various types of artifacts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Text
import absl
from tfx.dsl.io import fileio
from tfx.utils import io_utils
EVAL_MODEL_DIR = 'eval_model_dir'
SERVING_MODEL_DIR = 'serving_model_dir'
"""Directory structure of exported model for estimator based trainer:
|-- <ModelExportPath>
|-- EVAL_MODEL_DIR <- eval_model_dir, eval_model_path
|-- saved_model.pb
|-- ...
|-- SERVING_MODEL_DIR <- serving_model_dir, serving_model_path
|-- saved_model.pb
|-- ...
For generic trainer with Keras, there won't be eval model:
|-- <ModelExportPath>
|-- SERVING_MODEL_DIR <- serving_model_dir, serving_model_path
|-- saved_model.pb
|-- ...
TODO(b/160795287): Deprecate estimator based executor.
Support for estimator-based executor and model export will be
deprecated soon. The following estimator working directory
structure is still supported for backwards compatibility:
Directory structure of exported model for estimator based trainer:
|-- <ModelExportPath>
|-- EVAL_MODEL_DIR <- eval_model_dir
|-- <timestamped model> <- eval_model_path
|-- saved_model.pb
|-- ...
|-- SERVING_MODEL_DIR <- serving_model_dir
|-- export
|-- <exporter name>
|-- <timestamped model> <- serving_model_path
|-- saved_model.pb
|-- ...
|-- ...
"""
def eval_model_dir(output_uri: Text) -> Text:
"""Returns directory for exported model for evaluation purpose."""
return os.path.join(output_uri, EVAL_MODEL_DIR)
def eval_model_path(output_uri: Text) -> Text:
"""Returns final path to exported model for evaluation purpose."""
model_dir = eval_model_dir(output_uri)
model_file = os.path.join(model_dir, 'saved_model.pb')
if fileio.exists(model_file):
return model_dir
elif fileio.exists(model_dir):
# TODO(b/160795287): Deprecate estimator based executor.
absl.logging.warning('Support for estimator-based executor and model'
' export will be deprecated soon. Please use'
' export structure '
                         '<ModelExportPath>/eval_model_dir/saved_model.pb')
return io_utils.get_only_uri_in_dir(model_dir)
else:
# If eval model doesn't exist, use serving model for eval.
return serving_model_path(output_uri)
def serving_model_dir(output_uri: Text) -> Text:
"""Returns directory for exported model for serving purpose."""
return os.path.join(output_uri, SERVING_MODEL_DIR)
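# Example (hypothetical model artifact URI, for illustration):
#
#   serving_model_dir('/pipelines/my-pipeline/Trainer/model/1')
#   # == '/pipelines/my-pipeline/Trainer/model/1/serving_model_dir'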
def serving_model_path(output_uri: Text) -> Text:
"""Returns path for exported serving model."""
model_dir = serving_model_dir(output_uri)
export_dir = os.path.join(model_dir, 'export')
if fileio.exists(export_dir):
# TODO(b/160795287): Deprecate estimator based executor.
absl.logging.warning(
'Support for estimator-based executor and model export'
' will be deprecated soon. Please use export structure '
        '<ModelExportPath>/serving_model_dir/saved_model.pb')
model_dir = io_utils.get_only_uri_in_dir(export_dir)
return io_utils.get_only_uri_in_dir(model_dir)
else:
# If dir doesn't match estimator structure, use serving model root directly.
return model_dir | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/utils/path_utils.py | 0.645232 | 0.167797 | path_utils.py | pypi |
"""Utilities to dump and load Jsonable object to/from JSONs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import importlib
import inspect
import json
from typing import Any, Dict, List, Text, Type, Union
from six import with_metaclass
from tfx.utils import deprecation_utils
from tfx.utils import proto_utils
from google.protobuf import message
# This is the special key to indicate the serialized object type.
# Based on this key, the utility knows how to deserialize the object back to
# its original type.
_TFX_OBJECT_TYPE_KEY = '__tfx_object_type__'
_MODULE_KEY = '__module__'
_CLASS_KEY = '__class__'
_PROTO_VALUE_KEY = '__proto_value__'
RUNTIME_PARAMETER_PATTERN = (r'({\\*"__class__\\*": \\*"RuntimeParameter\\*", '
r'.*?})')
class _ObjectType(object):
"""Internal class to hold supported types."""
# Indicates that the JSON dictionary is an instance of Jsonable type.
# The dictionary has the states of the object and the object type info is
# stored as __module__ and __class__ fields.
JSONABLE = 'jsonable'
  # Indicates that the JSON dictionary is a Python class.
# The class info is stored as __module__ and __class__ fields in the
# dictionary.
CLASS = 'class'
  # Indicates that the JSON dictionary is an instance of a proto.Message
# subclass. The class info of the proto python class is stored as __module__
# and __class__ fields in the dictionary. The serialized value of the proto is
  # stored in the dictionary with the key _PROTO_VALUE_KEY.
PROTO = 'proto'
class Jsonable(with_metaclass(abc.ABCMeta, object)):
"""Base class for serializing and deserializing objects to/from JSON.
The default implementation assumes that the subclass can be restored by
  updating `self.__dict__` without invoking the `self.__init__` function. If
  the subclass cannot hold this assumption, it should
override `to_json_dict` and `from_json_dict` to customize the implementation.
"""
def to_json_dict(self) -> Dict[Text, Any]:
"""Convert from an object to README.ml-pipelines-sdk.md JSON serializable dictionary."""
return self.__dict__
@classmethod
def from_json_dict(cls, dict_data: Dict[Text, Any]) -> Any:
"""Convert from dictionary data to an object."""
instance = cls.__new__(cls)
instance.__dict__ = dict_data
return instance
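# Example round-trip (a hypothetical Jsonable subclass, for illustration;
# `dumps` and `loads` are defined at the bottom of this module):
#
#   class _MyConfig(Jsonable):
#     def __init__(self, name: Text):
#       self.name = name
#
#   restored = loads(dumps(_MyConfig('test')))
#   assert restored.name == 'test'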
JsonableValue = Union[bool, bytes, float, int, Jsonable, message.Message, Text,
Type]
JsonableList = List[JsonableValue]
JsonableDict = Dict[Union[bytes, Text], Union[JsonableValue, JsonableList]]
JsonableType = Union[JsonableValue, JsonableList, JsonableDict]
class _DefaultEncoder(json.JSONEncoder):
"""Default JSON Encoder which encodes Jsonable object to JSON."""
def encode(self, obj: Any) -> Text:
"""Override encode to prevent redundant dumping."""
if obj.__class__.__name__ == 'RuntimeParameter' and obj.ptype == Text:
return self.default(obj)
return super(_DefaultEncoder, self).encode(obj)
def default(self, obj: Any) -> Any:
    # If obj is a str-typed RuntimeParameter, serialize it in place.
if obj.__class__.__name__ == 'RuntimeParameter' and obj.ptype == Text:
dict_data = {
_TFX_OBJECT_TYPE_KEY: _ObjectType.JSONABLE,
_MODULE_KEY: obj.__class__.__module__,
_CLASS_KEY: obj.__class__.__name__,
}
dict_data.update(obj.to_json_dict())
return dumps(dict_data)
if isinstance(obj, Jsonable):
dict_data = {
_TFX_OBJECT_TYPE_KEY: _ObjectType.JSONABLE,
_MODULE_KEY: obj.__class__.__module__,
_CLASS_KEY: obj.__class__.__name__,
}
      # Need to first check the existence of str-typed runtime parameters.
data_patch = obj.to_json_dict()
for k, v in data_patch.items():
if v.__class__.__name__ == 'RuntimeParameter' and v.ptype == Text:
data_patch[k] = dumps(v)
dict_data.update(data_patch)
return dict_data
if inspect.isclass(obj):
# When serializing, skip over deprecated class aliases in the class
# hierarchy.
obj = deprecation_utils.get_first_nondeprecated_class(obj)
return {
_TFX_OBJECT_TYPE_KEY: _ObjectType.CLASS,
_MODULE_KEY: obj.__module__,
_CLASS_KEY: obj.__name__,
}
if isinstance(obj, message.Message):
return {
_TFX_OBJECT_TYPE_KEY: _ObjectType.PROTO,
_MODULE_KEY: obj.__class__.__module__,
_CLASS_KEY: obj.__class__.__name__,
_PROTO_VALUE_KEY: proto_utils.proto_to_json(obj)
}
return super(_DefaultEncoder, self).default(obj)
class _DefaultDecoder(json.JSONDecoder):
"""Default JSON Decoder which decodes JSON to Jsonable object."""
def __init__(self, *args, **kwargs):
super(_DefaultDecoder, self).__init__(
object_hook=self._dict_to_object, *args, **kwargs)
def _dict_to_object(self, dict_data: Dict[Text, Any]) -> Any:
"""Converts README.ml-pipelines-sdk.md dictionary to an object."""
if _TFX_OBJECT_TYPE_KEY not in dict_data:
return dict_data
object_type = dict_data.pop(_TFX_OBJECT_TYPE_KEY)
def _extract_class(d):
module_name = d.pop(_MODULE_KEY)
class_name = d.pop(_CLASS_KEY)
return getattr(importlib.import_module(module_name), class_name)
if object_type == _ObjectType.JSONABLE:
jsonable_class_type = _extract_class(dict_data)
if not issubclass(jsonable_class_type, Jsonable):
        raise ValueError('Class %s must be a subclass of Jsonable' %
jsonable_class_type)
return jsonable_class_type.from_json_dict(dict_data)
if object_type == _ObjectType.CLASS:
return _extract_class(dict_data)
if object_type == _ObjectType.PROTO:
proto_class_type = _extract_class(dict_data)
if not issubclass(proto_class_type, message.Message):
        raise ValueError('Class %s must be a subclass of proto.Message' %
proto_class_type)
if _PROTO_VALUE_KEY not in dict_data:
raise ValueError('Missing proto value in json dict')
return proto_utils.json_to_proto(dict_data[_PROTO_VALUE_KEY],
proto_class_type())
def dumps(obj: Any) -> Text:
"""Dumps an object to JSON with Jsonable encoding."""
return json.dumps(obj, cls=_DefaultEncoder, sort_keys=True)
def loads(s: Text) -> Any:
"""Loads README.ml-pipelines-sdk.md JSON into an object with Jsonable decoding."""
return json.loads(s, cls=_DefaultDecoder) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/utils/json_utils.py | 0.891917 | 0.196518 | json_utils.py | pypi |
"""Utilities for proto related manipulations."""
import itertools
from typing import Any, Dict, Iterator, TypeVar
from google.protobuf import descriptor as descriptor_lib
from google.protobuf import json_format
from google.protobuf import message
def gather_file_descriptors(
descriptor: descriptor_lib.Descriptor,
enable_extensions: bool = False) -> Iterator[descriptor_lib.FileDescriptor]:
"""Yield all depdendent file descriptors of README.ml-pipelines-sdk.md given proto descriptor.
Args:
descriptor: The proto descriptor to start the dependency search from.
    enable_extensions: Optional. True if proto extensions are enabled. Defaults
      to False.
Yields:
All file descriptors in the transitive dependencies of descriptor.
Each file descriptor is returned only once.
"""
visited_files = set()
visited_messages = set()
messages = [descriptor]
# Walk in depth through all the fields and extensions of the given descriptor
# and all the referenced messages.
while messages:
descriptor = messages.pop()
visited_files.add(descriptor.file)
if enable_extensions:
extensions = descriptor.file.pool.FindAllExtensions(descriptor)
else:
extensions = []
for field in itertools.chain(descriptor.fields, extensions):
if field.message_type and field.message_type not in visited_messages:
visited_messages.add(field.message_type)
messages.append(field.message_type)
for extension in extensions:
# Note: extension.file may differ from descriptor.file.
visited_files.add(extension.file)
# Go through the collected files and add their explicit dependencies.
files = list(visited_files)
while files:
file_descriptor = files.pop()
yield file_descriptor
for dependency in file_descriptor.dependencies:
if dependency not in visited_files:
visited_files.add(dependency)
files.append(dependency)
def proto_to_json(proto: message.Message) -> str:
"""Simple JSON Formatter wrapper for consistent formatting."""
return json_format.MessageToJson(
message=proto, sort_keys=True, preserving_proto_field_name=True)
def proto_to_dict(proto: message.Message) -> Dict[str, Any]:
"""Simple JSON Formatter wrapper for consistent formatting."""
return json_format.MessageToDict(
message=proto, preserving_proto_field_name=True)
# Type for a subclass of message.Message which will be used as a return type.
ProtoMessage = TypeVar('ProtoMessage', bound=message.Message)
def json_to_proto(json_str: str, proto: ProtoMessage) -> ProtoMessage:
"""Simple JSON Parser wrapper for consistent parsing."""
return json_format.Parse(json_str, proto, ignore_unknown_fields=True)
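# Example round-trip (a minimal sketch; ArtifactType is a generated proto
# class from ml-metadata, used elsewhere in this package):
#
#   from ml_metadata.proto import metadata_store_pb2
#   original = metadata_store_pb2.ArtifactType(name='Examples')
#   restored = json_to_proto(proto_to_json(original),
#                            metadata_store_pb2.ArtifactType())
#   assert restored.name == 'Examples'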
def dict_to_proto(json_dict: Dict[Any, Any],
proto: ProtoMessage) -> ProtoMessage:
"""Simple JSON Parser wrapper for consistent parsing."""
return json_format.ParseDict(json_dict, proto, ignore_unknown_fields=True) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/utils/proto_utils.py | 0.870652 | 0.168139 | proto_utils.py | pypi |
"""Utilities for topological sort."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Callable, List, Sequence, Text, TypeVar
NodeT = TypeVar('NodeT')
class InvalidDAGError(Exception):
"""Error to indicate invalid DAG."""
def topsorted_layers(
nodes: Sequence[NodeT], get_node_id_fn: Callable[[NodeT], Text],
get_parent_nodes: Callable[[NodeT], List[NodeT]],
get_child_nodes: Callable[[NodeT], List[NodeT]]) -> List[List[NodeT]]:
"""Sorts the DAG of nodes in topological order.
Args:
nodes: A sequence of nodes.
    get_node_id_fn: Callable that returns a unique text identifier for a node.
    get_parent_nodes: Callable that returns a list of parent nodes for a node.
    get_child_nodes: Callable that returns a list of child nodes for a node.
Returns:
A list of topologically ordered node layers. Each layer of nodes is sorted
by its node id given by `get_node_id_fn`.
Raises:
    InvalidDAGError: If the input nodes don't form a DAG.
ValueError: If the nodes are not unique.
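  Example (a minimal sketch over a hypothetical diamond-shaped DAG
  a -> {b, c} -> d):
    parents = {'a': [], 'b': ['a'], 'c': ['a'], 'd': ['b', 'c']}
    children = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
    layers = topsorted_layers(
        nodes=['a', 'b', 'c', 'd'],
        get_node_id_fn=lambda n: n,
        get_parent_nodes=parents.get,
        get_child_nodes=children.get)
    # layers == [['a'], ['b', 'c'], ['d']]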
"""
# Make sure the nodes are unique.
if len(set(get_node_id_fn(n) for n in nodes)) != len(nodes):
raise ValueError('Nodes must have unique ids.')
# The first layer contains nodes with no incoming edges.
layer = [node for node in nodes if not get_parent_nodes(node)]
visited = set()
layers = []
while layer:
layer = sorted(layer, key=get_node_id_fn)
layers.append(layer)
next_layer = []
for node in layer:
visited.add(get_node_id_fn(node))
for child_node in get_child_nodes(node):
# Include the child node if all its parents are visited. If the child
        # node is part of a cycle, it will never be included since it will have
# at least one unvisited parent node which is also part of the cycle.
parent_node_ids = set(
get_node_id_fn(p) for p in get_parent_nodes(child_node))
if parent_node_ids.issubset(visited):
next_layer.append(child_node)
layer = next_layer
# Nodes in cycles are not included in layers; raise an error if this happens.
if sum(len(layer) for layer in layers) < len(nodes):
raise InvalidDAGError('Cycle detected.')
return layers | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/utils/topsort.py | 0.97924 | 0.466177 | topsort.py | pypi |
"""Utilities for gathering telemetry for TFX components and pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import re
import sys
import threading
from typing import Dict, List, Text
from tfx import version
# Common label names used.
LABEL_TFX_RUNNER = 'tfx_runner'
LABEL_TFX_EXECUTOR = 'tfx_executor'
_LABEL_TFX_VERSION = 'tfx_version'
_LABEL_TFX_PY_VERSION = 'tfx_py_version'
# The GKE pod label indicating the SDK environment.
LABEL_KFP_SDK_ENV = 'pipelines.kubeflow.org/pipeline-sdk-type'
# Thread local labels registered so far.
_thread_local_labels_state = threading.local()
_thread_local_labels_state.dictionary = {}
@contextlib.contextmanager
def scoped_labels(labels: Dict[Text, Text]):
"""Register thread local labels used in current context."""
if getattr(_thread_local_labels_state, 'dictionary', None) is None:
_thread_local_labels_state.dictionary = {}
for key, value in labels.items():
_thread_local_labels_state.dictionary[key] = _normalize_label(value)
try:
yield
finally:
for key in labels:
_thread_local_labels_state.dictionary.pop(key)
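# Example (a minimal sketch; values are lowercased and truncated by
# _normalize_label below):
#
#   with scoped_labels({LABEL_TFX_RUNNER: 'LocalDagRunner'}):
#     labels = get_labels_dict()
#     # labels['tfx_runner'] == 'localdagrunner'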
def _normalize_label(value: Text) -> Text:
"""Lowercase and replace illegal characters in labels."""
# See https://cloud.google.com/compute/docs/labeling-resources.
  return re.sub(r'[^a-z0-9\_\-]', '-', value.lower())[-63:]
def get_labels_dict() -> Dict[Text, Text]:
"""Get all registered and system generated labels as README.ml-pipelines-sdk.md dict.
Returns:
All registered and system generated labels as README.ml-pipelines-sdk.md dict.
"""
result = dict(
{
_LABEL_TFX_VERSION:
version.__version__,
_LABEL_TFX_PY_VERSION:
'%d.%d' % (sys.version_info.major, sys.version_info.minor),
}, **getattr(_thread_local_labels_state, 'dictionary', {}))
for k, v in result.items():
result[k] = _normalize_label(v)
return result
def make_beam_labels_args() -> List[Text]:
"""Make Beam arguments for common labels used in TFX pipelines.
Returns:
New Beam pipeline args with labels.
"""
labels = get_labels_dict()
  # See the following file for a reference to the '--labels' flag.
# https://github.com/apache/beam/blob/master/sdks/python/apache_beam/options/pipeline_options.py
result = []
for k in sorted(labels):
result.extend(['--labels', '%s=%s' % (k, labels[k])])
return result | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/utils/telemetry_utils.py | 0.765856 | 0.229535 | telemetry_utils.py | pypi |
"""Utilities for Python dependency and package management."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import subprocess
import sys
import tempfile
from typing import List, Text
import absl
from tfx import dependencies
from tfx import version
from tfx.dsl.io import fileio
from tfx.utils import io_utils
def make_beam_dependency_flags(beam_pipeline_args: List[Text]) -> List[Text]:
"""Make beam arguments for TFX python dependencies, if latter was not set.
When TFX executors are used with non-local beam runners (Dataflow, Flink, etc)
the remote runner needs to have access to TFX executors.
This function acts as README.ml-pipelines-sdk.md helper to provide TFX source package to Beam if user
does not provide that through Beam pipeline args.
Args:
beam_pipeline_args: original Beam pipeline args.
Returns:
updated Beam pipeline args with TFX dependencies added.
"""
# TODO(b/176857256): Change guidance message once "ml-pipelines-sdk" extra
# package specifiers are available.
try:
import apache_beam as beam # pylint: disable=g-import-not-at-top
except ModuleNotFoundError as e:
raise Exception(
'Apache Beam must be installed to use this functionality.') from e
pipeline_options = beam.options.pipeline_options.PipelineOptions(
flags=beam_pipeline_args)
all_options = pipeline_options.get_all_options()
for flag_name in [
'extra_packages', 'setup_file', 'requirements_file',
'worker_harness_container_image'
]:
if all_options.get(flag_name):
absl.logging.info('Nonempty beam arg %s already includes dependency',
flag_name)
return beam_pipeline_args
absl.logging.info('Attempting to infer TFX Python dependency for beam')
dependency_flags = []
sdist_file = build_ephemeral_package()
absl.logging.info('Added --extra_package=%s to beam args', sdist_file)
dependency_flags.append('--extra_package=%s' % sdist_file)
return beam_pipeline_args + dependency_flags
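# Example (illustrative only; requires apache_beam to be installed):
#
#   flags = make_beam_dependency_flags(['--runner=DataflowRunner'])
#   # If none of the dependency flags were already present, `flags` now ends
#   # with an '--extra_package=<path to the tfx_ephemeral sdist>' entry.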
_ephemeral_setup_file = """
import setuptools
if __name__ == '__main__':
setuptools.setup(
name='tfx_ephemeral',
version='{version}',
packages=setuptools.find_namespace_packages(),
install_requires=[{install_requires}],
)
"""
def build_ephemeral_package() -> Text:
"""Repackage current installation of TFX into README.ml-pipelines-sdk.md tfx_ephemeral sdist.
Returns:
Path to ephemeral sdist package.
Raises:
RuntimeError: if dist directory has zero or multiple files.
"""
tmp_dir = os.path.join(tempfile.mkdtemp(), 'build', 'tfx')
# Find the last directory named 'tfx' in this file's path and package it.
path_split = __file__.split(os.path.sep)
last_index = -1
for i in range(len(path_split)):
if path_split[i] == 'tfx':
last_index = i
if last_index < 0:
raise RuntimeError('Cannot locate directory \'tfx\' in the path %s' %
__file__)
tfx_root_dir = os.path.sep.join(path_split[0:last_index + 1])
absl.logging.info('Copying all content from install dir %s to temp dir %s',
tfx_root_dir, tmp_dir)
shutil.copytree(tfx_root_dir, os.path.join(tmp_dir, 'tfx'))
  # Source directory default permission is 0555 but we need to be able to create
  # a new setup.py file.
os.chmod(tmp_dir, 0o720)
setup_file = os.path.join(tmp_dir, 'setup.py')
  absl.logging.info('Generating a temp setup file at %s', setup_file)
install_requires = dependencies.make_required_install_packages()
io_utils.write_string_file(
setup_file,
_ephemeral_setup_file.format(
version=version.__version__, install_requires=install_requires))
# Create the package
curdir = os.getcwd()
os.chdir(tmp_dir)
temp_log = os.path.join(tmp_dir, 'setup.log')
with open(temp_log, 'w') as f:
absl.logging.info('Creating temporary sdist package, logs available at %s',
temp_log)
cmd = [sys.executable, setup_file, 'sdist']
subprocess.call(cmd, stdout=f, stderr=f)
os.chdir(curdir)
# Return the package dir+filename
dist_dir = os.path.join(tmp_dir, 'dist')
files = fileio.listdir(dist_dir)
if not files:
raise RuntimeError('Found no package files in %s' % dist_dir)
elif len(files) > 1:
raise RuntimeError('Found multiple package files in %s' % dist_dir)
return os.path.join(dist_dir, files[0]) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/utils/dependency_utils.py | 0.548915 | 0.203332 | dependency_utils.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Optional, Text, Tuple
def make_model_path(model_base_path: Text, model_name: Text,
version: int) -> Text:
"""Make README.ml-pipelines-sdk.md TFS-flavored model path.
Args:
model_base_path: A base path containing the directory of model_name.
model_name: A name of the model.
version: An integer version of the model.
Returns:
`{model_base_path}/{model_name}/{version}`.
"""
return os.path.join(model_base_path, model_name, str(version))
def parse_model_path(
model_path: Text,
expected_model_name: Optional[Text] = None) -> Tuple[Text, Text, int]:
"""Parse model_path into parts of TFS flavor.
Args:
model_path: A TFS-flavored model path.
    expected_model_name: Expected model_name as defined in the module
      docstring. If model_name does not match, parsing will fail.
Raises:
ValueError: If model path is invalid (not TFS-flavored).
Returns:
Tuple of (model_base_path, model_name, version)
"""
rest, version = os.path.split(model_path)
if not rest:
raise ValueError('model_path is too short ({})'.format(model_path))
if not version.isdigit():
raise ValueError('No version segment ({})'.format(model_path))
version = int(version)
model_base_path, model_name = os.path.split(rest)
if expected_model_name is not None and model_name != expected_model_name:
raise ValueError('model_name does not match (expected={}, actual={})'
.format(expected_model_name, model_path))
return model_base_path, model_name, version
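# Example round-trip (hypothetical paths, for illustration):
#
#   path = make_model_path('/serving', 'taxi', 3)  # '/serving/taxi/3'
#   assert parse_model_path(path) == ('/serving', 'taxi', 3)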
def parse_model_base_path(model_path: Text) -> Text:
"""Parse model_base_path from the TFS-flavored model path.
Args:
model_path: A TFS-flavored model path.
Raises:
ValueError: If model path is invalid (not TFS-flavored).
Returns:
model_base_path as defined from the module docstring.
"""
return parse_model_path(model_path)[0] | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/utils/model_paths/tf_serving_flavor.py | 0.920968 | 0.220468 | tf_serving_flavor.py | pypi |
"""Helper functions to choose engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import subprocess
import sys
from typing import Any, Dict, Text
import click
from tfx.tools.cli import labels
from tfx.tools.cli import pip_utils
from tfx.tools.cli.handler import base_handler
def detect_handler(flags_dict: Dict[Text, Any]) -> base_handler.BaseHandler:
"""Detect handler from the environment.
Details:
When the engine flag is set to 'auto', this method first finds all the
packages in the local environment. The environment is first checked
    for multiple orchestrators, and if more than one is found the user must
    rerun the command with the required engine. If only one orchestrator is
    present, the engine is set to that.
Args:
    flags_dict: A dictionary containing the flags of a command.
Returns:
    Corresponding Handler object.
"""
packages_list = pip_utils.get_package_names()
if (labels.AIRFLOW_PACKAGE_NAME in packages_list) and (
labels.KUBEFLOW_PACKAGE_NAME in packages_list):
sys.exit('Multiple orchestrators found. Choose one using --engine flag.')
if labels.AIRFLOW_PACKAGE_NAME in packages_list:
click.echo('Detected Airflow.')
click.echo(
        'Use --engine flag if you intend to use a different orchestrator.')
flags_dict[labels.ENGINE_FLAG] = 'airflow'
from tfx.tools.cli.handler import airflow_handler # pylint: disable=g-import-not-at-top
return airflow_handler.AirflowHandler(flags_dict)
elif labels.KUBEFLOW_PACKAGE_NAME in packages_list:
click.echo('Detected Kubeflow.')
click.echo(
        'Use --engine flag if you intend to use a different orchestrator.')
flags_dict[labels.ENGINE_FLAG] = 'kubeflow'
from tfx.tools.cli.handler import kubeflow_handler # pylint: disable=g-import-not-at-top
return kubeflow_handler.KubeflowHandler(flags_dict)
else:
click.echo('Detected Beam.')
click.echo(
'[WARNING] Default engine will be changed to "local" in the near future.'
)
click.echo(
        'Use --engine flag if you intend to use a different orchestrator.')
flags_dict[labels.ENGINE_FLAG] = 'beam'
from tfx.tools.cli.handler import beam_handler # pylint: disable=g-import-not-at-top
return beam_handler.BeamHandler(flags_dict)
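# Example use of create_handler below (a minimal sketch; real invocations
# populate flags_dict with the full set of CLI flags for the chosen command):
#
#   handler = create_handler({labels.ENGINE_FLAG: 'local'})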
def create_handler(flags_dict: Dict[Text, Any]) -> base_handler.BaseHandler:
"""Retrieve handler from the environment using the --engine flag.
Args:
    flags_dict: A dictionary containing the flags of a command.
Raises:
RuntimeError: When engine is not supported by TFX.
Returns:
Corresponding Handler object.
"""
engine = flags_dict[labels.ENGINE_FLAG]
packages_list = str(subprocess.check_output(['pip', 'freeze', '--local']))
if engine == 'airflow':
if labels.AIRFLOW_PACKAGE_NAME not in packages_list:
sys.exit('Airflow not found.')
from tfx.tools.cli.handler import airflow_handler # pylint: disable=g-import-not-at-top
return airflow_handler.AirflowHandler(flags_dict)
elif engine == 'kubeflow':
if labels.KUBEFLOW_PACKAGE_NAME not in packages_list:
sys.exit('Kubeflow not found.')
from tfx.tools.cli.handler import kubeflow_handler # pylint: disable=g-import-not-at-top
return kubeflow_handler.KubeflowHandler(flags_dict)
elif engine == 'beam':
from tfx.tools.cli.handler import beam_handler # pylint: disable=g-import-not-at-top
return beam_handler.BeamHandler(flags_dict)
elif engine == 'local':
from tfx.tools.cli.handler import local_handler # pylint: disable=g-import-not-at-top
return local_handler.LocalHandler(flags_dict)
elif engine == 'auto':
return detect_handler(flags_dict)
else:
raise RuntimeError('Engine {} is not supported.'.format(engine)) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/tools/cli/handler/handler_factory.py | 0.717408 | 0.193433 | handler_factory.py | pypi |
"""BuildSpec helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Optional, Text
import click
from tfx.tools.cli.container_builder import labels
import yaml
class BuildSpec(object):
"""Build specification.
  BuildSpec generates a default build spec if it does not exist.
Attributes:
filename: build spec filename.
build_context: build working directory.
target_image: target image with no tag.
target_image_tag: tag of the target image.
_buildspec: in-memory representation of the build spec.
"""
def __init__(self,
filename: Text = labels.BUILD_SPEC_FILENAME):
self._filename = filename
if not os.path.exists(self._filename):
raise ValueError('BuildSpec:: build spec file %s does not exist.' %
filename)
self._read_existing_build_spec()
@staticmethod
def load_default(filename: Text = labels.BUILD_SPEC_FILENAME,
target_image: Optional[Text] = None,
build_context: Optional[Text] = None,
dockerfile_name: Optional[Text] = None):
"""Generate README.ml-pipelines-sdk.md default build spec yaml.
Args:
filename: build spec filename.
      target_image: target image path. If it contains the tag, the build spec
        will also include it; if it does not, the build spec will tag it as
        'latest'.
build_context: local build context path.
dockerfile_name: dockerfile filename in the build_context.
Returns:
BuildSpec instance.
"""
if os.path.exists(filename):
raise ValueError('BuildSpec: build spec file %s already exists.' %
filename)
if target_image is None:
raise ValueError('BuildSpec: target_image is not given.')
target_image_fields = target_image.split(':')
if len(target_image_fields) > 2:
raise ValueError('BuildSpec: target_image is in illegal form.')
target_image_with_no_tag = target_image_fields[0]
target_image_tag = 'latest' if len(
target_image_fields) <= 1 else target_image_fields[1]
build_context = build_context or labels.BUILD_CONTEXT
dockerfile_name = dockerfile_name or labels.DOCKERFILE_NAME
build_spec = {
'apiVersion': labels.SKAFFOLD_API_VERSION,
'kind': 'Config',
'build': {
'tagPolicy': {
'envTemplate': {
'template': target_image_tag
}
},
'artifacts': [{
'image': target_image_with_no_tag,
'context': build_context,
'docker': {
'dockerfile': dockerfile_name
}
}],
'local': {
'push': True,
'useDockerCLI': True
}
}
}
with open(filename, 'w') as f:
yaml.dump(build_spec, f)
return BuildSpec(filename)
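  # Example (hypothetical image name and file, for illustration):
  #
  #   spec = BuildSpec.load_default(
  #       filename='build.yaml',
  #       target_image='gcr.io/my-project/my-image:v1')
  #   assert spec.target_image_tag == 'v1'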
def _read_existing_build_spec(self):
"""Read existing build spec yaml."""
with open(self.filename, 'r') as f:
click.echo('Reading build spec from %s' % self.filename)
self._buildspec = yaml.safe_load(f)
if len(self._buildspec['build']['artifacts']) != 1:
      raise RuntimeError('The build spec contains multiple artifacts, but '
                         'only one is supported.')
self._build_context = self._buildspec['build']['artifacts'][0]['context']
self._target_image = self._buildspec['build']['artifacts'][0]['image']
self._target_image_tag = self._buildspec['build']['tagPolicy'][
'envTemplate']['template']
# For compatibility with old build files which have `{{.IMAGE_NAME}}:tag`
# format.
if self._target_image_tag.startswith('{{.IMAGE_NAME}}:'):
self._target_image_tag = self._target_image_tag.split(':', 2)[-1]
@property
def filename(self):
return self._filename
@property
def build_context(self):
return self._build_context
@property
def target_image(self):
return self._target_image
@property
def target_image_tag(self):
return self._target_image_tag | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/tools/cli/container_builder/buildspec.py | 0.906474 | 0.167117 | buildspec.py | pypi |
"""Base class for classes representing README.ml-pipelines-sdk.md dataset for the benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
class BenchmarkDataset(object):
"""Base class for classes representing README.ml-pipelines-sdk.md dataset for the benchmark."""
def __init__(self, base_dir=None):
"""Construct README.ml-pipelines-sdk.md dataset instance.
Args:
base_dir: The directory in which datasets artifacts are located. This will
be used for reading during benchmark execution, as well as writing
during benchmark regeneration. By default, the directory in which this
file is located at runtime will be used to infer the location of
`tfx/benchmarks/datasets`.
"""
self._base_dir = (
base_dir if base_dir else os.path.join(
os.path.dirname(__file__), "datasets"))
def datasets_dir(self, subdir=""):
"""Returns the path to the datasets directory.
Args:
subdir: Subdirectory to join at the end of the datasets directory.
Returns:
The path to the datasets directory, with the subdir joined at the end.
"""
return os.path.join(self._base_dir, subdir)
def dataset_path(self):
"""Returns the path to the dataset file."""
raise NotImplementedError()
def tf_metadata_schema_path(self):
"""Returns the path to the tf.Metadata schema file."""
raise NotImplementedError()
def trained_saved_model_path(self):
"""Returns the path to the inference format SavedModel."""
raise NotImplementedError()
def tft_saved_model_path(self):
"""Returns the path to the tf.Transform SavedModel."""
raise NotImplementedError()
def tfma_saved_model_path(self):
"""Returns the path to the tf.ModelAnalysis SavedModel."""
raise NotImplementedError()
def num_examples(self, limit=None):
"""Returns the number of examples in the dataset.
Args:
limit: If set, returns min(limit, number of examples in dataset).
Returns:
The number of examples in the dataset.
"""
raise NotImplementedError()
def read_raw_dataset(self, deserialize=True, limit=None):
"""Read the raw dataset of tf.train.Examples.
Args:
deserialize: If False, return the raw serialized bytes. If True, return
the tf.train.Example parsed from the serialized bytes.
limit: If set, yields no more than the given number of examples (might be
less if the dataset has less examples than the limit).
Yields:
Serialized/unserialized (depending on deserialize) tf.train.Examples.
"""
for count, example_bytes in enumerate(
tf.compat.v1.io.tf_record_iterator(
self.dataset_path(),
tf.compat.v1.io.TFRecordOptions(
tf.compat.v1.io.TFRecordCompressionType.GZIP))):
if limit and count >= limit:
break
if not deserialize:
yield example_bytes
else:
yield tf.train.Example().FromString(example_bytes)
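  # Example (a sketch over a hypothetical concrete subclass of this class):
  #
  #   dataset = MyBenchmarkDataset()
  #   for example in dataset.read_raw_dataset(limit=10):
  #     print(example.features.feature)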
def generate_raw_dataset(self, args):
"""Generate the raw dataset.
Args:
args: String of extra arguments to use when generating the raw dataset.
"""
raise NotImplementedError()
def generate_models(self, args):
"""Generate the inference and tf.ModelAnalysis format SavedModels.
    This is usually done by running a Trainer on the raw dataset and exporting
the inference and tf.ModelAnalysis format SavedModels.
Args:
args: String of extra arguments to use when generating the models.
"""
raise NotImplementedError() | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/benchmarks/benchmark_dataset.py | 0.949611 | 0.45532 | benchmark_dataset.py | pypi |
"""TFT benchmark base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import shutil
import tempfile
import time
# Standard Imports
from absl import logging
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform import graph_tools
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.beam import impl as tft_beam_impl
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.saved import saved_transform_io_v2
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils
import tfx
from tfx.benchmarks import benchmark_utils
from tfx.benchmarks import benchmark_base
from tfx_bsl.beam import shared
from tfx_bsl.coders import example_coder
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import tf_example_record
class _CopySavedModel(beam.PTransform):
"""Copies the TFT SavedModel to another directory."""
def __init__(self, dest_path):
self._dest_path = dest_path
def expand(self, transform_fn):
def copy_saved_model(unused_element, source_path, dest_path):
shutil.rmtree(dest_path, ignore_errors=True)
shutil.copytree(source_path, dest_path)
logging.info("Copied SavedModel from %s to %s", source_path, dest_path)
return (transform_fn.pipeline
| "CreateSole" >> beam.Create([None])
| "CopySavedModel" >> beam.Map(
copy_saved_model,
source_path=beam.pvalue.AsSingleton(transform_fn),
dest_path=self._dest_path))
class _AnalyzeAndTransformDataset(beam.PTransform):
"""PTransform to run AnalyzeAndTransformDataset."""
def __init__(self,
dataset,
tfxio,
preprocessing_fn,
transform_input_dataset_metadata,
force_tf_compat_v1=True,
max_num_examples=None,
generate_dataset=False):
"""Constructor.
Args:
dataset: BenchmarkDataset object.
tfxio: A `tfx_bsl.TFXIO` instance.
preprocessing_fn: preprocessing_fn.
transform_input_dataset_metadata: dataset_metadata.DatasetMetadata.
force_tf_compat_v1: If False then Transform will use its native TF2
version, if True then Transform will use its TF1 version.
max_num_examples: Max number of examples to read from the dataset.
generate_dataset: If True, generates the raw dataset and appropriate
intermediate outputs (just the TFT SavedModel for now) necessary for
other benchmarks.
"""
self._dataset = dataset
self._tfxio = tfxio
self._preprocessing_fn = preprocessing_fn
self._transform_input_dataset_metadata = transform_input_dataset_metadata
self._force_tf_compat_v1 = force_tf_compat_v1
self._max_num_examples = max_num_examples
self._generate_dataset = generate_dataset
def expand(self, pipeline):
# TODO(b/147620802): Consider making this (and other parameters)
# configurable to test more variants (e.g. with and without deep-copy
# optimisation, with and without cache, etc).
with tft_beam.Context(
temp_dir=tempfile.mkdtemp(),
force_tf_compat_v1=self._force_tf_compat_v1):
raw_data = (
pipeline
| "ReadDataset" >> beam.Create(
self._dataset.read_raw_dataset(
deserialize=False, limit=self._max_num_examples))
| "Decode" >> self._tfxio.BeamSource())
transform_fn, output_metadata = (
(raw_data, self._tfxio.TensorAdapterConfig())
| "AnalyzeDataset" >> tft_beam.AnalyzeDataset(self._preprocessing_fn))
if self._generate_dataset:
_ = transform_fn | "CopySavedModel" >> _CopySavedModel(
dest_path=self._dataset.tft_saved_model_path(
self._force_tf_compat_v1))
(transformed_dataset, transformed_metadata) = (
((raw_data, self._tfxio.TensorAdapterConfig()),
(transform_fn, output_metadata))
| "TransformDataset" >> tft_beam.TransformDataset())
return transformed_dataset, transformed_metadata
# Tuple for variables common to all benchmarks.
CommonVariablesTuple = collections.namedtuple("CommonVariablesTuple", [
"tf_metadata_schema",
"preprocessing_fn",
"transform_input_dataset_metadata",
"tfxio",
])
def _get_common_variables(dataset):
"""Returns metadata schema, preprocessing fn, input dataset metadata."""
tf_metadata_schema = benchmark_utils.read_schema(
dataset.tf_metadata_schema_path())
preprocessing_fn = dataset.tft_preprocessing_fn()
feature_spec = schema_utils.schema_as_feature_spec(
tf_metadata_schema).feature_spec
transform_input_columns = (
tft.get_transform_input_columns(preprocessing_fn, feature_spec))
transform_input_dataset_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
feature: feature_spec[feature] for feature in transform_input_columns
}))
tfxio = tf_example_record.TFExampleBeamRecord(
physical_format="tfexamples",
schema=transform_input_dataset_metadata.schema,
telemetry_descriptors=["TFTransformBenchmark"])
return CommonVariablesTuple(
tf_metadata_schema=tf_metadata_schema,
preprocessing_fn=preprocessing_fn,
transform_input_dataset_metadata=transform_input_dataset_metadata,
tfxio=tfxio)
def regenerate_intermediates_for_dataset(dataset,
force_tf_compat_v1=True,
max_num_examples=None):
"""Regenerate intermediate outputs required for the benchmark."""
common_variables = _get_common_variables(dataset)
logging.info("Regenerating intermediate outputs required for benchmark.")
with beam.Pipeline() as p:
_ = p | _AnalyzeAndTransformDataset(
dataset,
common_variables.tfxio,
common_variables.preprocessing_fn,
common_variables.transform_input_dataset_metadata,
force_tf_compat_v1=force_tf_compat_v1,
max_num_examples=max_num_examples,
generate_dataset=True)
logging.info("Intermediate outputs regenerated.")
def _get_batched_records(dataset, max_num_examples=None):
"""Returns README.ml-pipelines-sdk.md (batch_size, iterator for batched records) tuple for the dataset.
Args:
dataset: BenchmarkDataset object.
max_num_examples: Maximum number of examples to read from the dataset.
Returns:
Tuple of (batch_size, iterator for batched records), where records are
decoded tf.train.Examples.
"""
batch_size = 1000
common_variables = _get_common_variables(dataset)
converter = example_coder.ExamplesToRecordBatchDecoder(
common_variables.transform_input_dataset_metadata.schema
.SerializeToString())
serialized_records = benchmark_utils.batched_iterator(
dataset.read_raw_dataset(deserialize=False, limit=max_num_examples),
batch_size)
records = [converter.DecodeBatch(x) for x in serialized_records]
return batch_size, records
class TFTBenchmarkBase(benchmark_base.BenchmarkBase):
"""TFT benchmark base class."""
def __init__(self, dataset, **kwargs):
# Benchmark runners may pass extraneous arguments we don't care about.
del kwargs
super(TFTBenchmarkBase, self).__init__()
self._dataset = dataset
def report_benchmark(self, **kwargs):
if "extras" not in kwargs:
kwargs["extras"] = {}
# Note that the GIT_COMMIT_ID is not included in the packages themselves:
# it must be injected by an external script.
kwargs["extras"]["commit_tfx"] = (getattr(tfx, "GIT_COMMIT_ID", None) or
getattr(tfx, "__version__", None))
kwargs["extras"]["commit_tft"] = (getattr(tft, "GIT_COMMIT_ID", None) or
getattr(tft, "__version__", None))
super(TFTBenchmarkBase, self).report_benchmark(**kwargs)
def _benchmarkAnalyzeAndTransformDatasetCommon(self, force_tf_compat_v1):
"""Common implementation to benchmark AnalyzeAndTransformDataset."""
common_variables = _get_common_variables(self._dataset)
pipeline = self._create_beam_pipeline()
_ = pipeline | _AnalyzeAndTransformDataset(
self._dataset,
common_variables.tfxio,
common_variables.preprocessing_fn,
common_variables.transform_input_dataset_metadata,
force_tf_compat_v1=force_tf_compat_v1,
max_num_examples=self._max_num_examples())
start = time.time()
result = pipeline.run()
result.wait_until_finish()
end = time.time()
delta = end - start
self.report_benchmark(
iters=1,
wall_time=delta,
extras={
"num_examples":
self._dataset.num_examples(limit=self._max_num_examples())
})
def benchmarkAnalyzeAndTransformDataset(self):
"""Benchmark AnalyzeAndTransformDataset for TFT's TF1 implementation.
    Runs AnalyzeAndTransformDataset in a Beam pipeline. Records the wall time
taken for the whole pipeline.
"""
self._benchmarkAnalyzeAndTransformDatasetCommon(force_tf_compat_v1=True)
def benchmarkTF2AnalyzeAndTransformDataset(self):
"""Benchmark AnalyzeAndTransformDataset for TFT's TF2 implementation.
    Runs AnalyzeAndTransformDataset in a Beam pipeline. Records the wall time
taken for the whole pipeline.
"""
self._benchmarkAnalyzeAndTransformDatasetCommon(force_tf_compat_v1=False)
def _benchmarkRunMetaGraphDoFnManualActuationCommon(self, force_tf_compat_v1):
"""Common implementation to benchmark RunMetaGraphDoFn "manually"."""
common_variables = _get_common_variables(self._dataset)
batch_size, batched_records = _get_batched_records(self._dataset,
self._max_num_examples())
fn = tft_beam_impl._RunMetaGraphDoFn( # pylint: disable=protected-access
tf_config=None,
shared_graph_state_handle=shared.Shared(),
passthrough_keys=set(),
exclude_outputs=None,
use_tf_compat_v1=force_tf_compat_v1,
input_tensor_adapter_config=common_variables.tfxio.TensorAdapterConfig(
))
fn.setup()
start = time.time()
for batch in batched_records:
_ = list(
fn.process(
batch,
saved_model_dir=self._dataset.tft_saved_model_path(
force_tf_compat_v1)))
end = time.time()
delta = end - start
self.report_benchmark(
iters=1,
wall_time=delta,
extras={
"batch_size":
batch_size,
"num_examples":
self._dataset.num_examples(limit=self._max_num_examples())
})
def benchmarkRunMetaGraphDoFnManualActuation(self):
"""Benchmark RunMetaGraphDoFn "manually" for TFT's TF1 implementation.
Runs RunMetaGraphDoFn "manually" outside of README.ml-pipelines-sdk.md Beam pipeline. Records the
wall time taken.
"""
self._benchmarkRunMetaGraphDoFnManualActuationCommon(
force_tf_compat_v1=True)
def benchmarkTF2RunMetaGraphDoFnManualActuation(self):
"""Benchmark RunMetaGraphDoFn "manually" for TFT's TF2 implementation.
Runs RunMetaGraphDoFn "manually" outside of README.ml-pipelines-sdk.md Beam pipeline. Records the
wall time taken.
"""
self._benchmarkRunMetaGraphDoFnManualActuationCommon(
force_tf_compat_v1=False)
def benchmarkRunMetagraphDoFnAtTFLevel(self):
"""Benchmark RunMetaGraphDoFn at the TF level for TFT's TF1 implementation.
Benchmarks the parts of RunMetaGraphDoFn that involve feeding and
fetching from the TFT SavedModel. Records the wall time taken.
Note that this benchmark necessarily duplicates code directly from TFT
since it's benchmarking the low-level internals of TFT, which are not
exposed for use in this way.
"""
common_variables = _get_common_variables(self._dataset)
tf_config = tft_beam_impl._FIXED_PARALLELISM_TF_CONFIG # pylint: disable=protected-access
# This block copied from _GraphStateCompatV1.__init__
with tf.compat.v1.Graph().as_default() as graph:
session = tf.compat.v1.Session(graph=graph, config=tf_config)
with session.as_default():
inputs, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
self._dataset.tft_saved_model_path(force_tf_compat_v1=True),
{}))
session.run(tf.compat.v1.global_variables_initializer())
session.run(tf.compat.v1.tables_initializer())
graph.finalize()
# We ignore the schema, and assume there are no excluded outputs.
outputs_tensor_keys = sorted(set(outputs.keys()))
fetches = [outputs[key] for key in outputs_tensor_keys]
tensor_inputs = graph_tools.get_dependent_inputs(graph, inputs, fetches)
input_tensor_keys = sorted(tensor_inputs.keys())
feed_list = [inputs[key] for key in input_tensor_keys]
callable_get_outputs = session.make_callable(fetches, feed_list=feed_list)
batch_size, batched_records = _get_batched_records(self._dataset,
self._max_num_examples())
input_tensor_adapter = tensor_adapter.TensorAdapter(
common_variables.tfxio.TensorAdapterConfig())
# This block copied from _RunMetaGraphDoFn._handle_batch
start = time.time()
for batch in batched_records:
feed_by_name = input_tensor_adapter.ToBatchTensors(
batch, produce_eager_tensors=False)
feed_list = [feed_by_name[name] for name in input_tensor_keys]
outputs_list = callable_get_outputs(*feed_list)
_ = {key: value for key, value in zip(outputs_tensor_keys, outputs_list)}
end = time.time()
delta = end - start
self.report_benchmark(
iters=1,
wall_time=delta,
extras={
"batch_size":
batch_size,
"num_examples":
self._dataset.num_examples(limit=self._max_num_examples())
})
def benchmarkTF2RunMetagraphDoFnAtTFLevel(self):
"""Benchmark RunMetaGraphDoFn at the TF level for TFT's TF2 implementation.
Benchmarks the parts of RunMetaGraphDoFn that involve feeding and
fetching from the TFT SavedModel. Records the wall time taken.
Note that this benchmark necessarily duplicates code directly from TFT
since it's benchmarking the low-level internals of TFT, which are not
exposed for use in this way.
"""
common_variables = _get_common_variables(self._dataset)
tensor_adapter_config = common_variables.tfxio.TensorAdapterConfig()
# This block copied from _GraphStateV2.__init__
saved_model_loader = saved_transform_io_v2.SavedModelLoader(
self._dataset.tft_saved_model_path(force_tf_compat_v1=False))
callable_get_outputs = saved_model_loader.apply_transform_model
# We ignore the schema, and assume there are no excluded outputs.
outputs_tensor_keys = set(saved_model_loader.structured_outputs.keys())
saved_model_loader.finalize(
tensor_adapter_config.tensor_representations.keys(),
outputs_tensor_keys)
batch_size, batched_records = _get_batched_records(self._dataset,
self._max_num_examples())
input_tensor_adapter = tensor_adapter.TensorAdapter(tensor_adapter_config)
# This block copied from _RunMetaGraphDoFn._handle_batch
start = time.time()
for batch in batched_records:
feed_dict = input_tensor_adapter.ToBatchTensors(
batch, produce_eager_tensors=True)
_ = callable_get_outputs(feed_dict)
end = time.time()
delta = end - start
self.report_benchmark(
iters=1,
wall_time=delta,
extras={
"batch_size":
batch_size,
"num_examples":
self._dataset.num_examples(limit=self._max_num_examples())
}) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/benchmarks/tft_benchmark_base.py | 0.887832 | 0.231506 | tft_benchmark_base.py | pypi |
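# Usage sketch for TFTBenchmarkBase: concrete benchmarks subclass it and bind
# a dataset object. The import and class name below are illustrative; the
# dataset factory comes from the Chicago taxi dataset module in this package.
from tfx.benchmarks.datasets.chicago_taxi import dataset as chicago_taxi_dataset


class TaxiTFTBenchmark(TFTBenchmarkBase):
  """Runs the TFT benchmarks against the Chicago taxi dataset (sketch)."""

  def __init__(self, **kwargs):
    super(TaxiTFTBenchmark, self).__init__(
        dataset=chicago_taxi_dataset.get_dataset(), **kwargs)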
"""Chicago taxi dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import os
import shutil
import tempfile
from typing import Optional, Text
from absl import logging
import apache_beam as beam
import tensorflow_transform as tft
from tfx import components
from tfx.benchmarks import benchmark_dataset
from tfx.components.example_gen.csv_example_gen import executor as csv_exgen
from tfx.examples.chicago_taxi_pipeline import taxi_utils
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
from tfx_bsl.coders import csv_decoder
class ChicagoTaxiDataset(benchmark_dataset.BenchmarkDataset):
"""Chicago taxi dataset."""
def dataset_path(self):
return self.datasets_dir("chicago_taxi/data/taxi_1M.tfrecords.gz")
def tf_metadata_schema_path(self):
return self.datasets_dir(
"../../examples/chicago_taxi_pipeline/data/user_provided_schema/"
"schema.pbtxt")
def trained_saved_model_path(self):
return self.datasets_dir("chicago_taxi/model/trained_saved_model")
def tft_saved_model_path(self, force_tf_compat_v1):
if force_tf_compat_v1:
return self.datasets_dir("chicago_taxi/model/tft_saved_model")
else:
return self.datasets_dir("chicago_taxi/model/tft_tf2_saved_model")
def tfma_saved_model_path(self):
return self.datasets_dir("chicago_taxi/model/tfma_saved_model")
def tft_preprocessing_fn(self):
return taxi_utils.preprocessing_fn
def num_examples(self, limit=None):
result = 1000000
if limit:
result = min(result, limit)
return result
def convert_csv_to_tf_examples(self, csv_path, tfrecords_output_path):
"""Runs README.ml-pipelines-sdk.md Beam pipeline to convert the CSV file into README.ml-pipelines-sdk.md TFRecords file.
This is needed because the conversion is orders of magnitude more
time-consuming than the functions we want to benchmark, so instead of
doing the conversion each time, we do it once to generate a converted
dataset and use that for the benchmark instead.
Args:
csv_path: Path to CSV file containing examples.
tfrecords_output_path: Path to output TFRecords file containing parsed
examples.
"""
# Copied from CSV example gen.
with open(csv_path, "r") as fp:
  column_names = next(fp).strip().split(",")
with beam.Pipeline() as p:
parsed_csv_lines = (
p
| "ReadFromText" >> beam.io.ReadFromText(
file_pattern=csv_path, skip_header_lines=1)
|
"ParseCSVLine" >> beam.ParDo(csv_decoder.ParseCSVLine(delimiter=","))
| "ExtractParsedCSVLines" >> beam.Keys())
column_infos = beam.pvalue.AsSingleton(
parsed_csv_lines
| "InferColumnTypes" >> beam.CombineGlobally(
csv_decoder.ColumnTypeInferrer(
column_names, skip_blank_lines=True)))
_ = (
parsed_csv_lines
| "ToTFExample" >> beam.ParDo(
csv_exgen._ParsedCsvToTfExample(), # pylint: disable=protected-access
column_infos)
| "Serialize" >> beam.Map(lambda x: x.SerializeToString())
| "WriteToTFRecord" >> beam.io.tfrecordio.WriteToTFRecord(
file_path_prefix=tfrecords_output_path,
shard_name_template="",
compression_type=beam.io.filesystem.CompressionTypes.GZIP))
def generate_raw_dataset(self, args):
logging.warning(
"Not actually regenerating the raw dataset.\n"
"To regenerate the raw CSV dataset, see the TFX Chicago Taxi example "
"for details as to how to do so. "
"tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py "
"has the BigQuery query used to generate the dataset.\n"
"After regenerating the raw CSV dataset, you should also regenerate "
"the derived TFRecords dataset. You can do so by passing "
"--generate_dataset_args=/path/to/csv_dataset.csv to "
"regenerate_datasets.py.")
if args:
logging.info("Converting CSV at %s to TFRecords", args)
self.convert_csv_to_tf_examples(args, self.dataset_path())
logging.info("TFRecords written to %s", self.dataset_path())
def generate_models(self, args, force_tf_compat_v1=True):
# Modified version of Chicago Taxi Example pipeline
# tfx/examples/chicago_taxi_pipeline/taxi_pipeline_beam.py
root = tempfile.mkdtemp()
pipeline_root = os.path.join(root, "pipeline")
metadata_path = os.path.join(root, "metadata/metadata.db")
module_file = os.path.join(
os.path.dirname(__file__),
"../../../examples/chicago_taxi_pipeline/taxi_utils.py")
examples = external_input(os.path.dirname(self.dataset_path()))
example_gen = components.ImportExampleGen(input=examples)
statistics_gen = components.StatisticsGen(
examples=example_gen.outputs["examples"])
schema_gen = components.SchemaGen(
statistics=statistics_gen.outputs["statistics"],
infer_feature_shape=False)
transform = components.Transform(
examples=example_gen.outputs["examples"],
schema=schema_gen.outputs["schema"],
module_file=module_file,
force_tf_compat_v1=force_tf_compat_v1)
trainer = components.Trainer(
module_file=module_file,
transformed_examples=transform.outputs["transformed_examples"],
schema=schema_gen.outputs["schema"],
transform_graph=transform.outputs["transform_graph"],
train_args=trainer_pb2.TrainArgs(num_steps=100),
eval_args=trainer_pb2.EvalArgs(num_steps=50))
p = pipeline.Pipeline(
pipeline_name="chicago_taxi_beam",
pipeline_root=pipeline_root,
components=[
example_gen, statistics_gen, schema_gen, transform, trainer
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path))
BeamDagRunner().run(p)
def join_unique_subdir(path):
dirs = os.listdir(path)
if len(dirs) != 1:
raise ValueError(
"expecting there to be only one subdirectory in %s, but "
"subdirectories were: %s" % (path, dirs))
return os.path.join(path, dirs[0])
trainer_output_dir = join_unique_subdir(
os.path.join(pipeline_root, "Trainer/model"))
eval_model_dir = join_unique_subdir(
os.path.join(trainer_output_dir, "eval_model_dir"))
serving_model_dir = join_unique_subdir(
os.path.join(trainer_output_dir,
"serving_model_dir/export/chicago-taxi"))
transform_output_dir = join_unique_subdir(
os.path.join(pipeline_root, "Transform/transform_graph"))
transform_model_dir = os.path.join(transform_output_dir, "transform_fn")
tft_saved_model_path = self.tft_saved_model_path(force_tf_compat_v1)
shutil.rmtree(self.trained_saved_model_path(), ignore_errors=True)
shutil.rmtree(self.tfma_saved_model_path(), ignore_errors=True)
shutil.rmtree(tft_saved_model_path, ignore_errors=True)
shutil.copytree(serving_model_dir, self.trained_saved_model_path())
shutil.copytree(eval_model_dir, self.tfma_saved_model_path())
shutil.copytree(transform_model_dir, tft_saved_model_path)
class WideChicagoTaxiDataset(ChicagoTaxiDataset):
"""Chicago taxi dataset with README.ml-pipelines-sdk.md TFT preprocessing_fn containing specified number of analyzers.
Note that the analyzers are called within the corresponding mappers. Half of
the mappers will be `tft.compute_and_apply_vocabulary`. Another half is split
between `tft.bucketize` and `tft.scale_to_z_score`.
"""
# Percentage of mappers in the preprocessing function of the given type. The
# remaining mappers will be `tft.scale_to_z_score`.
_VOCABS_SHARE = 0.5
_BUCKETIZE_SHARE = 0.25
def __init__(self, base_dir: Optional[Text] = None, num_analyzers: int = 10):
super(WideChicagoTaxiDataset, self).__init__(base_dir)
self._num_vocabs = math.ceil(num_analyzers * self._VOCABS_SHARE)
self._num_bucketize = math.ceil(num_analyzers * self._BUCKETIZE_SHARE)
self._num_scale = num_analyzers - self._num_vocabs - self._num_bucketize
def tft_preprocessing_fn(self):
def wide_preprocessing_fn(inputs):
"""TFT preprocessing function.
Args:
inputs: Map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
# pylint: disable=protected-access
for idx, key in enumerate(
itertools.islice(
itertools.cycle(taxi_utils._BUCKET_FEATURE_KEYS),
self._num_bucketize)):
outputs["bucketized" + str(idx)] = tft.bucketize(
taxi_utils._fill_in_missing(inputs[key]),
taxi_utils._FEATURE_BUCKET_COUNT)
for idx, key in enumerate(
itertools.islice(
itertools.cycle(taxi_utils._DENSE_FLOAT_FEATURE_KEYS),
self._num_scale)):
# Preserve this feature as a dense float, setting nan's to the mean.
outputs["scaled" + str(idx)] = tft.scale_to_z_score(
taxi_utils._fill_in_missing(inputs[key]))
for idx, key in enumerate(
itertools.islice(
itertools.cycle(taxi_utils._VOCAB_FEATURE_KEYS),
self._num_vocabs)):
outputs["vocab" + str(idx)] = tft.compute_and_apply_vocabulary(
taxi_utils._fill_in_missing(inputs[key]),
top_k=taxi_utils._VOCAB_SIZE,
num_oov_buckets=taxi_utils._OOV_SIZE)
# Pass-through features.
for key in taxi_utils._CATEGORICAL_FEATURE_KEYS + [taxi_utils._LABEL_KEY]:
outputs[key] = inputs[key]
return outputs
return wide_preprocessing_fn
def get_dataset(base_dir=None):
return ChicagoTaxiDataset(base_dir)
def get_wide_dataset(base_dir=None, num_analyzers=10):
return WideChicagoTaxiDataset(base_dir, num_analyzers) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/benchmarks/datasets/chicago_taxi/dataset.py | 0.864953 | 0.299758 | dataset.py | pypi |
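# Usage sketch: with the shares defined above and num_analyzers=10, the wide
# preprocessing_fn contains ceil(10 * 0.5) = 5 vocabularies,
# ceil(10 * 0.25) = 3 bucketize ops and 10 - 5 - 3 = 2 z-score scalers.
def _wide_dataset_example():
  """Builds the wide preprocessing_fn for a 10-analyzer benchmark (sketch)."""
  wide_dataset = get_wide_dataset(num_analyzers=10)
  return wide_dataset.tft_preprocessing_fn()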
"""Data types util shared for orchestration."""
from typing import Dict, Iterable, List, Mapping, Optional, Union
from tfx import types
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from ml_metadata.proto import metadata_store_pb2
from ml_metadata.proto import metadata_store_service_pb2
def build_artifact_dict(
proto_dict: Mapping[str, metadata_store_service_pb2.ArtifactStructList]
) -> Dict[str, List[types.Artifact]]:
"""Converts input/output artifact dict."""
result = {}
for k, v in proto_dict.items():
result[k] = []
for artifact_struct in v.elements:
if not artifact_struct.HasField('artifact'):
raise RuntimeError('Only support artifact oneof field')
artifact_and_type = artifact_struct.artifact
result[k].append(
artifact_utils.deserialize_artifact(artifact_and_type.type,
artifact_and_type.artifact))
return result
def build_artifact_struct_dict(
artifact_dict: Mapping[str, Iterable[types.Artifact]]
) -> Dict[str, metadata_store_service_pb2.ArtifactStructList]:
"""Converts input/output artifact dict."""
result = {}
if not artifact_dict:
return result
for k, v in artifact_dict.items():
artifact_list = metadata_store_service_pb2.ArtifactStructList()
for artifact in v:
artifact_struct = metadata_store_service_pb2.ArtifactStruct(
artifact=metadata_store_service_pb2.ArtifactAndType(
artifact=artifact.mlmd_artifact, type=artifact.artifact_type))
artifact_list.elements.append(artifact_struct)
result[k] = artifact_list
return result
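# The following sketch demonstrates that build_artifact_struct_dict and
# build_artifact_dict round-trip an artifact dict (illustrative values only).
def _artifact_dict_round_trip_example():
  """Round-trips an artifact dict through ArtifactStructList protos (sketch)."""
  from tfx.types import standard_artifacts  # Local import for the sketch.
  examples = standard_artifacts.Examples()
  examples.uri = '/tmp/examples'  # Hypothetical path, for illustration only.
  struct_dict = build_artifact_struct_dict({'examples': [examples]})
  restored = build_artifact_dict(struct_dict)
  assert restored['examples'][0].uri == '/tmp/examples'
  return restored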
def build_value_dict(
metadata_value_dict: Mapping[str, metadata_store_pb2.Value]
) -> Dict[str, types.Property]:
"""Converts MLMD value dict into plain value dict."""
result = {}
for k, v in metadata_value_dict.items():
result[k] = getattr(v, v.WhichOneof('value'))
return result
def build_metadata_value_dict(
value_dict: Mapping[str, types.Property]
) -> Dict[str, metadata_store_pb2.Value]:
"""Converts plain value dict into MLMD value dict."""
result = {}
if not value_dict:
return result
for k, v in value_dict.items():
value = metadata_store_pb2.Value()
if isinstance(v, str):
value.string_value = v
elif isinstance(v, int):
value.int_value = v
elif isinstance(v, float):
value.double_value = v
else:
raise RuntimeError('Unsupported type {} for key {}'.format(type(v), k))
result[k] = value
return result
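# The following sketch demonstrates the value-dict round trip: plain str, int
# and float values survive conversion to MLMD `Value` protos and back.
def _value_dict_round_trip_example():
  """Round-trips a plain value dict through MLMD Value protos (sketch)."""
  plain = {'name': 'taxi', 'steps': 100, 'learning_rate': 0.1}
  mlmd_values = build_metadata_value_dict(plain)
  assert build_value_dict(mlmd_values) == plain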
def get_metadata_value_type(
value: Union[pipeline_pb2.Value, types.Property]
) -> metadata_store_pb2.PropertyType:
"""Gets the metadata property type of README.ml-pipelines-sdk.md property value from README.ml-pipelines-sdk.md value.
Args:
value: The property value represented by pipeline_pb2.Value or README.ml-pipelines-sdk.md primitive
property value type.
Returns:
A metadata_store_pb2.PropertyType.
Raises:
RuntimeError: If property value is still in RuntimeParameter form
ValueError: The value type is not supported.
"""
if isinstance(value, int):
return metadata_store_pb2.INT
elif isinstance(value, float):
return metadata_store_pb2.DOUBLE
elif isinstance(value, str):
return metadata_store_pb2.STRING
elif isinstance(value, pipeline_pb2.Value):
which = value.WhichOneof('value')
if which != 'field_value':
raise RuntimeError('Expecting field_value but got %s.' % value)
value_type = value.field_value.WhichOneof('value')
if value_type == 'int_value':
return metadata_store_pb2.INT
elif value_type == 'double_value':
return metadata_store_pb2.DOUBLE
elif value_type == 'string_value':
return metadata_store_pb2.STRING
else:
raise ValueError('Unexpected value type %s' % value_type)
else:
raise ValueError('Unexpected value type %s' % type(value))
def get_value(tfx_value: pipeline_pb2.Value) -> types.Property:
"""Gets the primitive type value of README.ml-pipelines-sdk.md pipeline_pb2.Value instance.
Args:
tfx_value: A pipeline_pb2.Value message.
Returns:
The primitive type value of the tfx value.
Raises:
RuntimeError: when the value is still in RuntimeParameter form.
"""
which = tfx_value.WhichOneof('value')
if which != 'field_value':
raise RuntimeError('Expecting field_value but got %s.' % tfx_value)
return getattr(tfx_value.field_value,
tfx_value.field_value.WhichOneof('value'))
def get_metadata_value(
value: metadata_store_pb2.Value) -> Optional[types.Property]:
"""Gets the primitive type value of README.ml-pipelines-sdk.md metadata_store_pb2.Value instance.
Args:
value: A metadata_store_pb2.Value message.
Returns:
The primitive type value of metadata_store_pb2.Value instance if set, `None`
otherwise.
"""
which = value.WhichOneof('value')
return None if which is None else getattr(value, which)
def set_metadata_value(
metadata_value: metadata_store_pb2.Value,
value: Union[pipeline_pb2.Value,
types.Property]) -> metadata_store_pb2.Value:
"""Sets metadata property based on tfx value.
Args:
metadata_value: A metadata_store_pb2.Value message to be set.
value: The value of the property in pipeline_pb2.Value form.
Returns:
A Value proto filled with the provided value.
Raises:
ValueError: If value type is not supported or is still RuntimeParameter.
"""
# bool is a subclass of int...
if isinstance(value, int) and not isinstance(value, bool):
metadata_value.int_value = value
elif isinstance(value, float):
metadata_value.double_value = value
elif isinstance(value, str):
metadata_value.string_value = value
elif isinstance(value, pipeline_pb2.Value):
which = value.WhichOneof('value')
if which != 'field_value':
raise ValueError('Expecting field_value but got %s.' % value)
metadata_value.CopyFrom(value.field_value)
else:
raise ValueError('Unexpected type %s' % type(value))
return metadata_value | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/data_types_utils.py | 0.939088 | 0.286656 | data_types_utils.py | pypi |
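# Usage sketch for set_metadata_value: the proto is filled in place and also
# returned, so the call composes with proto construction (illustrative value).
def _set_metadata_value_example():
  """Fills a fresh Value proto with an int (sketch)."""
  value = set_metadata_value(metadata_store_pb2.Value(), 42)
  assert value.int_value == 42
  return value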
"""Common data types for orchestration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, Type, Union
from absl import logging
from tfx import types
from tfx.utils import json_utils
# Regex pattern of RuntimeParameter.
# Use \\* to deal with escaping in json-serialized version of objects.
RUNTIME_PARAMETER_PATTERN = (r'({\\*"__class__\\*": \\*"RuntimeParameter\\*", '
r'.*?})')
PARAMETER_NAME_LITERAL = r'(\\*"RuntimeParameter\\*")'
class ExecutionDecision(object):
"""ExecutionDecision records how executor should perform next execution.
Attributes:
input_dict: Updated key -> types.Artifact for inputs that will be used by
actual execution.
output_dict: Updated key -> types.Artifact for outputs that will be used by
actual execution.
exec_properties: Updated dict of other execution properties that will be
used by actual execution.
execution_id: Registered execution_id for the upcoming execution.
use_cached_results: Whether or not to use a cached result.
"""
def __init__(self,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any],
execution_id: Optional[int] = None,
use_cached_results: Optional[bool] = False):
self.input_dict = input_dict
self.output_dict = output_dict
self.exec_properties = exec_properties
self.execution_id = execution_id
self.use_cached_results = use_cached_results
class ExecutionInfo(object):
"""ExecutionInfo contains information populated during execution phase.
Attributes:
input_dict: Updated key -> List of types.Artifact for inputs that were used
during the actual execution.
output_dict: Updated key -> List of types.Artifact for outputs that were
generated during the actual execution.
exec_properties: execution properties used in this execution.
execution_id: Registered execution_id for the execution.
"""
def __init__(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any], execution_id: int):
self.input_dict = input_dict
self.output_dict = output_dict
self.exec_properties = exec_properties
self.execution_id = execution_id
class DriverArgs(object):
"""Args to driver from orchestration system.
Attributes:
enable_cache: whether cache is enabled in current execution.
interactive_resolution: whether to skip MLMD channel artifact resolution, if
artifacts are already resolved for a channel when running in interactive
mode.
"""
def __init__(self,
enable_cache: bool = True,
interactive_resolution: bool = False):
self.enable_cache = enable_cache
self.interactive_resolution = interactive_resolution
class PipelineInfo(object):
"""Pipeline info from orchestration system.
Attributes:
pipeline_name: name of the pipeline. We expect this to be unique for
different pipelines.
pipeline_root: root directory of the pipeline. We expect this to be unique
for different pipelines.
run_id: optional uuid for a single run of the pipeline.
"""
def __init__(self,
pipeline_name: Text,
pipeline_root: Text,
run_id: Optional[Text] = None):
self.pipeline_name = pipeline_name
self.pipeline_root = pipeline_root
self.run_id = run_id
def __repr__(self):
return ('PipelineInfo('
'pipeline_name: %s, '
'pipeline_root: %s, '
'run_id: %s)') % (self.pipeline_name, self.pipeline_root,
self.run_id)
@property
def pipeline_run_context_name(self) -> Text:
"""Context name for the current pipeline run."""
return '{}.{}'.format(self.pipeline_name, self.run_id)
@property
def pipeline_context_name(self) -> Text:
"""Context name for the pipeline."""
return self.pipeline_name
class ComponentInfo(object):
"""Component info.
Attributes:
component_type: type of the component, usually determined by the executor's
Python path or image URI.
component_id: a unique identifier of the component instance within the
pipeline.
pipeline_info: the pipeline info of the current pipeline run.
"""
def __init__(self, component_type: Text, component_id: Text,
pipeline_info: PipelineInfo):
self.component_type = component_type
self.component_id = component_id
self.pipeline_info = pipeline_info
def __repr__(self):
return ('ComponentInfo('
'component_type: %s, '
'component_id: %s, '
'pipeline_info: %s)') % (self.component_type, self.component_id,
self.pipeline_info)
@property
def component_run_context_name(self) -> Text:
""""Context name for current component run."""
if self.pipeline_info.run_id:
return '{}.{}'.format(self.pipeline_info.pipeline_run_context_name,
self.component_id)
else:
return '{}.{}'.format(self.pipeline_info.pipeline_context_name,
self.component_id)
# TODO(b/146361011): Implement a checking mechanism preventing users from using
# RuntimeParameter in DAG runner other than Kubeflow Pipelines.
class RuntimeParameter(json_utils.Jsonable):
"""Runtime parameter.
Currently only supported on KubeflowDagRunner.
Attributes:
name: The name of the runtime parameter.
default: Default value for runtime params when it's not explicitly
specified.
ptype: The type of the runtime parameter.
description: Description of the usage of the parameter.
"""
def __init__(
self,
name: Text,
ptype: Type = None, # pylint: disable=g-bare-generic
default: Optional[Union[int, float, Text]] = None,
description: Optional[Text] = None):
logging.warning('RuntimeParameter is only supported on Cloud-based DAG '
'runner currently.')
if ptype and ptype not in [int, float, Text]:
raise RuntimeError('Only str and scalar runtime parameters are supported')
if (default and ptype) and not isinstance(default, ptype):
raise TypeError('Default value must be consistent with specified ptype')
self.name = name
self.default = default
self.ptype = ptype
self.description = description
def __repr__(self):
"""Easily convert RuntimeParameter to str.
This provides a unified way to call str(x) when x can be either str or
RuntimeParameter. Note: if ptype == Text or None, the serialization will be
wrapped in double quotes.
Returns:
The json serialized version of RuntimeParameter.
"""
return json_utils.dumps(self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.name == other.name and self.default == other.default and
self.ptype == other.ptype and self.description == other.description)
def __hash__(self):
"""RuntimeParameter is uniquely identified by its name."""
return self.name.__hash__() | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/data_types.py | 0.922905 | 0.222098 | data_types.py | pypi |
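# Usage sketch for RuntimeParameter: an int-typed parameter with a default.
# The name and description below are illustrative.
def _runtime_parameter_example():
  """Creates a runtime parameter for the number of training steps (sketch)."""
  return RuntimeParameter(
      name='train_steps',
      ptype=int,
      default=100,
      description='Number of training steps (illustrative).')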
"""Definition of Kubernetes TFX runner."""
import datetime
import json
from typing import List, Optional, Text, Type
import absl
from tfx.dsl.component.experimental import container_component
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.experimental.kubernetes import kubernetes_remote_runner
from tfx.orchestration.kubeflow import node_wrapper
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.utils import json_utils
from tfx.utils import kube_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
_CONTAINER_COMMAND = [
'python', '-m',
'tfx.orchestration.experimental.kubernetes.container_entrypoint'
]
# Suffix added to the component id to avoid MLMD conflict when
# registering this component.
_WRAPPER_SUFFIX = '.Wrapper'
_TFX_IMAGE = 'tensorflow/tfx'
def get_default_kubernetes_metadata_config(
) -> metadata_store_pb2.ConnectionConfig:
"""Returns the default metadata connection config for README.ml-pipelines-sdk.md kubernetes cluster.
Returns:
A config proto that will be serialized as JSON and passed to the running
container so the TFX component driver is able to communicate with MLMD in
a Kubernetes cluster.
"""
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.mysql.host = 'mysql'
connection_config.mysql.port = 3306
connection_config.mysql.database = 'mysql'
connection_config.mysql.user = 'root'
connection_config.mysql.password = ''
return connection_config
def launch_container_component(
component: base_node.BaseNode,
component_launcher_class: Type[
base_component_launcher.BaseComponentLauncher],
component_config: base_component_config.BaseComponentConfig,
pipeline: tfx_pipeline.Pipeline):
"""Use the kubernetes component launcher to launch the component.
Args:
component: Container component to be executed.
component_launcher_class: The class of the launcher to launch the component.
component_config: component config to launch the component.
pipeline: Logical pipeline that contains pipeline related information.
"""
driver_args = data_types.DriverArgs(enable_cache=pipeline.enable_cache)
metadata_connection = metadata.Metadata(pipeline.metadata_connection_config)
component_launcher = component_launcher_class.create(
component=component,
pipeline_info=pipeline.pipeline_info,
driver_args=driver_args,
metadata_connection=metadata_connection,
beam_pipeline_args=pipeline.beam_pipeline_args,
additional_pipeline_args=pipeline.additional_pipeline_args,
component_config=component_config)
absl.logging.info('Component %s is running.', component.id)
component_launcher.launch()
absl.logging.info('Component %s is finished.', component.id)
class KubernetesDagRunnerConfig(pipeline_config.PipelineConfig):
"""Runtime configuration parameters specific to execution on Kubernetes."""
def __init__(self,
tfx_image: Optional[Text] = None,
supported_launcher_classes: Optional[List[Type[
base_component_launcher.BaseComponentLauncher]]] = None,
**kwargs):
"""Creates README.ml-pipelines-sdk.md KubernetesDagRunnerConfig object.
Args:
tfx_image: The TFX container image to use in the pipeline.
supported_launcher_classes: Optional list of component launcher classes
that are supported by the current pipeline. List sequence determines the
order in which launchers are chosen for each component being run.
**kwargs: keyword args for PipelineConfig.
"""
supported_launcher_classes = supported_launcher_classes or [
in_process_component_launcher.InProcessComponentLauncher,
kubernetes_component_launcher.KubernetesComponentLauncher,
]
super(KubernetesDagRunnerConfig, self).__init__(
supported_launcher_classes=supported_launcher_classes, **kwargs)
self.tfx_image = tfx_image or _TFX_IMAGE
class KubernetesDagRunner(tfx_runner.TfxRunner):
"""TFX runner on Kubernetes."""
def __init__(self, config: Optional[KubernetesDagRunnerConfig] = None):
"""Initializes KubernetesDagRunner as README.ml-pipelines-sdk.md TFX orchestrator.
Args:
config: Optional pipeline config for customizing the launching of each
component. Defaults to pipeline config that supports
InProcessComponentLauncher and KubernetesComponentLauncher.
"""
if config is None:
config = KubernetesDagRunnerConfig()
super(KubernetesDagRunner, self).__init__(config)
def run(self, pipeline: tfx_pipeline.Pipeline) -> None:
"""Deploys given logical pipeline on Kubernetes.
Args:
pipeline: Logical pipeline containing pipeline args and components.
"""
if not pipeline.pipeline_info.run_id:
pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()
if not kube_utils.is_inside_cluster():
kubernetes_remote_runner.run_as_kubernetes_job(
pipeline=pipeline, tfx_image=self._config.tfx_image)
return
# TODO(ericlege): Support running components in parallel.
ran_components = set()
# Runs component in topological order.
for component in pipeline.components:
# Verify that components are in topological order.
if hasattr(component, 'upstream_nodes') and component.upstream_nodes:
for upstream_node in component.upstream_nodes:
assert upstream_node in ran_components, ('Component is not in '
'topological order')
(component_launcher_class,
component_config) = config_utils.find_component_launch_info(
self._config, component)
# Check if the component is launchable as a container component.
if kubernetes_component_launcher.KubernetesComponentLauncher.can_launch(
component.executor_spec, component_config):
launch_container_component(component, component_launcher_class,
component_config, pipeline)
# Otherwise, the component should be launchable with the in-process
# component launcher; wrap the component into a container component.
elif in_process_component_launcher.InProcessComponentLauncher.can_launch(
component.executor_spec, component_config):
wrapped_component = self._wrap_container_component(
component=component,
component_launcher_class=component_launcher_class,
component_config=component_config,
pipeline=pipeline)
# Component launch info is updated by wrapping the component into a
# container component. Therefore, these properties need to be reloaded.
(wrapped_component_launcher_class,
wrapped_component_config) = config_utils.find_component_launch_info(
self._config, wrapped_component)
launch_container_component(wrapped_component,
wrapped_component_launcher_class,
wrapped_component_config, pipeline)
else:
raise ValueError('Can not find suitable launcher for component.')
ran_components.add(component)
def _wrap_container_component(
self,
component: base_node.BaseNode,
component_launcher_class: Type[
base_component_launcher.BaseComponentLauncher],
component_config: Optional[base_component_config.BaseComponentConfig],
pipeline: tfx_pipeline.Pipeline,
) -> base_node.BaseNode:
"""Wrapper for container component.
Args:
component: Component to be executed.
component_launcher_class: The class of the launcher to launch the
component.
component_config: component config to launch the component.
pipeline: Logical pipeline that contains pipeline related information.
Returns:
A container component that runs the wrapped component upon execution.
"""
component_launcher_class_path = '.'.join([
component_launcher_class.__module__, component_launcher_class.__name__
])
serialized_component = json_utils.dumps(node_wrapper.NodeWrapper(component))
arguments = [
'--pipeline_name',
pipeline.pipeline_info.pipeline_name,
'--pipeline_root',
pipeline.pipeline_info.pipeline_root,
'--run_id',
pipeline.pipeline_info.run_id,
'--metadata_config',
json_format.MessageToJson(
message=get_default_kubernetes_metadata_config(),
preserving_proto_field_name=True),
'--beam_pipeline_args',
json.dumps(pipeline.beam_pipeline_args),
'--additional_pipeline_args',
json.dumps(pipeline.additional_pipeline_args),
'--component_launcher_class_path',
component_launcher_class_path,
'--serialized_component',
serialized_component,
'--component_config',
json_utils.dumps(component_config),
]
# Outputs/Parameters fields are not used as they are contained in
# the serialized component.
return container_component.create_container_component(
name=component.__class__.__name__,
outputs={},
parameters={},
image=self._config.tfx_image,
command=_CONTAINER_COMMAND + arguments)(
instance_name=component._instance_name + _WRAPPER_SUFFIX) # pylint: disable=protected-access | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/kubernetes/kubernetes_dag_runner.py | 0.867682 | 0.19475 | kubernetes_dag_runner.py | pypi |
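# Usage sketch for KubernetesDagRunner: run a pipeline with a pinned TFX
# image. The pipeline object is assumed to be built elsewhere.
def _run_pipeline_example(pipeline: tfx_pipeline.Pipeline) -> None:
  """Runs the given pipeline on Kubernetes with a custom image (sketch)."""
  runner = KubernetesDagRunner(
      config=KubernetesDagRunnerConfig(tfx_image='tensorflow/tfx:latest'))
  runner.run(pipeline)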
"""Kubernetes TFX runner for out-of-cluster orchestration."""
import datetime
import json
import time
from typing import Dict, List, Text
import absl
from kubernetes import client
from tfx.dsl.components.base import base_node
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.kubeflow import node_wrapper
from tfx.utils import json_utils
from tfx.utils import kube_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
_ORCHESTRATOR_COMMAND = [
'python', '-m',
'tfx.orchestration.experimental.kubernetes.orchestrator_container_entrypoint'
]
# Number of seconds to wait for a Kubernetes job to spawn a pod.
# This is expected to take only a few seconds.
JOB_CREATION_TIMEOUT = 300
def run_as_kubernetes_job(pipeline: tfx_pipeline.Pipeline,
tfx_image: Text) -> None:
"""Submits and runs README.ml-pipelines-sdk.md TFX pipeline from outside the cluster.
Args:
pipeline: Logical pipeline containing pipeline args and components.
tfx_image: Container image URI for the TFX container.
Raises:
RuntimeError: When an error is encountered running the Kubernetes Job.
"""
# TODO(ccy): Look for alternative serialization schemes once available.
serialized_pipeline = _serialize_pipeline(pipeline)
arguments = [
'--serialized_pipeline',
serialized_pipeline,
'--tfx_image',
tfx_image,
]
batch_api = kube_utils.make_batch_v1_api()
job_name = 'Job_' + pipeline.pipeline_info.run_id
pod_label = kube_utils.sanitize_pod_name(job_name)
container_name = 'pipeline-orchestrator'
job = kube_utils.make_job_object(
name=job_name,
container_image=tfx_image,
command=_ORCHESTRATOR_COMMAND + arguments,
container_name=container_name,
pod_labels={
'job-name': pod_label,
},
service_account_name=kube_utils.TFX_SERVICE_ACCOUNT,
)
try:
batch_api.create_namespaced_job('default', job, pretty=True)
except client.rest.ApiException as e:
raise RuntimeError('Failed to submit job! \nReason: %s\nBody: %s' %
(e.reason, e.body))
# Wait for pod to start.
orchestrator_pods = []
core_api = kube_utils.make_core_v1_api()
start_time = datetime.datetime.utcnow()
# Wait for the Kubernetes job to launch a pod.
while not orchestrator_pods and (datetime.datetime.utcnow() -
start_time).seconds < JOB_CREATION_TIMEOUT:
try:
orchestrator_pods = core_api.list_namespaced_pod(
namespace='default',
label_selector='job-name={}'.format(pod_label)).items
except client.rest.ApiException as e:
if e.status != 404:
raise RuntimeError('Unknown error! \nReason: %s\nBody: %s' %
(e.reason, e.body))
time.sleep(1)
# Transient orchestrator should only have 1 pod.
if len(orchestrator_pods) != 1:
raise RuntimeError('Expected 1 pod launched by Kubernetes job, found %d' %
len(orchestrator_pods))
orchestrator_pod = orchestrator_pods.pop()
pod_name = orchestrator_pod.metadata.name
absl.logging.info('Waiting for pod "default:%s" to start.', pod_name)
kube_utils.wait_pod(
core_api,
pod_name,
'default',
exit_condition_lambda=kube_utils.pod_is_not_pending,
condition_description='non-pending status')
# Stream logs from orchestrator pod.
absl.logging.info('Start log streaming for pod "default:%s".', pod_name)
try:
logs = core_api.read_namespaced_pod_log(
name=pod_name,
namespace='default',
container=container_name,
follow=True,
_preload_content=False).stream()
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to stream the logs from the pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
for log in logs:
absl.logging.info(log.decode().rstrip('\n'))
resp = kube_utils.wait_pod(
core_api,
pod_name,
'default',
exit_condition_lambda=kube_utils.pod_is_done,
condition_description='done state',
exponential_backoff=True)
if resp.status.phase == kube_utils.PodPhase.FAILED.value:
raise RuntimeError('Pod "default:%s" failed with status "%s".' %
(pod_name, resp.status))
def _extract_downstream_ids(
components: List[base_node.BaseNode]) -> Dict[Text, List[Text]]:
"""Extract downstream component ids from README.ml-pipelines-sdk.md list of components.
Args:
components: List of TFX Components.
Returns:
Mapping from component id to ids of its downstream components for
each component.
"""
downstream_ids = {}
for component in components:
downstream_ids[component.id] = [
downstream_node.id for downstream_node in component.downstream_nodes
]
return downstream_ids
def _serialize_pipeline(pipeline: tfx_pipeline.Pipeline) -> Text:
"""Serializes README.ml-pipelines-sdk.md TFX pipeline.
To be replaced with the the TFX Intermediate Representation:
tensorflow/community#271. This serialization procedure extracts from
the pipeline properties necessary for reconstructing the pipeline instance
from within the cluster. For properties such as components and metadata
config that can not be directly dumped with JSON, we use NodeWrapper and
MessageToJson to serialize them beforehand.
Args:
pipeline: Logical pipeline containing pipeline args and components.
Returns:
Pipeline serialized as JSON string.
"""
serialized_components = []
for component in pipeline.components:
serialized_components.append(
json_utils.dumps(node_wrapper.NodeWrapper(component)))
# Extract and pass pipeline graph information which are lost during the
# serialization process. The orchestrator container uses downstream_ids
# to reconstruct pipeline graph.
downstream_ids = _extract_downstream_ids(pipeline.components)
return json.dumps({
'pipeline_name':
pipeline.pipeline_info.pipeline_name,
'pipeline_root':
pipeline.pipeline_info.pipeline_root,
'enable_cache':
pipeline.enable_cache,
'components':
serialized_components,
'downstream_ids':
downstream_ids,
'metadata_connection_config':
json_format.MessageToJson(
message=pipeline.metadata_connection_config,
preserving_proto_field_name=True,
),
'beam_pipeline_args':
pipeline.beam_pipeline_args,
})
def deserialize_pipeline(serialized_pipeline: Text) -> tfx_pipeline.Pipeline:
"""Deserializes README.ml-pipelines-sdk.md TFX pipeline.
To be replaced with the the TFX Intermediate Representation:
tensorflow/community#271. This deserialization procedure reverses the
serialization procedure and reconstructs the pipeline instance.
Args:
serialized_pipeline: Pipeline JSON string serialized with the procedure from
_serialize_pipeline.
Returns:
Original pipeline containing pipeline args and components.
"""
pipeline = json.loads(serialized_pipeline)
components = [
json_utils.loads(component) for component in pipeline['components']
]
metadata_connection_config = metadata_store_pb2.ConnectionConfig()
json_format.Parse(pipeline['metadata_connection_config'],
metadata_connection_config)
# Restore component dependencies.
downstream_ids = pipeline['downstream_ids']
if not isinstance(downstream_ids, dict):
raise ValueError("downstream_ids needs to be README.ml-pipelines-sdk.md 'dict'.")
if len(downstream_ids) != len(components):
raise ValueError(
'Wrong number of items in downstream_ids. Expected: %d. Actual: %d' %
(len(components), len(downstream_ids)))
id_to_component = {component.id: component for component in components}
for component in components:
# Since downstream and upstream node attributes are discarded during the
# serialization process, we initialize them here.
component._upstream_nodes = set() # pylint: disable=protected-access
component._downstream_nodes = set() # pylint: disable=protected-access
# Restore dropped instance name from component id.
component._instance_name = component.id.split('.')[-1]  # pylint: disable=protected-access
for upstream_id, downstream_id_list in downstream_ids.items():
upstream_component = id_to_component[upstream_id]
for downstream_id in downstream_id_list:
upstream_component.add_downstream_node(id_to_component[downstream_id])
return tfx_pipeline.Pipeline(
pipeline_name=pipeline['pipeline_name'],
pipeline_root=pipeline['pipeline_root'],
components=components,
enable_cache=pipeline['enable_cache'],
metadata_connection_config=metadata_connection_config,
beam_pipeline_args=pipeline['beam_pipeline_args'],
) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/kubernetes/kubernetes_remote_runner.py | 0.732592 | 0.185799 | kubernetes_remote_runner.py | pypi |
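# Round-trip sketch: serialization preserves pipeline identity and the
# downstream-id graph, so a deserialized pipeline is equivalent for
# orchestration purposes. The pipeline argument is assumed to be constructed
# elsewhere.
def _serialization_round_trip_example(
    pipeline: tfx_pipeline.Pipeline) -> tfx_pipeline.Pipeline:
  """Serializes and deserializes a pipeline, checking its name (sketch)."""
  restored = deserialize_pipeline(_serialize_pipeline(pipeline))
  assert (restored.pipeline_info.pipeline_name ==
          pipeline.pipeline_info.pipeline_name)
  return restored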
"""Chicago taxi example using TFX Kubernetes Orchestrator."""
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import pipeline
from tfx.orchestration.experimental.kubernetes import kubernetes_dag_runner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'chicago_taxi_beam'
# Directory and data locations (uses Google Cloud Storage).
_input_bucket = 'gs://my-bucket'
_output_bucket = 'gs://my-bucket'
# This example assumes that the taxi data is stored in a Google Cloud Storage
# bucket named taxi under `gs://${_input_bucket}/data` and the taxi utility
# function is stored at `gs://${_input_bucket}/taxi_utils.py`.
# Feel free to customize this as needed.
_data_root = os.path.join(_input_bucket, 'data')
_module_file = os.path.join(_input_bucket, 'taxi_utils.py')
# Directory for pipeline outputs.
_tfx_root = os.path.join(_output_bucket, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_tfx_root, 'serving_model', _pipeline_name)
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
'--direct_running_mode=multi_processing',
# 0 means auto-detect based on the number of CPUs available
# during execution time.
'--direct_num_workers=0',
]
def create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
module_file: Text, serving_model_dir: Text,
beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
examples = external_input(data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=examples)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
# Uses a user-provided Python function that implements a model using TF-Learn.
trainer = Trainer(
module_file=module_file,
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
# Get the latest blessed model for model validation.
model_resolver = ResolverNode(
instance_name='latest_blessed_model_resolver',
resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing))
# Uses TFMA to compute evaluation statistics over the features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
'accuracy':
tfma.config.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.6}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if the check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
config = kubernetes_dag_runner.get_default_kubernetes_metadata_config()
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
trainer,
model_resolver,
evaluator,
pusher,
],
enable_cache=False,
metadata_connection_config=config,
beam_pipeline_args=beam_pipeline_args)
if __name__ == '__main__':
absl.logging.set_verbosity(absl.logging.INFO)
kubernetes_dag_runner.KubernetesDagRunner().run(
create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
module_file=_module_file,
serving_model_dir=_serving_model_dir,
beam_pipeline_args=_beam_pipeline_args)) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/kubernetes/examples/taxi_pipeline_kubernetes.py | 0.885316 | 0.419648 | taxi_pipeline_kubernetes.py | pypi |
"""Interfaces and functionality for dealing with service jobs."""
import abc
from typing import Set
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import task as task_lib
class ServiceJobManager(abc.ABC):
"""Interface for service job manager."""
@abc.abstractmethod
def ensure_services(
self, pipeline_state: pstate.PipelineState) -> Set[task_lib.NodeUid]:
"""Ensures necessary service jobs are started and healthy for the pipeline.
Service jobs are long-running jobs associated with a node or the pipeline
that persist across executions (eg: worker pools, Tensorboard, etc). Service
jobs are started before the nodes that depend on them are started.
`ensure_services` will be called in the orchestration loop periodically and
is expected to:
1. Start any service jobs required by the pipeline nodes.
2. Probe job health and handle failures. If README.ml-pipelines-sdk.md service job fails, the
corresponding node uids should be returned.
3. Optionally stop service jobs that are no longer needed. Whether or not a
service job is needed is context dependent; for example, in a typical sync
pipeline, one may want the TensorBoard job to continue running even after the
corresponding trainer has completed, while others like worker pool services
may be shut down.
Args:
pipeline_state: A `PipelineState` object for an active pipeline.
Returns:
Set of NodeUids of nodes whose service jobs are in a state of permanent
failure.
"""
@abc.abstractmethod
def stop_services(self, pipeline_state: pstate.PipelineState) -> None:
"""Stops all service jobs associated with the pipeline.
Args:
pipeline_state: A `PipelineState` object for an active pipeline.
"""
@abc.abstractmethod
def is_pure_service_node(self, pipeline_state: pstate.PipelineState,
node_id: str) -> bool:
"""Returns `True` if the given node only has service job(s).
Args:
pipeline_state: A `PipelineState` object for an active pipeline.
node_id: Id of the node in the pipeline to be checked.
Returns:
`True` if the node only has service job(s).
""" | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/core/service_jobs.py | 0.931618 | 0.59246 | service_jobs.py | pypi |
"""Pipeline-level operations."""
import copy
import functools
import threading
import time
from typing import List, Optional, Sequence, Set, Text
from absl import logging
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
# A coarse grained lock is used to ensure serialization of pipeline operations
# since there isn't a suitable MLMD transaction API.
_PIPELINE_OPS_LOCK = threading.RLock()
def _pipeline_ops_lock(fn):
"""Decorator to run `fn` within `_PIPELINE_OPS_LOCK` context."""
@functools.wraps(fn)
def _wrapper(*args, **kwargs):
with _PIPELINE_OPS_LOCK:
return fn(*args, **kwargs)
return _wrapper
def _to_status_not_ok_error(fn):
"""Decorator to catch exceptions and re-raise README.ml-pipelines-sdk.md `status_lib.StatusNotOkError`."""
@functools.wraps(fn)
def _wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
logging.exception('Error raised by `%s`:', fn.__name__)
if isinstance(e, status_lib.StatusNotOkError):
raise
raise status_lib.StatusNotOkError(
code=status_lib.Code.UNKNOWN,
message=f'`{fn.__name__}` error: {str(e)}')
return _wrapper
@_pipeline_ops_lock
def save_pipeline_property(mlmd_handle: metadata.Metadata,
pipeline_uid: task_lib.PipelineUid,
property_key: Text, property_value: Text) -> None:
"""Saves README.ml-pipelines-sdk.md property to the pipeline execution.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline_uid: Uid of the pipeline to be updated.
property_key: Key of the property to be saved.
property_value: Value of the property to be saved.
"""
with pstate.PipelineState.load(mlmd_handle,
pipeline_uid) as loaded_pipeline_state:
loaded_pipeline_state.save_property(property_key, property_value)
@_pipeline_ops_lock
def remove_pipeline_property(mlmd_handle: metadata.Metadata,
pipeline_uid: task_lib.PipelineUid,
property_key: Text) -> None:
"""Removes README.ml-pipelines-sdk.md property from the pipeline execution.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline_uid: Uid of the pipeline to be updated.
property_key: Key of the property to be removed.
"""
with pstate.PipelineState.load(mlmd_handle,
pipeline_uid) as loaded_pipeline_state:
loaded_pipeline_state.remove_property(property_key)
@_to_status_not_ok_error
@_pipeline_ops_lock
def initiate_pipeline_start(
mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline) -> pstate.PipelineState:
"""Initiates README.ml-pipelines-sdk.md pipeline start operation.
Upon success, MLMD is updated to signal that the given pipeline must be
started.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: IR of the pipeline to start.
Returns:
The `PipelineState` object upon success.
Raises:
status_lib.StatusNotOkError: Failure to initiate pipeline start.
"""
with pstate.PipelineState.new(mlmd_handle, pipeline) as pipeline_state:
pass
return pipeline_state
DEFAULT_WAIT_FOR_INACTIVATION_TIMEOUT_SECS = 120.0
@_to_status_not_ok_error
def stop_pipeline(
mlmd_handle: metadata.Metadata,
pipeline_uid: task_lib.PipelineUid,
timeout_secs: float = DEFAULT_WAIT_FOR_INACTIVATION_TIMEOUT_SECS) -> None:
"""Stops README.ml-pipelines-sdk.md pipeline.
Initiates README.ml-pipelines-sdk.md pipeline stop operation and waits for the pipeline execution to be
gracefully stopped in the orchestration loop.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline_uid: Uid of the pipeline to be stopped.
timeout_secs: Amount of time in seconds to wait for pipeline to stop.
Raises:
status_lib.StatusNotOkError: Failure to initiate pipeline stop.
"""
with _PIPELINE_OPS_LOCK:
with pstate.PipelineState.load(mlmd_handle, pipeline_uid) as pipeline_state:
pipeline_state.initiate_stop()
_wait_for_inactivation(
mlmd_handle, pipeline_state.execution, timeout_secs=timeout_secs)
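# Illustrative usage sketch (assumed names, not part of the original module):
#
#   pipeline_uid = task_lib.PipelineUid(
#       pipeline_id='my_pipeline', pipeline_run_id=None)
#   stop_pipeline(m, pipeline_uid, timeout_secs=60.0)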
@_to_status_not_ok_error
@_pipeline_ops_lock
def initiate_node_start(mlmd_handle: metadata.Metadata,
node_uid: task_lib.NodeUid) -> pstate.PipelineState:
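  """Initiates a node start operation for an async pipeline node.
  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: Uid of the node to be started.
  Returns:
    The `PipelineState` object upon success.
  Raises:
    status_lib.StatusNotOkError: Failure to initiate node start.
  """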
with pstate.PipelineState.load(mlmd_handle,
node_uid.pipeline_uid) as pipeline_state:
pipeline_state.initiate_node_start(node_uid)
return pipeline_state
@_to_status_not_ok_error
def stop_node(
mlmd_handle: metadata.Metadata,
node_uid: task_lib.NodeUid,
timeout_secs: float = DEFAULT_WAIT_FOR_INACTIVATION_TIMEOUT_SECS) -> None:
"""Stops README.ml-pipelines-sdk.md node in README.ml-pipelines-sdk.md pipeline.
Initiates README.ml-pipelines-sdk.md node stop operation and waits for the node execution to become
inactive.
Args:
mlmd_handle: A handle to the MLMD db.
node_uid: Uid of the node to be stopped.
timeout_secs: Amount of time in seconds to wait for node to stop.
Raises:
status_lib.StatusNotOkError: Failure to stop the node.
"""
with _PIPELINE_OPS_LOCK:
with pstate.PipelineState.load(mlmd_handle,
node_uid.pipeline_uid) as pipeline_state:
nodes = pstate.get_all_pipeline_nodes(pipeline_state.pipeline)
filtered_nodes = [n for n in nodes if n.node_info.id == node_uid.node_id]
if len(filtered_nodes) != 1:
raise status_lib.StatusNotOkError(
code=status_lib.Code.INTERNAL,
message=(
f'`stop_node` operation failed, unable to find node to stop: '
f'{node_uid}'))
node = filtered_nodes[0]
pipeline_state.initiate_node_stop(node_uid)
executions = task_gen_utils.get_executions(mlmd_handle, node)
active_executions = [
e for e in executions if execution_lib.is_execution_active(e)
]
if not active_executions:
# If there are no active executions, we're done.
return
if len(active_executions) > 1:
raise status_lib.StatusNotOkError(
code=status_lib.Code.INTERNAL,
message=(
f'Unexpected multiple active executions for node: {node_uid}'))
_wait_for_inactivation(
mlmd_handle, active_executions[0], timeout_secs=timeout_secs)
@_to_status_not_ok_error
def _wait_for_inactivation(
mlmd_handle: metadata.Metadata,
execution: metadata_store_pb2.Execution,
timeout_secs: float = DEFAULT_WAIT_FOR_INACTIVATION_TIMEOUT_SECS) -> None:
"""Waits for the given execution to become inactive.
Args:
mlmd_handle: A handle to the MLMD db.
    execution: Execution whose inactivation is awaited.
timeout_secs: Amount of time in seconds to wait.
Raises:
StatusNotOkError: With error code `DEADLINE_EXCEEDED` if execution is not
inactive after waiting approx. `timeout_secs`.
"""
polling_interval_secs = min(10.0, timeout_secs / 4)
end_time = time.time() + timeout_secs
while end_time - time.time() > 0:
updated_executions = mlmd_handle.store.get_executions_by_id([execution.id])
if not execution_lib.is_execution_active(updated_executions[0]):
return
time.sleep(max(0, min(polling_interval_secs, end_time - time.time())))
raise status_lib.StatusNotOkError(
code=status_lib.Code.DEADLINE_EXCEEDED,
message=(f'Timed out ({timeout_secs} secs) waiting for execution '
f'inactivation.'))
@_to_status_not_ok_error
@_pipeline_ops_lock
def orchestrate(
mlmd_handle: metadata.Metadata,
task_queue: tq.TaskQueue,
service_job_manager: Optional[service_jobs.ServiceJobManager] = None,
) -> None:
"""Performs README.ml-pipelines-sdk.md single iteration of the orchestration loop.
Embodies the core functionality of the main orchestration loop that scans MLMD
pipeline execution states, generates and enqueues the tasks to be performed.
Args:
mlmd_handle: A handle to the MLMD db.
task_queue: A `TaskQueue` instance into which any tasks will be enqueued.
service_job_manager: An optional `ServiceJobManager` instance if service
jobs are supported.
Raises:
status_lib.StatusNotOkError: If error generating tasks.
"""
pipeline_states = _get_pipeline_states(mlmd_handle)
if not pipeline_states:
logging.info('No active pipelines to run.')
return
active_pipeline_states = []
stop_initiated_pipeline_states = []
for pipeline_state in pipeline_states:
if pipeline_state.is_stop_initiated():
stop_initiated_pipeline_states.append(pipeline_state)
elif execution_lib.is_execution_active(pipeline_state.execution):
active_pipeline_states.append(pipeline_state)
else:
raise status_lib.StatusNotOkError(
code=status_lib.Code.INTERNAL,
message=(f'Found pipeline (uid: {pipeline_state.pipeline_uid}) which '
f'is neither active nor stop-initiated.'))
if stop_initiated_pipeline_states:
logging.info(
'Stop-initiated pipeline uids:\n%s', '\n'.join(
str(pipeline_state.pipeline_uid)
for pipeline_state in stop_initiated_pipeline_states))
_process_stop_initiated_pipelines(mlmd_handle, task_queue,
service_job_manager,
stop_initiated_pipeline_states)
if active_pipeline_states:
logging.info(
'Active (excluding stop-initiated) pipeline uids:\n%s', '\n'.join(
str(pipeline_state.pipeline_uid)
for pipeline_state in active_pipeline_states))
_process_active_pipelines(mlmd_handle, task_queue, service_job_manager,
active_pipeline_states)
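# Illustrative driver sketch (assumed loop, not part of the original module):
# `orchestrate` performs a single iteration, so a surrounding loop is expected
# to call it periodically:
#
#   task_queue = tq.TaskQueue()
#   while True:
#     orchestrate(m, task_queue, service_job_manager=None)
#     time.sleep(10)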
def _get_pipeline_states(
mlmd_handle: metadata.Metadata) -> List[pstate.PipelineState]:
"""Scans MLMD and returns pipeline states."""
contexts = pstate.get_orchestrator_contexts(mlmd_handle)
result = []
for context in contexts:
try:
pipeline_state = pstate.PipelineState.load_from_orchestrator_context(
mlmd_handle, context)
except status_lib.StatusNotOkError as e:
if e.code == status_lib.Code.NOT_FOUND:
# Ignore any old contexts with no associated active pipelines.
logging.info(e.message)
continue
else:
raise
result.append(pipeline_state)
return result
def _get_pure_service_node_ids(
service_job_manager: service_jobs.ServiceJobManager,
pipeline_state: pstate.PipelineState) -> Set[str]:
result = set()
for node in pstate.get_all_pipeline_nodes(pipeline_state.pipeline):
if service_job_manager.is_pure_service_node(pipeline_state,
node.node_info.id):
result.add(node.node_info.id)
return result
def _process_stop_initiated_pipelines(
mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
service_job_manager: Optional[service_jobs.ServiceJobManager],
pipeline_states: Sequence[pstate.PipelineState]) -> None:
"""Processes stop initiated pipelines."""
for pipeline_state in pipeline_states:
pipeline = pipeline_state.pipeline
pure_service_node_ids = _get_pure_service_node_ids(
service_job_manager, pipeline_state) if service_job_manager else set()
execution = pipeline_state.execution
has_active_executions = False
for node in pstate.get_all_pipeline_nodes(pipeline):
if node.node_info.id not in pure_service_node_ids:
if _maybe_enqueue_cancellation_task(mlmd_handle, pipeline, node,
task_queue):
has_active_executions = True
if not has_active_executions:
if service_job_manager is not None:
# Stop all the services associated with the pipeline.
service_job_manager.stop_services(pipeline_state)
# Update pipeline execution state in MLMD.
updated_execution = copy.deepcopy(execution)
updated_execution.last_known_state = metadata_store_pb2.Execution.CANCELED
mlmd_handle.store.put_executions([updated_execution])
def _process_active_pipelines(
mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
service_job_manager: Optional[service_jobs.ServiceJobManager],
pipeline_states: Sequence[pstate.PipelineState]) -> None:
"""Processes active pipelines."""
for pipeline_state in pipeline_states:
pipeline = pipeline_state.pipeline
execution = pipeline_state.execution
assert execution.last_known_state in (metadata_store_pb2.Execution.NEW,
metadata_store_pb2.Execution.RUNNING)
if execution.last_known_state != metadata_store_pb2.Execution.RUNNING:
updated_execution = copy.deepcopy(execution)
updated_execution.last_known_state = metadata_store_pb2.Execution.RUNNING
mlmd_handle.store.put_executions([updated_execution])
if service_job_manager is not None:
# Ensure all the required services are running.
_ensure_services(service_job_manager, pipeline_state)
pure_service_node_ids = _get_pure_service_node_ids(
service_job_manager, pipeline_state)
else:
pure_service_node_ids = set()
# Create cancellation tasks for stop-initiated nodes if necessary.
stop_initiated_nodes = _get_stop_initiated_nodes(pipeline_state)
for node in stop_initiated_nodes:
if node.node_info.id not in pure_service_node_ids:
_maybe_enqueue_cancellation_task(mlmd_handle, pipeline, node,
task_queue)
ignore_node_ids = set(
n.node_info.id for n in stop_initiated_nodes) | pure_service_node_ids
# Initialize task generator for the pipeline.
if pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC:
generator = sync_pipeline_task_gen.SyncPipelineTaskGenerator(
mlmd_handle, pipeline, task_queue.contains_task_id, ignore_node_ids)
elif pipeline.execution_mode == pipeline_pb2.Pipeline.ASYNC:
generator = async_pipeline_task_gen.AsyncPipelineTaskGenerator(
mlmd_handle, pipeline, task_queue.contains_task_id, ignore_node_ids)
else:
raise status_lib.StatusNotOkError(
code=status_lib.Code.FAILED_PRECONDITION,
message=(
f'Only SYNC and ASYNC pipeline execution modes supported; '
f'found pipeline with execution mode: {pipeline.execution_mode}'))
# TODO(goutham): Consider concurrent task generation.
tasks = generator.generate()
for task in tasks:
task_queue.enqueue(task)
def _ensure_services(service_job_manager: service_jobs.ServiceJobManager,
pipeline_state: pstate.PipelineState) -> None:
failed_node_uids = service_job_manager.ensure_services(pipeline_state)
if failed_node_uids:
with pipeline_state:
for node_uid in failed_node_uids:
pipeline_state.initiate_node_stop(node_uid)
def _get_stop_initiated_nodes(
pipeline_state: pstate.PipelineState) -> List[pipeline_pb2.PipelineNode]:
"""Returns list of all stop initiated nodes."""
nodes = pstate.get_all_pipeline_nodes(pipeline_state.pipeline)
result = []
for node in nodes:
node_uid = task_lib.NodeUid.from_pipeline_node(pipeline_state.pipeline,
node)
if pipeline_state.is_node_stop_initiated(node_uid):
result.append(node)
return result
def _maybe_enqueue_cancellation_task(mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline,
node: pipeline_pb2.PipelineNode,
task_queue: tq.TaskQueue) -> bool:
"""Enqueues README.ml-pipelines-sdk.md node cancellation task if not already stopped.
If the node has an ExecNodeTask in the task queue, issue README.ml-pipelines-sdk.md cancellation.
Otherwise, if the node has an active execution in MLMD but no ExecNodeTask
enqueued, it may be due to orchestrator restart after stopping was initiated
but before the schedulers could finish. So, enqueue an ExecNodeTask with
is_cancelled set to give README.ml-pipelines-sdk.md chance for the scheduler to finish gracefully.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: The pipeline containing the node to cancel.
node: The node to cancel.
task_queue: A `TaskQueue` instance into which any cancellation tasks will be
enqueued.
Returns:
    `True` if a cancellation task was enqueued. `False` if the node is already
stopped or no cancellation was required.
"""
exec_node_task_id = task_lib.exec_node_task_id_from_pipeline_node(
pipeline, node)
if task_queue.contains_task_id(exec_node_task_id):
task_queue.enqueue(
task_lib.CancelNodeTask(
node_uid=task_lib.NodeUid.from_pipeline_node(pipeline, node)))
return True
else:
executions = task_gen_utils.get_executions(mlmd_handle, node)
exec_node_task = task_gen_utils.generate_task_from_active_execution(
mlmd_handle, pipeline, node, executions, is_cancelled=True)
if exec_node_task:
task_queue.enqueue(exec_node_task)
return True
  return False
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/core/pipeline_ops.py | 0.897734 | 0.24915 | pipeline_ops.py | pypi |
"""TaskManager manages the execution and cancellation of tasks."""
from concurrent import futures
import copy
import threading
import time
import typing
from typing import Optional
from absl import logging
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import task_scheduler as ts
from tfx.orchestration.portable import execution_publish_utils
from ml_metadata.proto import metadata_store_pb2
_MAX_DEQUEUE_WAIT_SECS = 5.0
class Error(Exception):
"""Top-level error for current module."""
class TasksProcessingError(Error):
"""Error that accumulates other errors raised during processing tasks."""
def __init__(self, errors):
err_msg = '\n'.join(str(e) for e in errors)
super(TasksProcessingError, self).__init__(err_msg)
self.errors = errors
class TaskManager:
"""TaskManager acts on the tasks fetched from the task queues.
  TaskManager instance can be used as a context manager; an illustrative usage
  sketch follows this docstring.
  """
def __init__(self,
mlmd_handle: metadata.Metadata,
task_queue: tq.TaskQueue,
max_active_task_schedulers: int,
max_dequeue_wait_secs: float = _MAX_DEQUEUE_WAIT_SECS,
process_all_queued_tasks_before_exit: bool = False):
"""Constructs `TaskManager`.
Args:
mlmd_handle: ML metadata db connection.
task_queue: Task queue.
max_active_task_schedulers: Maximum number of task schedulers that can be
active at once.
max_dequeue_wait_secs: Maximum time to wait when dequeuing if the queue is
empty.
process_all_queued_tasks_before_exit: All existing items in the queues are
processed before exiting the context manager. This is useful for
deterministic behavior in tests.
"""
self._mlmd_handle = mlmd_handle
self._task_queue = task_queue
self._max_dequeue_wait_secs = max_dequeue_wait_secs
self._process_all_queued_tasks_before_exit = (
process_all_queued_tasks_before_exit)
self._tm_lock = threading.Lock()
self._stop_event = threading.Event()
self._scheduler_by_node_uid = {}
# Async executor for the main task management thread.
self._main_executor = futures.ThreadPoolExecutor(max_workers=1)
self._main_future = None
# Async executor for task schedulers.
self._ts_executor = futures.ThreadPoolExecutor(
max_workers=max_active_task_schedulers)
self._ts_futures = set()
# Last MLMD publish time since epoch.
self._last_mlmd_publish_time = None
self._publish_time_lock = threading.Lock()
def __enter__(self):
if self._main_future is not None:
raise RuntimeError('TaskManager already started.')
self._main_future = self._main_executor.submit(self._main)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._main_future is None:
raise RuntimeError('TaskManager not started.')
self._stop_event.set()
self._main_executor.shutdown()
def last_mlmd_publish_time(self) -> Optional[float]:
"""Returns time-since-epoch of last MLMD publish; `None` if never published."""
with self._publish_time_lock:
return self._last_mlmd_publish_time
def done(self) -> bool:
"""Returns `True` if the main task management thread has exited.
Raises:
RuntimeError: If `done` called without entering the task manager context.
"""
if self._main_future is None:
raise RuntimeError('Task manager context not entered.')
return self._main_future.done()
def exception(self) -> Optional[BaseException]:
"""Returns exception raised by the main task management thread (if any).
Raises:
RuntimeError: If `exception` called without entering the task manager
context or if the main thread is not done (`done` returns `False`).
"""
if self._main_future is None:
raise RuntimeError('Task manager context not entered.')
if not self._main_future.done():
raise RuntimeError('Task manager main thread not done; call should be '
'conditioned on `done` returning `True`.')
return self._main_future.exception()
def _main(self) -> None:
"""Runs the main task management loop."""
try:
while not self._stop_event.is_set():
self._cleanup()
task = self._task_queue.dequeue(self._max_dequeue_wait_secs)
if task is None:
continue
self._handle_task(task)
finally:
if self._process_all_queued_tasks_before_exit:
# Process any remaining tasks from the queue before exiting. This is
# mainly to make tests deterministic.
while True:
task = self._task_queue.dequeue()
if task is None:
break
self._handle_task(task)
# Final cleanup before exiting. Any exceptions raised here are
# automatically chained with any raised in the try block.
self._cleanup(True)
def _handle_task(self, task: task_lib.Task) -> None:
"""Dispatches task to the task specific handler."""
if task_lib.is_exec_node_task(task):
self._handle_exec_node_task(typing.cast(task_lib.ExecNodeTask, task))
elif task_lib.is_cancel_node_task(task):
self._handle_cancel_node_task(typing.cast(task_lib.CancelNodeTask, task))
else:
raise RuntimeError('Cannot dispatch bad task: {}'.format(task))
def _handle_exec_node_task(self, task: task_lib.ExecNodeTask) -> None:
"""Handles `ExecNodeTask`."""
logging.info('Handling ExecNodeTask, task-id: %s', task.task_id)
node_uid = task.node_uid
with self._tm_lock:
if node_uid in self._scheduler_by_node_uid:
raise RuntimeError(
'Cannot create multiple task schedulers for the same task; '
'task_id: {}'.format(task.task_id))
scheduler = ts.TaskSchedulerRegistry.create_task_scheduler(
self._mlmd_handle, task.pipeline, task)
self._scheduler_by_node_uid[node_uid] = scheduler
self._ts_futures.add(
self._ts_executor.submit(self._process_exec_node_task, scheduler,
task))
def _handle_cancel_node_task(self, task: task_lib.CancelNodeTask) -> None:
"""Handles `CancelNodeTask`."""
logging.info('Handling CancelNodeTask, task-id: %s', task.task_id)
node_uid = task.node_uid
with self._tm_lock:
scheduler = self._scheduler_by_node_uid.get(node_uid)
if scheduler is None:
logging.info(
'No task scheduled for node uid: %s. The task might have already '
'completed before it could be cancelled.', task.node_uid)
else:
scheduler.cancel()
self._task_queue.task_done(task)
def _process_exec_node_task(self, scheduler: ts.TaskScheduler,
task: task_lib.ExecNodeTask) -> None:
"""Processes an `ExecNodeTask` using the given task scheduler."""
    # This is a blocking call to the scheduler which can take a long time to
    # complete for some types of task schedulers. The scheduler is expected to
    # handle any internal errors gracefully and return the result with an error
    # status. But in case the scheduler raises an exception, it is considered
    # a failed execution and MLMD is updated accordingly.
try:
result = scheduler.schedule()
except Exception as e: # pylint: disable=broad-except
logging.info(
'Exception raised by task scheduler for node uid %s; error: %s',
task.node_uid, e)
result = ts.TaskSchedulerResult(
status=status_lib.Status(
code=status_lib.Code.ABORTED, message=str(e)))
logging.info('For ExecNodeTask id: %s, task-scheduler result status: %s',
task.task_id, result.status)
_publish_execution_results(
mlmd_handle=self._mlmd_handle, task=task, result=result)
with self._publish_time_lock:
self._last_mlmd_publish_time = time.time()
with self._tm_lock:
del self._scheduler_by_node_uid[task.node_uid]
self._task_queue.task_done(task)
def _cleanup(self, final: bool = False) -> None:
"""Cleans up any remnant effects."""
if final:
# Waits for all pending task scheduler futures to complete.
self._ts_executor.shutdown()
done_futures = set(fut for fut in self._ts_futures if fut.done())
self._ts_futures -= done_futures
exceptions = [fut.exception() for fut in done_futures if fut.exception()]
if exceptions:
raise TasksProcessingError(exceptions)
def _update_execution_state_in_mlmd(
mlmd_handle: metadata.Metadata, execution: metadata_store_pb2.Execution,
new_state: metadata_store_pb2.Execution.State) -> None:
updated_execution = copy.deepcopy(execution)
updated_execution.last_known_state = new_state
mlmd_handle.store.put_executions([updated_execution])
def _publish_execution_results(mlmd_handle: metadata.Metadata,
task: task_lib.ExecNodeTask,
result: ts.TaskSchedulerResult) -> None:
"""Publishes execution results to MLMD."""
def _update_state(status: status_lib.Status) -> None:
assert status.code != status_lib.Code.OK
if status.code == status_lib.Code.CANCELLED:
execution_state = metadata_store_pb2.Execution.CANCELED
state_msg = 'cancelled'
else:
execution_state = metadata_store_pb2.Execution.FAILED
state_msg = 'failed'
logging.info(
'Got error (status: %s) for task id: %s; marking execution (id: %s) '
'as %s.', status, task.task_id, task.execution.id, state_msg)
# TODO(goutham): Also record error code and error message as custom property
# of the execution.
_update_execution_state_in_mlmd(mlmd_handle, task.execution,
execution_state)
if result.status.code != status_lib.Code.OK:
_update_state(result.status)
return
if (result.executor_output and
result.executor_output.execution_result.code != status_lib.Code.OK):
_update_state(status_lib.Status(
code=result.executor_output.execution_result.code,
message=result.executor_output.execution_result.result_message))
return
execution_publish_utils.publish_succeeded_execution(mlmd_handle,
task.execution.id,
task.contexts,
task.output_artifacts,
                                                      result.executor_output)
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/core/task_manager.py | 0.861407 | 0.22361 | task_manager.py | pypi |
"""Pipeline state management functionality."""
import base64
from typing import List, Text
from absl import logging
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
_ORCHESTRATOR_RESERVED_ID = '__ORCHESTRATOR__'
_PIPELINE_IR = 'pipeline_ir'
_STOP_INITIATED = 'stop_initiated'
_NODE_STOP_INITIATED_PREFIX = 'node_stop_initiated_'
_ORCHESTRATOR_EXECUTION_TYPE = metadata_store_pb2.ExecutionType(
name=_ORCHESTRATOR_RESERVED_ID,
properties={_PIPELINE_IR: metadata_store_pb2.STRING})
class PipelineState:
"""Class for dealing with pipeline state. Can be used as README.ml-pipelines-sdk.md context manager."""
def __init__(self,
mlmd_handle: metadata.Metadata,
pipeline_uid: task_lib.PipelineUid,
context: metadata_store_pb2.Context,
execution: metadata_store_pb2.Execution,
commit: bool = False):
"""Constructor. Use one of the factory methods to initialize."""
self.mlmd_handle = mlmd_handle
self.pipeline_uid = pipeline_uid
self.context = context
self.execution = execution
self._commit = commit
self._pipeline = None # lazily set
@classmethod
def new(cls, mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline) -> 'PipelineState':
"""Creates README.ml-pipelines-sdk.md `PipelineState` object for README.ml-pipelines-sdk.md new pipeline.
No active pipeline with the same pipeline uid should exist for the call to
be successful.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: IR of the pipeline.
Returns:
A `PipelineState` object.
Raises:
      status_lib.StatusNotOkError: If a pipeline with the same UID is already
        active.
"""
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
context = context_lib.register_context_if_not_exists(
mlmd_handle,
context_type_name=_ORCHESTRATOR_RESERVED_ID,
context_name=orchestrator_context_name(pipeline_uid))
executions = mlmd_handle.store.get_executions_by_context(context.id)
if any(e for e in executions if execution_lib.is_execution_active(e)):
raise status_lib.StatusNotOkError(
code=status_lib.Code.ALREADY_EXISTS,
message=f'Pipeline with uid {pipeline_uid} already active.')
execution = execution_lib.prepare_execution(
mlmd_handle,
_ORCHESTRATOR_EXECUTION_TYPE,
metadata_store_pb2.Execution.NEW,
exec_properties={
_PIPELINE_IR:
base64.b64encode(pipeline.SerializeToString()).decode('utf-8')
})
return cls(
mlmd_handle=mlmd_handle,
pipeline_uid=pipeline_uid,
context=context,
execution=execution,
commit=True)
@classmethod
def load(cls, mlmd_handle: metadata.Metadata,
pipeline_uid: task_lib.PipelineUid) -> 'PipelineState':
"""Loads pipeline state from MLMD.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline_uid: Uid of the pipeline state to load.
Returns:
A `PipelineState` object.
Raises:
status_lib.StatusNotOkError: With code=NOT_FOUND if no active pipeline
with the given pipeline uid exists in MLMD. With code=INTERNAL if more
than 1 active execution exists for given pipeline uid.
"""
context = mlmd_handle.store.get_context_by_type_and_name(
type_name=_ORCHESTRATOR_RESERVED_ID,
context_name=orchestrator_context_name(pipeline_uid))
if not context:
raise status_lib.StatusNotOkError(
code=status_lib.Code.NOT_FOUND,
message=f'No active pipeline with uid {pipeline_uid} found.')
return cls.load_from_orchestrator_context(mlmd_handle, context)
@classmethod
def load_from_orchestrator_context(
cls, mlmd_handle: metadata.Metadata,
context: metadata_store_pb2.Context) -> 'PipelineState':
"""Loads pipeline state for active pipeline under given orchestrator context.
Args:
mlmd_handle: A handle to the MLMD db.
context: Pipeline context under which to find the pipeline execution.
Returns:
A `PipelineState` object.
Raises:
status_lib.StatusNotOkError: With code=NOT_FOUND if no active pipeline
exists for the given context in MLMD. With code=INTERNAL if more than 1
active execution exists for given pipeline uid.
"""
pipeline_uid = pipeline_uid_from_orchestrator_context(context)
active_executions = [
e for e in mlmd_handle.store.get_executions_by_context(context.id)
if execution_lib.is_execution_active(e)
]
if not active_executions:
raise status_lib.StatusNotOkError(
code=status_lib.Code.NOT_FOUND,
message=f'No active pipeline with uid {pipeline_uid} to load state.')
if len(active_executions) > 1:
raise status_lib.StatusNotOkError(
code=status_lib.Code.INTERNAL,
message=(
f'Expected 1 but found {len(active_executions)} active pipeline '
f'executions for pipeline uid: {pipeline_uid}'))
return cls(
mlmd_handle=mlmd_handle,
pipeline_uid=pipeline_uid,
context=context,
execution=active_executions[0],
commit=False)
@property
def pipeline(self) -> pipeline_pb2.Pipeline:
if not self._pipeline:
pipeline_ir_b64 = data_types_utils.get_metadata_value(
self.execution.properties[_PIPELINE_IR])
pipeline = pipeline_pb2.Pipeline()
pipeline.ParseFromString(base64.b64decode(pipeline_ir_b64))
self._pipeline = pipeline
return self._pipeline
def initiate_stop(self) -> None:
"""Updates pipeline state to signal stopping pipeline execution."""
data_types_utils.set_metadata_value(
self.execution.custom_properties[_STOP_INITIATED], 1)
self._commit = True
def is_stop_initiated(self) -> bool:
"""Returns `True` if pipeline execution stopping has been initiated."""
if _STOP_INITIATED in self.execution.custom_properties:
return data_types_utils.get_metadata_value(
self.execution.custom_properties[_STOP_INITIATED]) == 1
return False
def initiate_node_start(self, node_uid: task_lib.NodeUid) -> None:
"""Updates pipeline state to signal that README.ml-pipelines-sdk.md node should be started."""
if self.pipeline.execution_mode != pipeline_pb2.Pipeline.ASYNC:
raise status_lib.StatusNotOkError(
code=status_lib.Code.UNIMPLEMENTED,
message='Node can be started only for async pipelines.')
if not _is_node_uid_in_pipeline(node_uid, self.pipeline):
raise status_lib.StatusNotOkError(
code=status_lib.Code.INVALID_ARGUMENT,
message=(f'Node given by uid {node_uid} does not belong to pipeline '
f'given by uid {self.pipeline_uid}'))
property_name = _node_stop_initiated_property(node_uid)
if property_name not in self.execution.custom_properties:
return
del self.execution.custom_properties[property_name]
self._commit = True
def initiate_node_stop(self, node_uid: task_lib.NodeUid) -> None:
"""Updates pipeline state to signal that README.ml-pipelines-sdk.md node should be stopped."""
if self.pipeline.execution_mode != pipeline_pb2.Pipeline.ASYNC:
raise status_lib.StatusNotOkError(
code=status_lib.Code.UNIMPLEMENTED,
          message='Node can be stopped only for async pipelines.')
if not _is_node_uid_in_pipeline(node_uid, self.pipeline):
raise status_lib.StatusNotOkError(
code=status_lib.Code.INVALID_ARGUMENT,
message=(f'Node given by uid {node_uid} does not belong to pipeline '
f'given by uid {self.pipeline_uid}'))
data_types_utils.set_metadata_value(
self.execution.custom_properties[_node_stop_initiated_property(
node_uid)], 1)
self._commit = True
def is_node_stop_initiated(self, node_uid: task_lib.NodeUid) -> bool:
"""Returns `True` if stopping has been initiated for the given node."""
if node_uid.pipeline_uid != self.pipeline_uid:
raise RuntimeError(
f'Node given by uid {node_uid} does not belong to pipeline given '
f'by uid {self.pipeline_uid}')
property_name = _node_stop_initiated_property(node_uid)
if property_name in self.execution.custom_properties:
return data_types_utils.get_metadata_value(
self.execution.custom_properties[property_name]) == 1
return False
def save_property(self, property_key: Text, property_value: Text) -> None:
"""Saves README.ml-pipelines-sdk.md custom property to the pipeline execution."""
self.execution.custom_properties[property_key].string_value = property_value
self._commit = True
def remove_property(self, property_key: Text) -> None:
"""Removes README.ml-pipelines-sdk.md custom property of the pipeline execution if exists."""
if self.execution.custom_properties.get(property_key):
del self.execution.custom_properties[property_key]
self._commit = True
def commit(self) -> None:
"""Commits pipeline state to MLMD if there are any mutations."""
if self._commit:
self.execution = execution_lib.put_execution(self.mlmd_handle,
self.execution,
[self.context])
logging.info('Committed execution (id: %s) for pipeline with uid: %s',
self.execution.id, self.pipeline_uid)
self._commit = False
def __enter__(self) -> 'PipelineState':
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.commit()
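# Illustrative usage sketch (assumed names, not part of the original module):
# mutations made inside the context manager are committed to MLMD on exit:
#
#   with PipelineState.load(m, pipeline_uid) as pipeline_state:
#     pipeline_state.initiate_stop()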
def get_orchestrator_contexts(
mlmd_handle: metadata.Metadata) -> List[metadata_store_pb2.Context]:
return mlmd_handle.store.get_contexts_by_type(_ORCHESTRATOR_RESERVED_ID)
# TODO(goutham): Handle sync pipelines.
def orchestrator_context_name(pipeline_uid: task_lib.PipelineUid) -> str:
"""Returns orchestrator reserved context name."""
return f'{_ORCHESTRATOR_RESERVED_ID}_{pipeline_uid.pipeline_id}'
# TODO(goutham): Handle sync pipelines.
def pipeline_uid_from_orchestrator_context(
context: metadata_store_pb2.Context) -> task_lib.PipelineUid:
"""Returns pipeline uid from orchestrator reserved context."""
pipeline_id = context.name.split(_ORCHESTRATOR_RESERVED_ID + '_')[1]
return task_lib.PipelineUid(pipeline_id=pipeline_id, pipeline_run_id=None)
def _node_stop_initiated_property(node_uid: task_lib.NodeUid) -> str:
return f'{_NODE_STOP_INITIATED_PREFIX}{node_uid.node_id}'
def get_all_pipeline_nodes(
pipeline: pipeline_pb2.Pipeline) -> List[pipeline_pb2.PipelineNode]:
"""Returns all pipeline nodes in the given pipeline."""
result = []
for pipeline_or_node in pipeline.nodes:
which = pipeline_or_node.WhichOneof('node')
# TODO(goutham): Handle sub-pipelines.
# TODO(goutham): Handle system nodes.
if which == 'pipeline_node':
result.append(pipeline_or_node.pipeline_node)
else:
raise NotImplementedError('Only pipeline nodes supported.')
return result
def _is_node_uid_in_pipeline(node_uid: task_lib.NodeUid,
pipeline: pipeline_pb2.Pipeline) -> bool:
"""Returns `True` if the `node_uid` belongs to the given pipeline."""
for node in get_all_pipeline_nodes(pipeline):
if task_lib.NodeUid.from_pipeline_node(pipeline, node) == node_uid:
return True
  return False
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/core/pipeline_state.py | 0.923721 | 0.296602 | pipeline_state.py | pypi |
import abc
import typing
from typing import Dict, List, Optional, Text, Type, TypeVar
import attr
from tfx import types
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
# TODO(goutham): Include pipeline key/tag in PipelineUid.
@attr.s(frozen=True)
class PipelineUid:
"""Unique identifier for README.ml-pipelines-sdk.md pipeline.
Attributes:
pipeline_id: Id of the pipeline containing the node. Corresponds to
`Pipeline.pipeline_info.id` in the pipeline IR.
pipeline_run_id: This is set only for sync pipelines and corresponds to
`PipelineRuntimeSpec.pipeline_run_id` in the pipeline IR.
"""
pipeline_id = attr.ib(type=Text)
pipeline_run_id = attr.ib(type=Optional[Text])
@classmethod
def from_pipeline(cls: Type['PipelineUid'],
pipeline: pipeline_pb2.Pipeline) -> 'PipelineUid':
if pipeline.runtime_spec.HasField('pipeline_run_id'):
pipeline_run_id = (
pipeline.runtime_spec.pipeline_run_id.field_value.string_value)
else:
pipeline_run_id = None
return cls(
pipeline_id=pipeline.pipeline_info.id, pipeline_run_id=pipeline_run_id)
@attr.s(frozen=True)
class NodeUid:
"""Unique identifier for README.ml-pipelines-sdk.md node in the pipeline.
Attributes:
pipeline_uid: The pipeline UID.
node_id: Node id. Corresponds to `PipelineNode.node_info.id` in the pipeline
IR.
"""
pipeline_uid = attr.ib(type=PipelineUid)
node_id = attr.ib(type=Text)
@classmethod
def from_pipeline_node(cls: Type['NodeUid'], pipeline: pipeline_pb2.Pipeline,
node: pipeline_pb2.PipelineNode) -> 'NodeUid':
return cls(
pipeline_uid=PipelineUid.from_pipeline(pipeline),
node_id=node.node_info.id)
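# Illustrative sketch (assumed ids, not part of the original module):
# constructing uids directly for an async pipeline:
#
#   pipeline_uid = PipelineUid(pipeline_id='my_pipeline', pipeline_run_id=None)
#   node_uid = NodeUid(pipeline_uid=pipeline_uid, node_id='my_trainer')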
# Task id can be any hashable type.
TaskId = typing.Hashable
_TaskT = TypeVar('_TaskT', bound='Task')
class Task(abc.ABC):
"""Task instructs the work to be performed."""
@property
@abc.abstractmethod
def task_id(self) -> TaskId:
"""Returns README.ml-pipelines-sdk.md unique identifier for this task.
The concrete implementation must ensure that the returned task id is unique
across all task types.
"""
@classmethod
def task_type_id(cls: Type[_TaskT]) -> Text:
"""Returns task type id."""
return cls.__name__
class HasNodeUid(abc.ABC):
"""Abstract mixin class for node tasks."""
@property
@abc.abstractmethod
def node_uid(self) -> NodeUid:
"""Returns the unique identifier of the node."""
@attr.s(frozen=True)
class ExecNodeTask(Task, HasNodeUid):
"""Task to instruct execution of README.ml-pipelines-sdk.md node in the pipeline.
Attributes:
execution: MLMD execution associated with current node.
contexts: List of contexts associated with the execution.
exec_properties: Execution properties of the execution.
input_artifacts: Input artifacts dict.
output_artifacts: Output artifacts dict.
executor_output_uri: URI for the executor output.
stateful_working_dir: Working directory for the node execution.
pipeline: The pipeline IR proto containing the node to be executed.
    is_cancelled: Indicates whether this is a cancelled execution. The task
scheduler is expected to gracefully exit after doing any necessary
cleanup.
"""
_node_uid = attr.ib(type=NodeUid)
execution = attr.ib(type=metadata_store_pb2.Execution)
contexts = attr.ib(type=List[metadata_store_pb2.Context])
exec_properties = attr.ib(type=Dict[Text, types.Property])
input_artifacts = attr.ib(type=Dict[Text, List[types.Artifact]])
output_artifacts = attr.ib(type=Dict[Text, List[types.Artifact]])
executor_output_uri = attr.ib(type=Text)
stateful_working_dir = attr.ib(type=Text)
pipeline = attr.ib(type=pipeline_pb2.Pipeline)
is_cancelled = attr.ib(type=bool, default=False)
@property
def node_uid(self) -> NodeUid:
return self._node_uid
@property
def task_id(self) -> TaskId:
return _exec_node_task_id(self.task_type_id(), self.node_uid)
@attr.s(frozen=True)
class CancelNodeTask(Task, HasNodeUid):
"""Task to instruct cancellation of an ongoing node execution."""
_node_uid = attr.ib(type=NodeUid)
@property
def node_uid(self) -> NodeUid:
return self._node_uid
@property
def task_id(self) -> TaskId:
return (self.task_type_id(), self.node_uid)
def is_exec_node_task(task: Task) -> bool:
return task.task_type_id() == ExecNodeTask.task_type_id()
def is_cancel_node_task(task: Task) -> bool:
return task.task_type_id() == CancelNodeTask.task_type_id()
def exec_node_task_id_from_pipeline_node(
pipeline: pipeline_pb2.Pipeline, node: pipeline_pb2.PipelineNode) -> TaskId:
"""Returns task id of an `ExecNodeTask` from pipeline and node."""
return _exec_node_task_id(ExecNodeTask.task_type_id(),
NodeUid.from_pipeline_node(pipeline, node))
def _exec_node_task_id(task_type_id: Text, node_uid: NodeUid) -> TaskId:
  return (task_type_id, node_uid)
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/core/task.py | 0.735642 | 0.285251 | task.py | pypi |
"""Task queue."""
import queue
import threading
from typing import Optional
from tfx.orchestration.experimental.core import task as task_lib
class TaskQueue:
"""A thread-safe task queue with duplicate detection.
  The life-cycle of a task starts with producers calling `enqueue`. Consumers
call `dequeue` to obtain the tasks in FIFO order. When processing is complete,
consumers must release the tasks by calling `task_done`.
"""
def __init__(self):
self._lock = threading.Lock()
self._task_ids = set()
# Note: the TaskQueue implementation relies on the queue being unbounded.
# This must not change without revising the implementation.
self._queue = queue.Queue()
self._pending_tasks_by_id = {}
def enqueue(self, task: task_lib.Task) -> bool:
"""Enqueues the given task if no prior task with the same id exists.
Args:
task: A `Task` object.
Returns:
      `True` if the task could be enqueued. `False` if a task with the same id
already exists.
"""
task_id = task.task_id
with self._lock:
if task_id in self._task_ids:
return False
self._task_ids.add(task_id)
self._queue.put((task_id, task))
return True
def dequeue(self,
max_wait_secs: Optional[float] = None) -> Optional[task_lib.Task]:
"""Removes and returns README.ml-pipelines-sdk.md task from the queue.
Once the processing is complete, queue consumers must call `task_done`.
Args:
      max_wait_secs: If not `None`, waits a maximum of `max_wait_secs` when the
        queue is empty for a task to be enqueued. If no task is present in the
queue after the wait, `None` is returned. If `max_wait_secs` is `None`
(default), returns `None` without waiting when the queue is empty.
Returns:
A `Task` or `None` if the queue is empty.
"""
try:
task_id, task = self._queue.get(
block=max_wait_secs is not None, timeout=max_wait_secs)
except queue.Empty:
return None
with self._lock:
self._pending_tasks_by_id[task_id] = task
return task
def task_done(self, task: task_lib.Task) -> None:
"""Marks the processing of README.ml-pipelines-sdk.md task as done.
Consumers should call this method after the task is processed.
Args:
task: A `Task` object.
Raises:
      RuntimeError: If an attempt is made to mark a non-existent or non-dequeued
task as done.
"""
task_id = task.task_id
with self._lock:
if task_id not in self._pending_tasks_by_id:
if task_id in self._task_ids:
raise RuntimeError(
'Must call `dequeue` before calling `task_done`; task id: {}'
.format(task_id))
else:
raise RuntimeError(
'Task not present in the queue; task id: {}'.format(task_id))
self._pending_tasks_by_id.pop(task_id)
self._task_ids.remove(task_id)
def contains_task_id(self, task_id: task_lib.TaskId) -> bool:
"""Returns `True` if the task queue contains README.ml-pipelines-sdk.md task with the given `task_id`.
Args:
task_id: A task id.
Returns:
      `True` if a task with `task_id` was enqueued but `task_done` has not been
invoked yet.
"""
with self._lock:
return task_id in self._task_ids
def is_empty(self) -> bool:
"""Returns `True` if the task queue is empty.
    The queue is considered empty only if all enqueued tasks have been dequeued
    and `task_done` invoked on them.
"""
with self._lock:
      return not self._task_ids
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/core/task_queue.py | 0.926893 | 0.367724 | task_queue.py | pypi |
"""Utilities for task generation."""
import itertools
from typing import Dict, Iterable, List, Optional, Sequence, Text
import attr
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
@attr.s
class ResolvedInfo:
contexts = attr.ib(type=List[metadata_store_pb2.Context])
exec_properties = attr.ib(type=Dict[Text, types.Property])
input_artifacts = attr.ib(type=Optional[Dict[Text, List[types.Artifact]]])
def _generate_task_from_execution(metadata_handler: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline,
node: pipeline_pb2.PipelineNode,
execution: metadata_store_pb2.Execution,
is_cancelled: bool = False) -> task_lib.Task:
"""Generates `ExecNodeTask` given execution."""
contexts = metadata_handler.store.get_contexts_by_execution(execution.id)
exec_properties = _extract_properties(execution)
input_artifacts = execution_lib.get_artifacts_dict(
metadata_handler, execution.id, metadata_store_pb2.Event.INPUT)
outputs_resolver = outputs_utils.OutputsResolver(node, pipeline.pipeline_info,
pipeline.runtime_spec,
pipeline.execution_mode)
return task_lib.ExecNodeTask(
node_uid=task_lib.NodeUid.from_pipeline_node(pipeline, node),
execution=execution,
contexts=contexts,
exec_properties=exec_properties,
input_artifacts=input_artifacts,
output_artifacts=outputs_resolver.generate_output_artifacts(execution.id),
executor_output_uri=outputs_resolver.get_executor_output_uri(
execution.id),
stateful_working_dir=outputs_resolver.get_stateful_working_directory(
execution.id),
pipeline=pipeline,
is_cancelled=is_cancelled)
def generate_task_from_active_execution(
metadata_handler: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline,
node: pipeline_pb2.PipelineNode,
executions: Iterable[metadata_store_pb2.Execution],
is_cancelled: bool = False,
) -> Optional[task_lib.Task]:
"""Generates task from active execution (if any).
  Returns `None` if a task cannot be generated from an active execution.
Args:
metadata_handler: A handler to access MLMD db.
pipeline: The pipeline containing the node.
    node: The pipeline node for which to generate a task.
executions: A sequence of all executions for the given node.
is_cancelled: Sets `is_cancelled` in ExecNodeTask.
Returns:
    A `Task` object if an active execution exists for the node, `None`
    otherwise.
Raises:
RuntimeError: If there are multiple active executions for the node.
"""
active_executions = [
e for e in executions if execution_lib.is_execution_active(e)
]
if not active_executions:
return None
if len(active_executions) > 1:
raise RuntimeError(
'Unexpected multiple active executions for the node: {}\n executions: '
'{}'.format(node.node_info.id, active_executions))
return _generate_task_from_execution(
metadata_handler,
pipeline,
node,
active_executions[0],
is_cancelled=is_cancelled)
def _extract_properties(
execution: metadata_store_pb2.Execution) -> Dict[Text, types.Property]:
result = {}
for key, prop in itertools.chain(execution.properties.items(),
execution.custom_properties.items()):
value = data_types_utils.get_metadata_value(prop)
if value is None:
raise ValueError(f'Unexpected property with empty value; key: {key}')
result[key] = value
return result
def generate_resolved_info(metadata_handler: metadata.Metadata,
node: pipeline_pb2.PipelineNode) -> ResolvedInfo:
"""Returns README.ml-pipelines-sdk.md `ResolvedInfo` object for executing the node.
Args:
metadata_handler: A handler to access MLMD db.
    node: The pipeline node for which to generate resolved info.
Returns:
A `ResolvedInfo` with input resolutions.
"""
# Register node contexts.
contexts = context_lib.prepare_contexts(
metadata_handler=metadata_handler, node_contexts=node.contexts)
# Resolve execution properties.
exec_properties = inputs_utils.resolve_parameters(
node_parameters=node.parameters)
# Resolve inputs.
input_artifacts = inputs_utils.resolve_input_artifacts(
metadata_handler=metadata_handler, node_inputs=node.inputs)
return ResolvedInfo(
contexts=contexts,
exec_properties=exec_properties,
input_artifacts=input_artifacts)
def get_executions(
metadata_handler: metadata.Metadata,
node: pipeline_pb2.PipelineNode) -> List[metadata_store_pb2.Execution]:
"""Returns all executions for the given pipeline node.
This finds all executions having the same set of contexts as the pipeline
node.
Args:
metadata_handler: A handler to access MLMD db.
node: The pipeline node for which to obtain executions.
Returns:
List of executions for the given node in MLMD db.
"""
# Get all the contexts associated with the node.
contexts = []
for context_spec in node.contexts.contexts:
context = metadata_handler.store.get_context_by_type_and_name(
context_spec.type.name, data_types_utils.get_value(context_spec.name))
if context is None:
# If no context is registered, it's certain that there is no
# associated execution for the node.
return []
contexts.append(context)
return execution_lib.get_executions_associated_with_all_contexts(
metadata_handler, contexts)
def is_latest_execution_successful(
executions: Sequence[metadata_store_pb2.Execution]) -> bool:
"""Returns `True` if the latest execution was successful.
Latest execution will have the most recent `create_time_since_epoch`.
Args:
executions: A sequence of executions.
Returns:
    `True` if the latest execution (per `create_time_since_epoch`) was
    successful. `False` if `executions` is empty or if the latest execution was
    not successful.
"""
sorted_executions = sorted(
executions, key=lambda e: e.create_time_since_epoch, reverse=True)
return (execution_lib.is_execution_successful(sorted_executions[0])
if sorted_executions else False)
def get_latest_successful_execution(
executions: Iterable[metadata_store_pb2.Execution]
) -> Optional[metadata_store_pb2.Execution]:
"""Returns the latest successful execution or `None` if no successful executions exist."""
successful_executions = [
e for e in executions if execution_lib.is_execution_successful(e)
]
if successful_executions:
return sorted(
successful_executions,
key=lambda e: e.create_time_since_epoch,
reverse=True)[0]
  return None
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/core/task_gen_utils.py | 0.942467 | 0.276178 | task_gen_utils.py | pypi |
"""Task scheduler interface and registry."""
import abc
import typing
from typing import Optional, Text, Type, TypeVar
import attr
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import task as task_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
@attr.s(frozen=True)
class TaskSchedulerResult:
"""Response from the task scheduler.
Attributes:
    status: Scheduler status that reflects scheduler-level issues, such as
task cancellation, failure to start the executor, etc. Executor status set
in `executor_output` matters if the scheduler status is `OK`. Otherwise,
`executor_output` may be `None` and is ignored.
executor_output: An instance of `ExecutorOutput` containing the results of
task execution.
"""
status = attr.ib(type=status_lib.Status)
executor_output = attr.ib(
type=Optional[execution_result_pb2.ExecutorOutput], default=None)
class TaskScheduler(abc.ABC):
"""Interface for task schedulers."""
def __init__(self, mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline, task: task_lib.Task):
"""Constructor.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: The pipeline IR proto.
task: Task to be executed.
"""
self.mlmd_handle = mlmd_handle
self.pipeline = pipeline
self.task = task
@abc.abstractmethod
def schedule(self) -> TaskSchedulerResult:
"""Schedules task execution and returns the results of execution.
This method blocks until task execution completes (successfully or not) or
    until explicitly cancelled by a call to `cancel`. When cancelled, `schedule`
is expected to stop any ongoing work, clean up and return as soon as
    possible. Note that `cancel` will be invoked from a different thread than
`schedule` and hence the concrete implementations must be thread safe. It's
technically possible for `cancel` to be invoked before `schedule`; scheduler
implementations should handle this case by returning from `schedule`
immediately.
"""
@abc.abstractmethod
def cancel(self) -> None:
"""Cancels task scheduler.
    This method will be invoked from a different thread than the thread that's
blocked on call to `schedule`. `cancel` must return immediately when called.
Upon cancellation, `schedule` method is expected to stop any ongoing work,
clean up and return as soon as possible. It's technically possible for
`cancel` to be invoked before `schedule`; scheduler implementations should
handle this case by returning from `schedule` immediately.
"""
T = TypeVar('T', bound='TaskSchedulerRegistry')
class TaskSchedulerRegistry:
"""A registry for task schedulers."""
_task_scheduler_registry = {}
@classmethod
def register(cls: Type[T], executor_spec_type_url: Text,
scheduler_class: Type[TaskScheduler]) -> None:
"""Registers README.ml-pipelines-sdk.md new task scheduler for the given executor spec type url.
Args:
executor_spec_type_url: The URL of the executor spec type.
      scheduler_class: The class that will be instantiated for a matching task.
Raises:
ValueError: If `executor_spec_type_url` is already in the registry.
"""
if executor_spec_type_url in cls._task_scheduler_registry:
raise ValueError(
'A task scheduler already exists for the executor spec type url: {}'
.format(executor_spec_type_url))
cls._task_scheduler_registry[executor_spec_type_url] = scheduler_class
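  # Illustrative registration sketch (hypothetical scheduler class and
  # executor spec type url, not part of the original module):
  #
  #   TaskSchedulerRegistry.register(
  #       'type.googleapis.com/hypothetical.ExecutorSpec', MyTaskScheduler)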
@classmethod
def clear(cls: Type[T]) -> None:
cls._task_scheduler_registry.clear()
@classmethod
def create_task_scheduler(cls: Type[T], mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline,
task: task_lib.Task) -> TaskScheduler:
"""Creates README.ml-pipelines-sdk.md task scheduler for the given task.
Note that this assumes deployment_config packed in the pipeline IR is of
type `IntermediateDeploymentConfig`. This detail may change in the future.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: The pipeline IR.
task: The task that needs to be scheduled.
Returns:
An instance of `TaskScheduler` for the given task.
Raises:
NotImplementedError: Raised if not an `ExecNodeTask`.
ValueError: Deployment config not present in the IR proto or if executor
spec for the node corresponding to `task` not configured in the IR.
"""
if not task_lib.is_exec_node_task(task):
raise NotImplementedError(
          'Can create a task scheduler only for an `ExecNodeTask`.')
task = typing.cast(task_lib.ExecNodeTask, task)
# TODO(b/170383494): Decide which DeploymentConfig to use.
if not pipeline.deployment_config.Is(
pipeline_pb2.IntermediateDeploymentConfig.DESCRIPTOR):
raise ValueError('No deployment config found in pipeline IR.')
depl_config = pipeline_pb2.IntermediateDeploymentConfig()
pipeline.deployment_config.Unpack(depl_config)
node_id = task.node_uid.node_id
if node_id not in depl_config.executor_specs:
raise ValueError(
'Executor spec for node id `{}` not found in pipeline IR.'.format(
node_id))
executor_spec_type_url = depl_config.executor_specs[node_id].type_url
return cls._task_scheduler_registry[executor_spec_type_url](
        mlmd_handle=mlmd_handle, pipeline=pipeline, task=task)
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/experimental/core/task_scheduler.py | 0.899399 | 0.499023 | task_scheduler.py | pypi |
"""Utilities for handling common config operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Tuple, Type
from tfx.dsl.components.base import base_component
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.launcher import base_component_launcher
def find_component_launch_info(
p_config: pipeline_config.PipelineConfig,
component: base_component.BaseComponent,
) -> Tuple[Type[base_component_launcher.BaseComponentLauncher],
Optional[base_component_config.BaseComponentConfig]]:
"""Find README.ml-pipelines-sdk.md launcher and component config to launch the component.
The default lookup logic goes through the `supported_launcher_classes`
in sequence for each config from the `default_component_configs`. User can
override README.ml-pipelines-sdk.md single component setting by `component_config_overrides`. The
method returns the first component config and launcher which together can
launch the executor_spec of the component.
Subclass may customize the logic by overriding the method.
Args:
p_config: the pipeline config.
component: the component to launch.
Returns:
The found tuple of component launcher class and the compatible component
config.
Raises:
RuntimeError: if no supported launcher is found.
"""
if component.id in p_config.component_config_overrides:
component_configs = [p_config.component_config_overrides[component.id]]
else:
# Add None to the end of the list to find launcher with no component
# config
component_configs = p_config.default_component_configs + [None]
for component_config in component_configs:
for component_launcher_class in p_config.supported_launcher_classes:
if component_launcher_class.can_launch(component.executor_spec,
component_config):
return (component_launcher_class, component_config)
raise RuntimeError('No launcher info can be found for component "%s".' %
                     component.component_id)
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/config/config_utils.py | 0.898059 | 0.218711 | config_utils.py | pypi |
"""Component config for docker run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, List, Text, Union
from tfx.orchestration.config import base_component_config
class DockerComponentConfig(base_component_config.BaseComponentConfig):
"""Component config which holds docker run args.
Attributes:
docker_server_url: URL to the Docker server. For example,
      `unix:///var/run/docker.sock` or `tcp://127.0.0.1:1234`. Uses environment
      variables to initialize the docker client if this parameter is not set.
Default: `None`.
    environment: Environment variables to set inside the container, as a
      dictionary or a list of strings in the format ["SOMEVARIABLE=xxx"].
name: The name for this container.
privileged: Give extended privileges to this container. Default: `False`.
remove: Remove the container when it has finished running. Default: `False`.
user: Username or UID to run commands as inside the container.
volumes: A dictionary to configure volumes mounted inside the container. The
      key is either the host path or a volume name, and the value is a
dictionary with the keys: {bind: mode}.
For example:
`{'/home/user1': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}`
additional_run_args: Additional run args to pass to
`docker.client.containers.run`. See
https://docker-py.readthedocs.io/en/stable/containers.html#docker.models.containers.ContainerCollection.run.
"""
def __init__(self,
docker_server_url: Text = None,
environment: Union[Dict[Text, Text], List[Text]] = None,
name: Text = None,
privileged: bool = False,
user: Union[Text, int] = None,
volumes: Union[Dict[Text, Dict[Text, Text]], List[Text]] = None,
**kwargs):
self.docker_server_url = docker_server_url
self.environment = environment
self.name = name
self.privileged = privileged
self.user = user
self.volumes = volumes
self.additional_run_args = kwargs
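  # Illustrative config sketch (assumed values, not part of the original
  # module); extra keyword args flow into `additional_run_args`:
  #
  #   docker_config = DockerComponentConfig(
  #       docker_server_url='unix:///var/run/docker.sock',
  #       environment={'SOMEVARIABLE': 'xxx'},
  #       volumes={'/home/user1': {'bind': '/mnt/vol2', 'mode': 'rw'}},
  #       auto_remove=True)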
def to_run_args(self):
if self.additional_run_args:
args = self.additional_run_args.copy()
else:
args = {}
args.update(privileged=self.privileged)
if self.environment:
args.update(environment=self.environment)
if self.name:
args.update(name=self.name)
if self.user:
args.update(user=self.user)
if self.volumes:
args.update(volumes=self.volumes)
    return args
| /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/config/docker_component_config.py | 0.821617 | 0.163445 | docker_component_config.py | pypi |
"""Settings for controlling how to run README.ml-pipelines-sdk.md pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, List, Text, Type
from tfx.orchestration.config import base_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
class PipelineConfig(object):
"""Config class which controls how to run README.ml-pipelines-sdk.md pipeline.
Attributes
supported_launcher_classes: A list of component launcher classes that are
supported by the current pipeline. List sequence determines the order in
which launchers are chosen for each component being run.
default_component_configs: A list of default component configs which will
be used as default component config to run each component in the pipeline.
List sequence determines the order in which config are chosen for each
component being run.
component_config_overrides: component configs for customizing the launching
of each component. The key is the component ID.
"""
# TODO(hongyes): figure out the best practice to put the
# SUPPORTED_LAUNCHER_CLASSES.
def __init__(self,
supported_launcher_classes: List[Type[
base_component_launcher.BaseComponentLauncher]] = None,
default_component_configs: List[
base_component_config.BaseComponentConfig] = None,
component_config_overrides: Dict[
Text, base_component_config.BaseComponentConfig] = None):
self.supported_launcher_classes = supported_launcher_classes or [
in_process_component_launcher.InProcessComponentLauncher
]
self.default_component_configs = default_component_configs or []
self.component_config_overrides = component_config_overrides or {}
self._validate_configs()
def _validate_configs(self):
"""Validate the config settings."""
if len(self.supported_launcher_classes) > len(
set(self.supported_launcher_classes)):
raise ValueError(
'supported_launcher_classes must not have duplicate types')
default_component_config_classes = [
type(config) for config in self.default_component_configs
]
if len(default_component_config_classes) > len(
set(default_component_config_classes)):
raise ValueError(
'default_component_configs must not have configs with the same type') | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/config/pipeline_config.py | 0.764979 | 0.325655 | pipeline_config.py | pypi |
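# Hedged usage sketch (not part of the original module): a PipelineConfig
# that supports both in-process and Docker launching, with a default Docker
# config for container-based executors. The imports are done locally here and
# mirror the modules in this package.
def _example_pipeline_config():
  from tfx.orchestration.config import docker_component_config
  from tfx.orchestration.launcher import docker_component_launcher
  return PipelineConfig(
      supported_launcher_classes=[
          # Order matters: the first launcher whose can_launch() accepts the
          # component's executor spec and config is used.
          in_process_component_launcher.InProcessComponentLauncher,
          docker_component_launcher.DockerComponentLauncher,
      ],
      default_component_configs=[
          docker_component_config.DockerComponentConfig(privileged=False),
      ])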
"""Docker component launcher which launches README.ml-pipelines-sdk.md container in docker environment ."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Text, cast
import absl
import docker
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import docker_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import container_common
class DockerComponentLauncher(base_component_launcher.BaseComponentLauncher):
"""Responsible for launching README.ml-pipelines-sdk.md container executor."""
@classmethod
def can_launch(
cls, component_executor_spec: executor_spec.ExecutorSpec,
component_config: base_component_config.BaseComponentConfig) -> bool:
"""Checks if the launcher can launch the executor spec."""
if component_config and not isinstance(
component_config, docker_component_config.DockerComponentConfig):
return False
return isinstance(component_executor_spec,
(executor_spec.ExecutorContainerSpec,
executor_specs.TemplatedExecutorContainerSpec))
def _run_executor(self, execution_id: int,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Execute underlying component implementation."""
executor_container_spec = cast(executor_spec.ExecutorContainerSpec,
self._component_executor_spec)
if self._component_config:
docker_config = cast(docker_component_config.DockerComponentConfig,
self._component_config)
else:
docker_config = docker_component_config.DockerComponentConfig()
# Resolve the Jinja2 template in the container spec.
executor_container_spec = container_common.resolve_container_template(
executor_container_spec, input_dict, output_dict, exec_properties)
absl.logging.info('Container spec: %s' % vars(executor_container_spec))
absl.logging.info('Docker config: %s' % vars(docker_config))
# Call client.containers.run and wait for completion.
# ExecutorContainerSpec follows the k8s container spec, which uses different
# names from Docker's container spec: the k8s `command` maps to Docker's
# entrypoint and the k8s `args` maps to Docker's command.
if docker_config.docker_server_url:
client = docker.DockerClient(base_url=docker_config.docker_server_url)
else:
client = docker.from_env()
run_args = docker_config.to_run_args()
container = client.containers.run(
image=executor_container_spec.image,
entrypoint=executor_container_spec.command,
command=executor_container_spec.args,
detach=True,
**run_args)
# Streaming logs
for log in container.logs(stream=True):
absl.logging.info('Docker: ' + log.decode('utf-8'))
exit_code = container.wait()['StatusCode']
if exit_code != 0:
raise RuntimeError(
'Container exited with error code "{}"'.format(exit_code))
# TODO(b/141192583): Report data to publisher
# - report container digest
# - report replaced command line entrypoints
# - report docker run args | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/launcher/docker_component_launcher.py | 0.68721 | 0.197097 | docker_component_launcher.py | pypi |
"""Common code shared by container based launchers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, Union
# TODO(b/176812386): Deprecate usage of jinja2 for placeholders.
import jinja2
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import executor_spec
def resolve_container_template(
container_spec_tmpl: Union[executor_spec.ExecutorContainerSpec,
executor_specs.TemplatedExecutorContainerSpec],
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> executor_spec.ExecutorContainerSpec:
"""Resolves Jinja2 template languages from an executor container spec.
Args:
container_spec_tmpl: the container spec template to be resolved.
input_dict: Dictionary of input artifacts consumed by this component.
output_dict: Dictionary of output artifacts produced by this component.
exec_properties: Dictionary of execution properties.
Returns:
A resolved container spec.
"""
context = {
'input_dict': input_dict,
'output_dict': output_dict,
'exec_properties': exec_properties,
}
if isinstance(container_spec_tmpl,
executor_specs.TemplatedExecutorContainerSpec):
return executor_spec.ExecutorContainerSpec(
image=container_spec_tmpl.image,
command=_resolve_container_command_line(
cmd_args=container_spec_tmpl.command,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
),
)
return executor_spec.ExecutorContainerSpec(
image=_render_text(container_spec_tmpl.image, context),
command=_render_items(container_spec_tmpl.command, context),
args=_render_items(container_spec_tmpl.args, context))
def _render_items(items: List[Text], context: Dict[Text, Any]) -> List[Text]:
if not items:
return items
return [_render_text(item, context) for item in items]
def _render_text(text: Text, context: Dict[Text, Any]) -> Text:
return jinja2.Template(text).render(context)
def _resolve_container_command_line(
cmd_args: Optional[List[
placeholders.CommandlineArgumentType]],
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any],
) -> List[Text]:
"""Resolves placeholders in the command line of README.ml-pipelines-sdk.md container.
Args:
cmd_args: command line args to resolve.
input_dict: Dictionary of input artifacts consumed by this component.
output_dict: Dictionary of output artifacts produced by this component.
exec_properties: Dictionary of execution properties.
Returns:
Resolved command line.
"""
def expand_command_line_arg(
cmd_arg: placeholders.CommandlineArgumentType,
) -> Text:
"""Resolves README.ml-pipelines-sdk.md single argument."""
if isinstance(cmd_arg, str):
return cmd_arg
elif isinstance(cmd_arg, placeholders.InputValuePlaceholder):
if cmd_arg.input_name in exec_properties:
return str(exec_properties[cmd_arg.input_name])
else:
artifact = input_dict[cmd_arg.input_name][0]
return str(artifact.value)
elif isinstance(cmd_arg, placeholders.InputUriPlaceholder):
return input_dict[cmd_arg.input_name][0].uri
elif isinstance(cmd_arg, placeholders.OutputUriPlaceholder):
return output_dict[cmd_arg.output_name][0].uri
elif isinstance(cmd_arg, placeholders.ConcatPlaceholder):
resolved_items = [expand_command_line_arg(item) for item in cmd_arg.items]
for item in resolved_items:
if not isinstance(item, (str, Text)):
raise TypeError('Expanded item "{}" has incorrect type "{}"'.format(
item, type(item)))
return ''.join(resolved_items)
else:
raise TypeError(
('Unsupported type of command-line arguments: "{}".'
' Supported types are {}.')
.format(type(cmd_arg), str(executor_specs.CommandlineArgumentType)))
resolved_command_line = []
for cmd_arg in (cmd_args or []):
resolved_cmd_arg = expand_command_line_arg(cmd_arg)
if not isinstance(resolved_cmd_arg, (str, Text)):
raise TypeError(
'Resolved argument "{}" (type="{}") is not a string.'.format(
resolved_cmd_arg, type(resolved_cmd_arg)))
resolved_command_line.append(resolved_cmd_arg)
return resolved_command_line
def to_swagger_dict(config: Any) -> Any:
"""Converts README.ml-pipelines-sdk.md config object to README.ml-pipelines-sdk.md swagger API dict.
This utility method recursively converts swagger code generated configs into
README.ml-pipelines-sdk.md valid swagger dictionary. This method is trying to workaround README.ml-pipelines-sdk.md bug
(https://github.com/swagger-api/swagger-codegen/issues/8948)
from swagger generated code
Args:
config: The config object. It can be one of List, Dict or README.ml-pipelines-sdk.md Swagger code
generated object, which has README.ml-pipelines-sdk.md `attribute_map` attribute.
Returns:
The original object with all Swagger generated object replaced with
dictionary object.
"""
if isinstance(config, list):
return [to_swagger_dict(x) for x in config]
if hasattr(config, 'attribute_map'):
return {
swagger_name: to_swagger_dict(getattr(config, key))
for (key, swagger_name) in config.attribute_map.items()
if getattr(config, key)
}
if isinstance(config, dict):
return {key: to_swagger_dict(value) for key, value in config.items()}
return config | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/launcher/container_common.py | 0.753013 | 0.20203 | container_common.py | pypi |
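# Hedged sketch (not part of the original module): the Jinja2 rendering used
# by resolve_container_template, shown on a standalone image string. The
# 'tag' execution property and the image name are hypothetical.
def _example_render():
  context = {'exec_properties': {'tag': 'latest'}}
  # Jinja2 resolves `exec_properties.tag` via dict lookup, yielding
  # 'gcr.io/my-project/trainer:latest'.
  return _render_text('gcr.io/my-project/trainer:{{ exec_properties.tag }}',
                      context)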
"""Docker component launcher which launches README.ml-pipelines-sdk.md container in docker environment ."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, cast
from absl import logging
from kubernetes import client
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import kubernetes_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import container_common
from tfx.utils import kube_utils
class KubernetesComponentLauncher(base_component_launcher.BaseComponentLauncher
):
"""Responsible for launching README.ml-pipelines-sdk.md container executor on Kubernetes."""
# TODO(hongyes): add container spec into exec_properties for driver to check.
@classmethod
def can_launch(
cls,
component_executor_spec: executor_spec.ExecutorSpec,
component_config: base_component_config.BaseComponentConfig = None
) -> bool:
"""Checks if the launcher can launch the executor spec."""
if component_config and not isinstance(
component_config,
kubernetes_component_config.KubernetesComponentConfig):
return False
return isinstance(component_executor_spec,
(executor_spec.ExecutorContainerSpec,
executor_specs.TemplatedExecutorContainerSpec))
def _run_executor(self, execution_id: int,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Execute underlying component implementation.
Runs the executor container in a Kubernetes Pod and waits until it goes into
`Succeeded` or `Failed` state.
Args:
execution_id: The ID of the execution.
input_dict: Input dict from input key to a list of Artifacts. These are
often outputs of another component in the pipeline and passed to the
component by the orchestration system.
output_dict: Output dict from output key to a list of Artifacts. These are
often consumed by a dependent component.
exec_properties: A dict of execution properties. These are inputs to the
pipeline with primitive types (int, string, float), fully materialized
when a pipeline is constructed. No dependency on another component or
later injection from orchestration systems is necessary or possible for
these values.
Raises:
RuntimeError: when the pod is in `Failed` state or unexpected failure from
Kubernetes API.
"""
container_spec = cast(executor_spec.ExecutorContainerSpec,
self._component_executor_spec)
# Resolve the Jinja2 template in the container spec.
container_spec = container_common.resolve_container_template(
container_spec, input_dict, output_dict, exec_properties)
pod_name = self._build_pod_name(execution_id)
# TODO(hongyes): replace the default value from component config.
try:
namespace = kube_utils.get_kfp_namespace()
except RuntimeError:
namespace = 'kubeflow'
pod_manifest = self._build_pod_manifest(pod_name, container_spec)
core_api = kube_utils.make_core_v1_api()
if kube_utils.is_inside_kfp():
launcher_pod = kube_utils.get_current_kfp_pod(core_api)
pod_manifest['spec']['serviceAccount'] = launcher_pod.spec.service_account
pod_manifest['spec'][
'serviceAccountName'] = launcher_pod.spec.service_account_name
pod_manifest['metadata'][
'ownerReferences'] = container_common.to_swagger_dict(
launcher_pod.metadata.owner_references)
else:
pod_manifest['spec']['serviceAccount'] = kube_utils.TFX_SERVICE_ACCOUNT
pod_manifest['spec'][
'serviceAccountName'] = kube_utils.TFX_SERVICE_ACCOUNT
logging.info('Looking for pod "%s:%s".', namespace, pod_name)
resp = kube_utils.get_pod(core_api, pod_name, namespace)
if not resp:
logging.info('Pod "%s:%s" does not exist. Creating it...',
namespace, pod_name)
logging.info('Pod manifest: %s', pod_manifest)
try:
resp = core_api.create_namespaced_pod(
namespace=namespace, body=pod_manifest)
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to create the container executor pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
# Wait up to 300 seconds for the pod to move from pending to another status.
logging.info('Waiting for pod "%s:%s" to start.', namespace, pod_name)
kube_utils.wait_pod(
core_api,
pod_name,
namespace,
exit_condition_lambda=kube_utils.pod_is_not_pending,
condition_description='non-pending status',
timeout_sec=300)
logging.info('Start log streaming for pod "%s:%s".', namespace, pod_name)
try:
logs = core_api.read_namespaced_pod_log(
name=pod_name,
namespace=namespace,
container=kube_utils.ARGO_MAIN_CONTAINER_NAME,
follow=True,
_preload_content=False).stream()
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to stream the logs from the pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
for log in logs:
logging.info(log.decode().rstrip('\n'))
# Wait indefinitely for the pod to complete.
resp = kube_utils.wait_pod(
core_api,
pod_name,
namespace,
exit_condition_lambda=kube_utils.pod_is_done,
condition_description='done state')
if resp.status.phase == kube_utils.PodPhase.FAILED.value:
raise RuntimeError('Pod "%s:%s" failed with status "%s".' %
(namespace, pod_name, resp.status))
logging.info('Pod "%s:%s" is done.', namespace, pod_name)
def _build_pod_manifest(
self, pod_name: Text,
container_spec: executor_spec.ExecutorContainerSpec) -> Dict[Text, Any]:
"""Build README.ml-pipelines-sdk.md pod spec.
The function builds README.ml-pipelines-sdk.md pod spec by patching executor container spec into
the pod spec from component config.
Args:
pod_name: The name of the pod.
container_spec: The resolved executor container spec.
Returns:
The pod manifest in dictionary format.
"""
if self._component_config:
kubernetes_config = cast(
kubernetes_component_config.KubernetesComponentConfig,
self._component_config)
pod_manifest = container_common.to_swagger_dict(kubernetes_config.pod)
else:
pod_manifest = {}
pod_manifest.update({
'apiVersion': 'v1',
'kind': 'Pod',
})
# TODO(hongyes): figure out a better way to derive type hints for nested
# dicts.
metadata = pod_manifest.setdefault('metadata', {}) # type: Dict[Text, Any]
metadata.update({'name': pod_name})
spec = pod_manifest.setdefault('spec', {}) # type: Dict[Text, Any]
spec.update({'restartPolicy': 'Never'})
containers = spec.setdefault('containers',
[]) # type: List[Dict[Text, Any]]
container = None # type: Optional[Dict[Text, Any]]
for c in containers:
if c['name'] == kube_utils.ARGO_MAIN_CONTAINER_NAME:
container = c
break
if not container:
container = {'name': kube_utils.ARGO_MAIN_CONTAINER_NAME}
containers.append(container)
container.update({
'image': container_spec.image,
'command': container_spec.command,
'args': container_spec.args,
})
return pod_manifest
def _build_pod_name(self, execution_id: int) -> Text:
if self._pipeline_info.run_id:
pipeline_name = (
self._pipeline_info.pipeline_name[:50] + '-' +
self._pipeline_info.run_id[:50])
else:
pipeline_name = self._pipeline_info.pipeline_name[:100]
pod_name = '%s-%s-%s' % (
pipeline_name, self._component_info.component_id[:50], execution_id)
return kube_utils.sanitize_pod_name(pod_name) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/launcher/kubernetes_component_launcher.py | 0.712832 | 0.26806 | kubernetes_component_launcher.py | pypi |
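# Hedged sketch (not part of the original module): the manifest patching in
# _build_pod_manifest, in isolation. A user-supplied pod spec keeps its extra
# fields, while required fields such as kind and restartPolicy are enforced.
# The pod name and nodeSelector below are hypothetical.
def _example_manifest_patch():
  pod_manifest = {'spec': {'restartPolicy': 'Always', 'nodeSelector': {}}}
  pod_manifest.update({'apiVersion': 'v1', 'kind': 'Pod'})
  pod_manifest.setdefault('metadata', {}).update({'name': 'example-pod'})
  pod_manifest['spec'].update({'restartPolicy': 'Never'})
  # -> nodeSelector survives; restartPolicy is forced to 'Never'.
  return pod_manifest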
"""For component execution, includes driver, executor and publisher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
from typing import Any, Dict, List, Optional, Text
import absl
from six import with_metaclass
from tfx import types
from tfx.dsl.components.base import base_node
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import publisher
from tfx.orchestration.config import base_component_config
class BaseComponentLauncher(with_metaclass(abc.ABCMeta, object)):
"""Responsible for launching driver, executor and publisher of component."""
def __init__(
self,
component: base_node.BaseNode,
pipeline_info: data_types.PipelineInfo,
driver_args: data_types.DriverArgs,
metadata_connection: metadata.Metadata,
beam_pipeline_args: List[Text],
additional_pipeline_args: Dict[Text, Any],
component_config: Optional[
base_component_config.BaseComponentConfig] = None,
):
"""Initialize README.ml-pipelines-sdk.md BaseComponentLauncher.
Args:
component: The TFX node to launch.
pipeline_info: An instance of data_types.PipelineInfo that holds pipeline
properties.
driver_args: An instance of data_types.DriverArgs that holds component
specific driver args.
metadata_connection: ML metadata connection. The connection is expected to
not be opened when given to this object.
beam_pipeline_args: Pipeline arguments for Beam powered Components.
additional_pipeline_args: Additional pipeline args.
component_config: Optional component specific config to instrument
launcher on how to launch a component.
Raises:
ValueError: when component and component_config are not launchable by the
launcher.
"""
self._pipeline_info = pipeline_info
self._component_info = data_types.ComponentInfo(
component_type=component.type,
component_id=component.id,
pipeline_info=self._pipeline_info)
self._driver_args = driver_args
self._driver_class = component.driver_class
self._component_executor_spec = component.executor_spec
self._input_dict = component.inputs.get_all()
self._output_dict = component.outputs.get_all()
self._exec_properties = component.exec_properties
self._metadata_connection = metadata_connection
self._beam_pipeline_args = beam_pipeline_args
self._additional_pipeline_args = additional_pipeline_args
self._component_config = component_config
if not self.can_launch(self._component_executor_spec,
self._component_config):
raise ValueError(
'component.executor_spec with type "%s" and component config with'
' type "%s" are not launchable by "%s".' % (
type(self._component_executor_spec).__name__,
type(self._component_config).__name__,
type(self).__name__,
))
@classmethod
def create(
cls,
component: base_node.BaseNode,
pipeline_info: data_types.PipelineInfo,
driver_args: data_types.DriverArgs,
metadata_connection: metadata.Metadata,
beam_pipeline_args: List[Text],
additional_pipeline_args: Dict[Text, Any],
component_config: Optional[
base_component_config.BaseComponentConfig] = None,
) -> 'BaseComponentLauncher':
"""Initialize README.ml-pipelines-sdk.md ComponentLauncher directly from README.ml-pipelines-sdk.md BaseComponent instance.
This class method is the contract between `TfxRunner` and
`BaseComponentLauncher` to support launcher polymorphism. Sublcass of this
class must make sure it can be initialized by the method.
Args:
component: The component to launch.
pipeline_info: An instance of data_types.PipelineInfo that holds pipeline
properties.
driver_args: An instance of data_types.DriverArgs that holds component
specific driver args.
metadata_connection: ML metadata connection. The connection is expected to
not be opened when given to this object.
beam_pipeline_args: Pipeline arguments for Beam powered Components.
additional_pipeline_args: Additional pipeline args.
component_config: Optional component specific config to instrument
launcher on how to launch a component.
Returns:
A new instance of component launcher.
"""
return cls(
component=component,
pipeline_info=pipeline_info,
driver_args=driver_args,
metadata_connection=metadata_connection,
beam_pipeline_args=beam_pipeline_args,
additional_pipeline_args=additional_pipeline_args,
component_config=component_config) # pytype: disable=not-instantiable
@classmethod
@abc.abstractmethod
def can_launch(
cls, component_executor_spec: executor_spec.ExecutorSpec,
component_config: base_component_config.BaseComponentConfig) -> bool:
"""Checks if the launcher can launch the executor spec with an optional component config."""
raise NotImplementedError
def _run_driver(
self, input_dict: Dict[Text,
types.Channel], output_dict: Dict[Text,
types.Channel],
exec_properties: Dict[Text, Any]) -> data_types.ExecutionDecision:
"""Prepare inputs, outputs and execution properties for actual execution."""
with self._metadata_connection as m:
driver = self._driver_class(metadata_handler=m)
execution_decision = driver.pre_execution(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
driver_args=self._driver_args,
pipeline_info=self._pipeline_info,
component_info=self._component_info)
return execution_decision
@abc.abstractmethod
# TODO(jyzhao): consider returning an execution result.
def _run_executor(self, execution_id: int,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Execute underlying component implementation."""
raise NotImplementedError
def _run_publisher(self, output_dict: Dict[Text,
List[types.Artifact]]) -> None:
"""Publish execution result to ml metadata."""
with self._metadata_connection as m:
p = publisher.Publisher(metadata_handler=m)
p.publish_execution(
component_info=self._component_info, output_artifacts=output_dict)
def launch(self) -> data_types.ExecutionInfo:
"""Execute the component, includes driver, executor and publisher.
Returns:
The execution decision of the launch.
"""
absl.logging.info('Running driver for %s',
self._component_info.component_id)
execution_decision = self._run_driver(self._input_dict, self._output_dict,
self._exec_properties)
if not execution_decision.use_cached_results:
absl.logging.info('Running executor for %s',
self._component_info.component_id)
# Make a deep copy of input_dict and exec_properties, because they should
# be immutable in this context.
# output_dict can still be changed, specifically its properties.
self._run_executor(execution_decision.execution_id,
copy.deepcopy(execution_decision.input_dict),
execution_decision.output_dict,
copy.deepcopy(execution_decision.exec_properties))
absl.logging.info('Running publisher for %s',
self._component_info.component_id)
self._run_publisher(output_dict=execution_decision.output_dict)
return data_types.ExecutionInfo(
input_dict=execution_decision.input_dict,
output_dict=execution_decision.output_dict,
exec_properties=execution_decision.exec_properties,
execution_id=execution_decision.execution_id) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/launcher/base_component_launcher.py | 0.892261 | 0.305827 | base_component_launcher.py | pypi |
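# Hedged sketch (not part of the original module): the minimal surface a
# concrete launcher must provide, mirroring the two abstract methods above.
# This subclass is hypothetical and only logs instead of executing anything.
class _LoggingOnlyLauncher(BaseComponentLauncher):
  """A hypothetical launcher that accepts any executor spec without config."""

  @classmethod
  def can_launch(cls, component_executor_spec, component_config):
    # Accept any executor spec as long as no component config is supplied.
    return component_config is None

  def _run_executor(self, execution_id, input_dict, output_dict,
                    exec_properties):
    absl.logging.info('Would execute id %s here.', execution_id)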
"""In process component launcher which launches python executors in process."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from typing import Any, Dict, List, Text, cast
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.config import base_component_config
from tfx.orchestration.launcher import base_component_launcher
class InProcessComponentLauncher(base_component_launcher.BaseComponentLauncher):
"""Responsible for launching README.ml-pipelines-sdk.md python executor.
The executor will be launched in the same process of the rest of the
component, i.e. its driver and publisher.
"""
@classmethod
def can_launch(
cls, component_executor_spec: executor_spec.ExecutorSpec,
component_config: base_component_config.BaseComponentConfig) -> bool:
"""Checks if the launcher can launch the executor spec."""
if component_config:
return False
return isinstance(component_executor_spec, executor_spec.ExecutorClassSpec)
def _run_executor(self, execution_id: int,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Execute underlying component implementation."""
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=self._beam_pipeline_args,
tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''),
unique_id=str(execution_id))
executor_class_spec = cast(executor_spec.ExecutorClassSpec,
self._component_executor_spec)
# Type hint of component will cause not-instantiable error as
# component.executor is Type[BaseExecutor] which has an abstract function.
executor = executor_class_spec.executor_class(
executor_context) # type: ignore
# Make a deep copy of input_dict and exec_properties, because they should
# be immutable in this context.
# output_dict can still be changed, specifically its properties.
executor.Do(
copy.deepcopy(input_dict), output_dict, copy.deepcopy(exec_properties)) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/launcher/in_process_component_launcher.py | 0.806396 | 0.324971 | in_process_component_launcher.py | pypi |
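# Hedged sketch (not part of the original module): the launcher above only
# accepts plain Python executor class specs with no component config. This
# assumes `base_executor.EmptyExecutor`, a no-op executor in the same package.
def _example_can_launch():
  spec = executor_spec.ExecutorClassSpec(base_executor.EmptyExecutor)
  # True: an ExecutorClassSpec with no component config is launchable here.
  return InProcessComponentLauncher.can_launch(spec, component_config=None)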
"""In process inplementation of Resolvers."""
from typing import Mapping, Sequence, Dict, List, Optional
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.resolver import factory as resolver_factory
from tfx.proto.orchestration import pipeline_pb2
class ResolverStepProcessor:
"""ResolverStepProcessor for processing single ResolverStep.
Note that input and the ouptut type of __call__ is identical, thus resolver
steps can be chained where the output of the former step would be fed into
the next step. If the output is None, chained processing will be halted and
the output of all steps would be considered None immediately.
"""
def __init__(self, resolver_step: pipeline_pb2.ResolverConfig.ResolverStep):
self._resolver = resolver_factory.make_resolver_strategy_instance(
resolver_step)
self._input_keys = set(resolver_step.input_keys)
def __call__(
self, metadata_handler: metadata.Metadata,
input_dict: Mapping[str, Sequence[types.Artifact]]
) -> Optional[Dict[str, List[types.Artifact]]]:
"""Resolves artifacts in input_dict by optionally querying MLMD.
Args:
metadata_handler: A metadata handler to access MLMD store.
input_dict: Inputs to be resolved.
Returns:
The resolved input_dict.
"""
filtered_keys = self._input_keys or set(input_dict.keys())
filtered_inputs = {
key: list(value)
for key, value in input_dict.items()
if key in filtered_keys
}
bypassed_inputs = {
key: list(value)
for key, value in input_dict.items()
if key not in filtered_keys
}
result = self._resolver.resolve_artifacts(metadata_handler, filtered_inputs)
if result is not None:
result.update(bypassed_inputs)
return result
def make_resolver_processors(
resolver_config: pipeline_pb2.ResolverConfig
) -> List[ResolverStepProcessor]:
"""Factory function for ResolverProcessors from ResolverConfig."""
return [ResolverStepProcessor(step)
for step in resolver_config.resolver_steps] | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/resolver_processor.py | 0.92576 | 0.540499 | resolver_processor.py | pypi |
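# Hedged sketch (not part of the original module): the chaining contract
# described in ResolverStepProcessor's docstring. Each step's output feeds
# the next, and a None output short-circuits the whole chain to None.
def _run_resolver_chain(metadata_handler, processors, input_dict):
  result = {key: list(value) for key, value in input_dict.items()}
  for processor in processors:
    result = processor(metadata_handler, result)
    if result is None:
      # A halted step makes the output of all steps None immediately.
      return None
  return result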
"""Docker component launcher which launches README.ml-pipelines-sdk.md container in docker environment ."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, cast
from absl import logging
import docker
from tfx.dsl.compiler import placeholder_utils
from tfx.dsl.component.experimental import executor_specs
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import platform_config_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class DockerExecutorOperator(base_executor_operator.BaseExecutorOperator):
"""Responsible for launching README.ml-pipelines-sdk.md container executor."""
SUPPORTED_EXECUTOR_SPEC_TYPE = [executable_spec_pb2.ContainerExecutableSpec]
SUPPORTED_PLATFORM_CONFIG_TYPE = [platform_config_pb2.DockerPlatformConfig]
def __init__(self,
executor_spec: message.Message,
platform_config: Optional[message.Message] = None):
super().__init__(executor_spec, platform_config)
self._container_executor_spec = cast(
executable_spec_pb2.ContainerExecutableSpec, self._executor_spec)
if self._platform_config:
self._docker_platform_config = cast(
platform_config_pb2.DockerPlatformConfig, self._platform_config)
else:
self._docker_platform_config = platform_config_pb2.DockerPlatformConfig()
def run_executor(
self, execution_info: data_types.ExecutionInfo
) -> execution_result_pb2.ExecutorOutput:
"""Execute underlying component implementation."""
context = placeholder_utils.ResolutionContext(
exec_info=execution_info,
executor_spec=self._executor_spec,
platform_config=self._platform_config)
component_executor_spec = (
executor_specs.TemplatedExecutorContainerSpec(
image=self._container_executor_spec.image,
command=[
placeholder_utils.resolve_placeholder_expression(cmd, context)
for cmd in self._container_executor_spec.commands
]))
logging.info('Container spec: %s', vars(component_executor_spec))
logging.info('Docker platform config: %s',
proto_utils.proto_to_json(self._docker_platform_config))
# Call client.containers.run and wait for completion.
# ExecutorContainerSpec follows the k8s container spec, which uses different
# names from Docker's container spec: the k8s `command` maps to Docker's
# entrypoint and the k8s `args` maps to Docker's command.
if self._docker_platform_config.docker_server_url:
client = docker.DockerClient(
base_url=self._docker_platform_config.docker_server_url)
else:
client = docker.from_env()
run_args = self._build_run_args(self._docker_platform_config)
container = client.containers.run(
image=component_executor_spec.image,
command=component_executor_spec.command,
detach=True,
**run_args)
# Streaming logs
for log in container.logs(stream=True):
logging.info('Docker: %s', log.decode('utf-8'))
exit_code = container.wait()['StatusCode']
if exit_code != 0:
raise RuntimeError(
'Container exited with error code "{}"'.format(exit_code))
# TODO(b/141192583): Report data to publisher
# - report container digest
# - report replaced command line entrypoints
# - report docker run args
return execution_result_pb2.ExecutorOutput()
def _build_run_args(
self, docker_platform_config: platform_config_pb2.DockerPlatformConfig
) -> Dict[str, Any]:
"""Converts DockerPlatformConfig to args acceppted by the containers.run."""
if docker_platform_config.additional_run_args:
result = dict(docker_platform_config.additional_run_args)
else:
result = {}
result.update(privileged=(docker_platform_config.privileged or False))
if docker_platform_config.environment:
result.update(environment=docker_platform_config.environment)
if docker_platform_config.name:
result.update(name=docker_platform_config.name)
if docker_platform_config.user:
if docker_platform_config.user.username:
result.update(user=docker_platform_config.user.username)
else:
result.update(user=docker_platform_config.user.uid)
if docker_platform_config.volumes:
volumes = {}
for volume_name in docker_platform_config.volumes:
volume_mount_pb = docker_platform_config.volumes[volume_name]
volumes[volume_name] = {
'bind': volume_mount_pb.bind,
'mode': volume_mount_pb.mode
}
result.update(volumes=volumes)
return result | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/docker_executor_operator.py | 0.696371 | 0.226121 | docker_executor_operator.py | pypi |
"""Base class to define how to operator an executor."""
import sys
from typing import Dict, List, Optional, cast
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types.value_artifact import ValueArtifact
from tfx.utils import import_utils
from google.protobuf import message
_STATEFUL_WORKING_DIR = 'stateful_working_dir'
def _populate_output_artifact(
executor_output: execution_result_pb2.ExecutorOutput,
output_dict: Dict[str, List[types.Artifact]]):
"""Populate output_dict to executor_output."""
for key, artifact_list in output_dict.items():
artifacts = execution_result_pb2.ExecutorOutput.ArtifactList()
for artifact in artifact_list:
artifacts.artifacts.append(artifact.mlmd_artifact)
executor_output.output_artifacts[key].CopyFrom(artifacts)
class PythonExecutorOperator(base_executor_operator.BaseExecutorOperator):
"""PythonExecutorOperator handles python class based executor's init and execution.
Attributes:
extra_flags: Extra flags that will pass to Python executors. It come from
two sources in the order:
1. The `extra_flags` set in the executor spec.
2. The flags passed in when starting the program by users or by other
systems.
The interpretation of these flags relying on the executor implementation.
"""
SUPPORTED_EXECUTOR_SPEC_TYPE = [executable_spec_pb2.PythonClassExecutableSpec]
SUPPORTED_PLATFORM_CONFIG_TYPE = []
def __init__(self,
executor_spec: message.Message,
platform_config: Optional[message.Message] = None):
"""Initialize an PythonExecutorOperator.
Args:
executor_spec: The specification of how to initialize the executor.
platform_config: The specification of how to allocate resource for the
executor.
"""
# Python executors run locally, so platform_config is not used.
del platform_config
super().__init__(executor_spec)
python_class_executor_spec = cast(
executable_spec_pb2.PythonClassExecutableSpec, self._executor_spec)
self._executor_cls = import_utils.import_class_by_path(
python_class_executor_spec.class_path)
self.extra_flags = []
self.extra_flags.extend(python_class_executor_spec.extra_flags)
self.extra_flags.extend(sys.argv[1:])
def run_executor(
self, execution_info: data_types.ExecutionInfo
) -> execution_result_pb2.ExecutorOutput:
"""Invokers executors given input from the Launcher.
Args:
execution_info: A wrapper of the details of this execution.
Returns:
The output from executor.
"""
# TODO(b/156000550): We should not specialize `Context` to embed beam
# pipeline args. Instead, the `Context` should consist of generic-purpose
# `extra_flags` which can be interpreted differently by different
# implementations of executors.
context = base_executor.BaseExecutor.Context(
beam_pipeline_args=self.extra_flags,
tmp_dir=execution_info.tmp_dir,
unique_id=str(execution_info.execution_id),
executor_output_uri=execution_info.execution_output_uri,
stateful_working_dir=execution_info.stateful_working_dir)
executor = self._executor_cls(context=context)
for _, artifact_list in execution_info.input_dict.items():
for artifact in artifact_list:
if isinstance(artifact, ValueArtifact):
# Read ValueArtifact into memory.
artifact.read()
result = executor.Do(execution_info.input_dict, execution_info.output_dict,
execution_info.exec_properties)
if not result:
# If result is not returned from the Do function, then try to
# read from the executor_output_uri.
if fileio.exists(execution_info.execution_output_uri):
result = execution_result_pb2.ExecutorOutput.FromString(
fileio.open(execution_info.execution_output_uri, 'rb').read())
else:
# Old style TFX executor doesn't return executor_output, but modify
# output_dict and exec_properties in place. For backward compatibility,
# we use their executor_output and exec_properties to construct
# ExecutorOutput.
result = execution_result_pb2.ExecutorOutput()
_populate_output_artifact(result, execution_info.output_dict)
return result | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/python_executor_operator.py | 0.739893 | 0.427935 | python_executor_operator.py | pypi |
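# Hedged sketch (not part of the original module): the backward-compatibility
# branch above, packing an in-place-modified output_dict into an
# ExecutorOutput proto. The artifact type and URI are hypothetical examples.
def _example_pack_output():
  from tfx.types import standard_artifacts
  model = standard_artifacts.Model()
  model.uri = '/tmp/example/model'  # Hypothetical output URI.
  executor_output = execution_result_pb2.ExecutorOutput()
  _populate_output_artifact(executor_output, {'model': [model]})
  # executor_output.output_artifacts['model'] now holds one MLMD artifact.
  return executor_output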
"""Portable library for registering and publishing executions."""
import copy
import os
from typing import List, Mapping, MutableMapping, Optional, Sequence, cast
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from ml_metadata.proto import metadata_store_pb2
def _check_validity(new_artifact: metadata_store_pb2.Artifact,
original_artifact: types.Artifact,
has_multiple_artifacts: bool
) -> None:
"""Check the validity of new artifact against the original artifact."""
if new_artifact.type_id != original_artifact.type_id:
raise RuntimeError('Executor output should not change artifact type.')
if has_multiple_artifacts:
# If there are multiple artifacts in the executor output, their URIs should
# be direct sub-directories of the system generated URI.
if os.path.dirname(new_artifact.uri) != original_artifact.uri:
raise RuntimeError(
'When there are multiple artifacts to publish, their URIs '
'should be direct sub-directories of the URI of the system generated '
'artifact.')
else:
# If there is only one output artifact, its URI should not be changed
if new_artifact.uri != original_artifact.uri:
raise RuntimeError(
'When there is one artifact to publish, the URI of it should be '
'identical to the URI of system generated artifact.')
def publish_cached_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
) -> None:
"""Marks an existing execution as using cached outputs from README.ml-pipelines-sdk.md previous execution.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associated with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.CACHED
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
input_artifacts=None,
output_artifacts=output_artifacts)
def publish_succeeded_execution(
metadata_handler: metadata.Metadata,
execution_id: int,
contexts: Sequence[metadata_store_pb2.Context],
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> Optional[MutableMapping[str, List[types.Artifact]]]:
"""Marks an existing execution as success.
Also publishes the output artifacts produced by the execution. This method
will also merge the executor-produced info into the system generated output
artifacts. The `last_known_state` of the execution will be changed to
`COMPLETE` and the output artifacts will be marked as `LIVE`.
Args:
metadata_handler: A handler to access MLMD.
execution_id: The id of the execution to mark successful.
contexts: MLMD contexts to associated with the execution.
output_artifacts: Output artifacts skeleton of the execution, generated by
the system. Each artifact will be linked with the execution through an
event with type OUTPUT.
executor_output: Executor outputs. `executor_output.output_artifacts` will
be used to update the system-generated output artifacts passed in through
the `output_artifacts` arg. There are three constraints on the update: 1. The
keys in `executor_output.output_artifacts` are expected to be a subset
of the system-generated output artifacts dict. 2. An update to a certain
key should contain all the artifacts under that key. 3. An update to an
artifact should not change the type of the artifact.
Returns:
The maybe-updated output_artifacts; note that only outputs whose keys are in
executor_output will be updated and others will be untouched. That said,
it can be partially updated.
Raises:
RuntimeError: if the executor output to an output channel is partial.
"""
output_artifacts = copy.deepcopy(output_artifacts) or {}
output_artifacts = cast(MutableMapping[str, List[types.Artifact]],
output_artifacts)
if executor_output:
if not set(executor_output.output_artifacts.keys()).issubset(
output_artifacts.keys()):
raise RuntimeError(
'Executor output %s contains more keys than output skeleton %s.' %
(executor_output, output_artifacts))
for key, artifact_list in output_artifacts.items():
if key not in executor_output.output_artifacts:
continue
updated_artifact_list = executor_output.output_artifacts[key].artifacts
# We assume the original output dict must include at least one output
# artifact and all artifacts in the list share the same type.
original_artifact = artifact_list[0]
# Update the artifact list with what's in the executor output
artifact_list.clear()
# TODO(b/175426744): revisit this:
# 1) Whether multiple outputs are needed or not after TFX components
# are upgraded.
# 2) If multiple outputs are needed and are a common practice, should we
# use the driver instead to create the list of output artifacts, instead
# of letting the executor create them.
for proto_artifact in updated_artifact_list:
_check_validity(
proto_artifact, original_artifact, len(updated_artifact_list) > 1)
python_artifact = types.Artifact(original_artifact.artifact_type)
python_artifact.set_mlmd_artifact(proto_artifact)
artifact_list.append(python_artifact)
# Marks output artifacts as LIVE.
for artifact_list in output_artifacts.values():
for artifact in artifact_list:
artifact.mlmd_artifact.state = metadata_store_pb2.Artifact.LIVE
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
output_artifacts=output_artifacts)
return output_artifacts
def publish_failed_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
"""Marks an existing execution as failed.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associated with the execution.
execution_id: The id of the execution.
executor_output: The output of executor.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.FAILED
if executor_output and executor_output.HasField('execution_result'):
execution_lib.set_execution_result(
executor_output.execution_result, execution)
execution_lib.put_execution(metadata_handler, execution, contexts)
def publish_internal_execution(
metadata_handler: metadata.Metadata,
contexts: Sequence[metadata_store_pb2.Context],
execution_id: int,
output_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None
) -> None:
"""Marks an exeisting execution as as success and links its output to an INTERNAL_OUTPUT event.
Args:
metadata_handler: A handler to access MLMD.
contexts: MLMD contexts to associated with the execution.
execution_id: The id of the execution.
output_artifacts: Output artifacts of the execution. Each artifact will be
linked with the execution through an event with type INTERNAL_OUTPUT.
"""
[execution] = metadata_handler.store.get_executions_by_id([execution_id])
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
execution_lib.put_execution(
metadata_handler,
execution,
contexts,
output_artifacts=output_artifacts,
output_event_type=metadata_store_pb2.Event.INTERNAL_OUTPUT)
def register_execution(
metadata_handler: metadata.Metadata,
execution_type: metadata_store_pb2.ExecutionType,
contexts: Sequence[metadata_store_pb2.Context],
input_artifacts: Optional[MutableMapping[str,
Sequence[types.Artifact]]] = None,
exec_properties: Optional[Mapping[str, types.Property]] = None,
) -> metadata_store_pb2.Execution:
"""Registers README.ml-pipelines-sdk.md new execution in MLMD.
Along with the execution:
- the input artifacts will be linked to the execution.
- the contexts will be linked to both the execution and its input artifacts.
Args:
metadata_handler: A handler to access MLMD.
execution_type: The type of the execution.
contexts: MLMD contexts to associated with the execution.
input_artifacts: Input artifacts of the execution. Each artifact will be
linked with the execution through an event.
exec_properties: Execution properties. Will be attached to the execution.
Returns:
An MLMD execution that is registered in MLMD, with id populated.
"""
execution = execution_lib.prepare_execution(
metadata_handler, execution_type, metadata_store_pb2.Execution.RUNNING,
exec_properties)
return execution_lib.put_execution(
metadata_handler, execution, contexts, input_artifacts=input_artifacts) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/execution_publish_utils.py | 0.886703 | 0.359477 | execution_publish_utils.py | pypi |
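# Hedged sketch (not part of the original module): how the functions above
# compose into one execution lifecycle. `run_executor_fn` is a hypothetical
# callable that runs the executor and returns an ExecutorOutput.
def _example_lifecycle(metadata_handler, execution_type, contexts,
                       input_artifacts, output_artifacts, run_executor_fn):
  # Register the execution as RUNNING, linking inputs and contexts.
  execution = register_execution(
      metadata_handler, execution_type, contexts,
      input_artifacts=input_artifacts)
  try:
    executor_output = run_executor_fn()
  except Exception:
    # Mark the execution FAILED before re-raising.
    publish_failed_execution(metadata_handler, contexts, execution.id)
    raise
  # Mark the execution COMPLETE and publish (possibly updated) outputs.
  return publish_succeeded_execution(
      metadata_handler, execution.id, contexts, output_artifacts,
      executor_output)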
"""This module defines the handler for resolver node."""
from typing import Any, Dict
from absl import logging
from tfx.orchestration import metadata
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import system_node_handler
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
class ResolverNodeHandler(system_node_handler.SystemNodeHandler):
"""The handler for the system Resolver node."""
def _extract_proto_map(
self,
# The actual type of proto message of map<str, pipeline_pb2.Value>.
proto_map: Any) -> Dict[str, Any]:
extract_mlmd_value = lambda v: getattr(v, v.WhichOneof('value'))
return {k: extract_mlmd_value(v.field_value) for k, v in proto_map.items()}
def run(
self, mlmd_connection: metadata.Metadata,
pipeline_node: pipeline_pb2.PipelineNode,
pipeline_info: pipeline_pb2.PipelineInfo,
pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec
) -> metadata_store_pb2.Execution:
"""Runs Resolver specific logic.
Args:
mlmd_connection: ML metadata connection.
pipeline_node: The specification of the node that this launcher launches.
pipeline_info: The information of the pipeline that this node runs in.
pipeline_runtime_spec: The runtime information of the pipeline that this
node runs in.
Returns:
The execution of the run.
"""
logging.info('Running as a resolver node.')
with mlmd_connection as m:
# 1. Prepares all contexts.
contexts = context_lib.prepare_contexts(
metadata_handler=m, node_contexts=pipeline_node.contexts)
# 2. Resolves inputs and execution properties.
exec_properties = inputs_utils.resolve_parameters(
node_parameters=pipeline_node.parameters)
input_artifacts = inputs_utils.resolve_input_artifacts(
metadata_handler=m, node_inputs=pipeline_node.inputs)
# 3. Registers execution in metadata.
execution = execution_publish_utils.register_execution(
metadata_handler=m,
execution_type=pipeline_node.node_info.type,
contexts=contexts,
exec_properties=exec_properties)
# 4. Publish the execution, with the resolved input artifacts as its
# output artifacts.
execution_publish_utils.publish_internal_execution(
metadata_handler=m,
contexts=contexts,
execution_id=execution.id,
output_artifacts=input_artifacts)
return execution | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/resolver_node_handler.py | 0.959478 | 0.336004 | resolver_node_handler.py | pypi |
"""Data types shared for orchestration."""
from typing import Any, Dict, List
import attr
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.proto.orchestration import execution_invocation_pb2
from tfx.proto.orchestration import pipeline_pb2
# TODO(b/150979622): We should introduce an id that is not changed across
# retries of the same component run and pass it to executor operators for
# human-readability purposes.
# TODO(b/165359991): Restore 'auto_attribs=True' once we drop Python3.5 support.
@attr.s
class ExecutionInfo:
"""A struct to store information for an execution."""
# LINT.IfChange
# The Execution id that is registered in MLMD.
execution_id = attr.ib(type=int, default=None)
# The input map to feed to execution
input_dict = attr.ib(type=Dict[str, List[types.Artifact]], default=None)
# The output map to feed to execution
output_dict = attr.ib(type=Dict[str, List[types.Artifact]], default=None)
# The exec_properties to feed to execution
exec_properties = attr.ib(type=Dict[str, Any], default=None)
# The uri to the execution result. Note that drivers, executors and
# launchers may not run in the same process, so they should use this uri to
# "return" the execution result to the launcher.
execution_output_uri = attr.ib(type=str, default=None)
# Stateful working dir will be deterministic given pipeline, node and run_id.
# The typical use case is to restore a long-running executor's state after
# eviction. For example, a Trainer can use this directory to store
# checkpoints.
stateful_working_dir = attr.ib(type=str, default=None)
# A temporary dir for executions; it is expected to be cleaned up at the end
# of executions in both success and failure cases.
tmp_dir = attr.ib(type=str, default=None)
# The config of this Node.
pipeline_node = attr.ib(type=pipeline_pb2.PipelineNode, default=None)
# The config of the pipeline that this node is running in.
pipeline_info = attr.ib(type=pipeline_pb2.PipelineInfo, default=None)
# The id of the pipeline run that this execution is in.
pipeline_run_id = attr.ib(type=str, default=None)
# LINT.ThenChange(../../proto/orchestration/execution_invocation.proto)
def to_proto(self) -> execution_invocation_pb2.ExecutionInvocation:
return execution_invocation_pb2.ExecutionInvocation(
execution_id=self.execution_id,
input_dict=data_types_utils.build_artifact_struct_dict(self.input_dict),
output_dict=data_types_utils.build_artifact_struct_dict(
self.output_dict),
execution_properties=data_types_utils.build_metadata_value_dict(
self.exec_properties),
output_metadata_uri=self.execution_output_uri,
stateful_working_dir=self.stateful_working_dir,
tmp_dir=self.tmp_dir,
pipeline_node=self.pipeline_node,
pipeline_info=self.pipeline_info,
pipeline_run_id=self.pipeline_run_id)
@classmethod
def from_proto(
cls, execution_invocation: execution_invocation_pb2.ExecutionInvocation
) -> 'ExecutionInfo':
return cls(
execution_id=execution_invocation.execution_id,
input_dict=data_types_utils.build_artifact_dict(
execution_invocation.input_dict),
output_dict=data_types_utils.build_artifact_dict(
execution_invocation.output_dict),
exec_properties=data_types_utils.build_value_dict(
execution_invocation.execution_properties),
execution_output_uri=execution_invocation.output_metadata_uri,
stateful_working_dir=execution_invocation.stateful_working_dir,
tmp_dir=execution_invocation.tmp_dir,
pipeline_node=execution_invocation.pipeline_node,
pipeline_info=execution_invocation.pipeline_info,
pipeline_run_id=execution_invocation.pipeline_run_id) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/data_types.py | 0.657318 | 0.325105 | data_types.py | pypi |
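# Hedged sketch (not part of the original module): the proto round trip above
# is how an ExecutionInfo crosses process boundaries between a launcher and a
# remotely running executor.
def _round_trip(info: ExecutionInfo) -> ExecutionInfo:
  # Serialize to ExecutionInvocation and reconstruct an equivalent object.
  return ExecutionInfo.from_proto(info.to_proto())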
"""This module defines the handler for importer node."""
from typing import Any, Dict
from absl import logging
from tfx import types
from tfx.dsl.components.common import importer
from tfx.orchestration import metadata
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import system_node_handler
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
class ImporterNodeHandler(system_node_handler.SystemNodeHandler):
"""The handler for the system Importer node."""
def _extract_proto_map(
self,
# The actual type of proto message of map<str, pipeline_pb2.Value>.
proto_map: Any
) -> Dict[str, Any]:
extract_mlmd_value = lambda v: getattr(v, v.WhichOneof('value'))
return {k: extract_mlmd_value(v.field_value) for k, v in proto_map.items()}
def run(
self, mlmd_connection: metadata.Metadata,
pipeline_node: pipeline_pb2.PipelineNode,
pipeline_info: pipeline_pb2.PipelineInfo,
pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec
) -> metadata_store_pb2.Execution:
"""Runs Importer specific logic.
Args:
mlmd_connection: ML metadata connection.
pipeline_node: The specification of the node that this launcher launches.
pipeline_info: The information of the pipeline that this node runs in.
pipeline_runtime_spec: The runtime information of the pipeline that this
node runs in.
Returns:
The execution of the run.
"""
logging.info('Running as an importer node.')
with mlmd_connection as m:
# 1. Prepares all contexts.
contexts = context_lib.prepare_contexts(
metadata_handler=m, node_contexts=pipeline_node.contexts)
# 2. Resolves execution properties. Please note that importers have no
# inputs.
exec_properties = inputs_utils.resolve_parameters(
node_parameters=pipeline_node.parameters)
# 3. Registers execution in metadata.
execution = execution_publish_utils.register_execution(
metadata_handler=m,
execution_type=pipeline_node.node_info.type,
contexts=contexts,
exec_properties=exec_properties)
# 4. Generate output artifacts to represent the imported artifacts.
output_spec = pipeline_node.outputs.outputs[importer.IMPORT_RESULT_KEY]
properties = self._extract_proto_map(
output_spec.artifact_spec.additional_properties)
custom_properties = self._extract_proto_map(
output_spec.artifact_spec.additional_custom_properties)
output_artifact_class = types.Artifact(
output_spec.artifact_spec.type).type
output_artifacts = importer.generate_output_dict(
metadata_handler=m,
uri=str(exec_properties[importer.SOURCE_URI_KEY]),
properties=properties,
custom_properties=custom_properties,
reimport=bool(exec_properties[importer.REIMPORT_OPTION_KEY]),
output_artifact_class=output_artifact_class,
mlmd_artifact_type=output_spec.artifact_spec.type)
# 5. Publish the output artifacts.
execution_publish_utils.publish_succeeded_execution(
metadata_handler=m,
execution_id=execution.id,
contexts=contexts,
output_artifacts=output_artifacts)
return execution | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/importer_node_handler.py | 0.946337 | 0.286325 | importer_node_handler.py | pypi |
"""This module defines README.ml-pipelines-sdk.md generic Launcher for all TFleX nodes."""
from typing import Any, Dict, List, Optional, Text, Type, TypeVar
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import metadata
from tfx.orchestration.portable import base_driver_operator
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import cache_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import docker_executor_operator
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import importer_node_handler
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable import python_driver_operator
from tfx.orchestration.portable import python_executor_operator
from tfx.orchestration.portable import resolver_node_handler
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import driver_output_pb2
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
# Subclasses of BaseExecutorOperator
ExecutorOperator = TypeVar(
'ExecutorOperator', bound=base_executor_operator.BaseExecutorOperator)
# Subclasses of BaseDriverOperator
DriverOperator = TypeVar(
'DriverOperator', bound=base_driver_operator.BaseDriverOperator)
DEFAULT_EXECUTOR_OPERATORS = {
executable_spec_pb2.PythonClassExecutableSpec:
python_executor_operator.PythonExecutorOperator,
executable_spec_pb2.ContainerExecutableSpec:
docker_executor_operator.DockerExecutorOperator
}
DEFAULT_DRIVER_OPERATORS = {
executable_spec_pb2.PythonClassExecutableSpec:
python_driver_operator.PythonDriverOperator
}
# LINT.IfChange
_SYSTEM_NODE_HANDLERS = {
'tfx.dsl.components.common.importer.Importer':
importer_node_handler.ImporterNodeHandler,
'tfx.dsl.components.common.resolver.Resolver':
resolver_node_handler.ResolverNodeHandler,
# TODO(b/177457236): Remove support for the following after release.
'tfx.dsl.components.common.importer_node.ImporterNode':
importer_node_handler.ImporterNodeHandler,
'tfx.dsl.components.common.resolver_node.ResolverNode':
resolver_node_handler.ResolverNodeHandler,
}
# LINT.ThenChange(Internal system node list)
# TODO(b/165359991): Restore 'auto_attribs=True' once we drop Python3.5 support.
@attr.s
class _PrepareExecutionResult:
"""A wrapper class using as the return value of _prepare_execution()."""
# The information used by executor operators.
execution_info = attr.ib(type=data_types.ExecutionInfo, default=None)
# The Execution registered in MLMD.
execution_metadata = attr.ib(type=metadata_store_pb2.Execution, default=None)
# Contexts of the execution, usually used by Publisher.
contexts = attr.ib(type=List[metadata_store_pb2.Context], default=None)
# TODO(b/156126088): Update the following documentation when this bug is
# closed.
# Whether an execution is needed. An execution is not needed when:
# 1) Not all the required input are ready.
# 2) The input value doesn't meet the driver's requirement.
# 3) Cache result is used.
is_execution_needed = attr.ib(type=bool, default=False)
class _ExecutionFailedError(Exception):
"""An internal error to carry ExecutorOutput when it is raised."""
def __init__(self, err_msg: str,
executor_output: execution_result_pb2.ExecutorOutput):
super(_ExecutionFailedError, self).__init__(err_msg)
self._executor_output = executor_output
@property
def executor_output(self):
return self._executor_output
class Launcher(object):
"""Launcher is the main entrance of nodes in TFleX.
It handles TFX internal details like artifact resolving, execution
triggering and result publishing.
"""
def __init__(
self,
pipeline_node: pipeline_pb2.PipelineNode,
mlmd_connection: metadata.Metadata,
pipeline_info: pipeline_pb2.PipelineInfo,
pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec,
executor_spec: Optional[message.Message] = None,
custom_driver_spec: Optional[message.Message] = None,
platform_config: Optional[message.Message] = None,
custom_executor_operators: Optional[Dict[Any,
Type[ExecutorOperator]]] = None,
custom_driver_operators: Optional[Dict[Any,
Type[DriverOperator]]] = None):
"""Initializes README.ml-pipelines-sdk.md Launcher.
Args:
      pipeline_node: The specification of the node that this launcher launches.
mlmd_connection: ML metadata connection.
pipeline_info: The information of the pipeline that this node runs in.
pipeline_runtime_spec: The runtime information of the pipeline that this
node runs in.
      executor_spec: Specification for the executor of the node. This is
        expected for all component nodes. It is used to determine the
        specific ExecutorOperator class to execute the node and will be
        passed into the ExecutorOperator.
custom_driver_spec: Specification for custom driver. This is expected only
for advanced use cases.
platform_config: Platform config that will be used as auxiliary info of
the node execution. This will be passed to ExecutorOperator along with
the `executor_spec`.
      custom_executor_operators: A map of ExecutableSpec to its
        ExecutorOperator implementation.
      custom_driver_operators: A map of ExecutableSpec to its DriverOperator
        implementation.
Raises:
ValueError: when component and component_config are not launchable by the
launcher.
"""
self._pipeline_node = pipeline_node
self._mlmd_connection = mlmd_connection
self._pipeline_info = pipeline_info
self._pipeline_runtime_spec = pipeline_runtime_spec
self._executor_spec = executor_spec
self._executor_operators = {}
self._executor_operators.update(DEFAULT_EXECUTOR_OPERATORS)
self._executor_operators.update(custom_executor_operators or {})
self._driver_operators = {}
self._driver_operators.update(DEFAULT_DRIVER_OPERATORS)
self._driver_operators.update(custom_driver_operators or {})
self._executor_operator = None
if executor_spec:
self._executor_operator = self._executor_operators[type(executor_spec)](
executor_spec, platform_config)
self._output_resolver = outputs_utils.OutputsResolver(
pipeline_node=self._pipeline_node,
pipeline_info=self._pipeline_info,
pipeline_runtime_spec=self._pipeline_runtime_spec)
self._driver_operator = None
if custom_driver_spec:
self._driver_operator = self._driver_operators[type(custom_driver_spec)](
custom_driver_spec, self._mlmd_connection)
system_node_handler_class = _SYSTEM_NODE_HANDLERS.get(
self._pipeline_node.node_info.type.name)
self._system_node_handler = None
if system_node_handler_class:
self._system_node_handler = system_node_handler_class()
assert bool(self._executor_operator) or bool(self._system_node_handler), \
        'A node must be a system node or have an executor.'
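  # Example (illustrative sketch, not part of the library): launching a single
  # node of a compiled pipeline. The IR protos, connection config and executor
  # spec are assumed to come from the compiled pipeline and its deployment
  # config.
  #
  #   launcher = Launcher(
  #       pipeline_node=node.pipeline_node,
  #       mlmd_connection=metadata.Metadata(connection_config),
  #       pipeline_info=pipeline.pipeline_info,
  #       pipeline_runtime_spec=pipeline.runtime_spec,
  #       executor_spec=executor_spec)
  #   execution = launcher.launch()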
def _prepare_execution(self) -> _PrepareExecutionResult:
"""Prepares inputs, outputs and execution properties for actual execution."""
    # TODO(b/150979622): Handle the edge case where the component gets evicted
    # between a successful publish and the stateful working dir being cleaned
    # up. Otherwise subsequent retries will keep failing because of duplicate
    # publishes.
with self._mlmd_connection as m:
# 1.Prepares all contexts.
contexts = context_lib.prepare_contexts(
metadata_handler=m, node_contexts=self._pipeline_node.contexts)
      # 2. Resolves inputs and execution properties.
exec_properties = inputs_utils.resolve_parameters(
node_parameters=self._pipeline_node.parameters)
input_artifacts = inputs_utils.resolve_input_artifacts(
metadata_handler=m, node_inputs=self._pipeline_node.inputs)
      # 3. If not all required inputs are available, return an ExecutionInfo
      # with is_execution_needed set to False. No publish will happen, so
      # downstream nodes won't be triggered.
if input_artifacts is None:
        logging.info('Not all required inputs are ready, abandoning execution.')
return _PrepareExecutionResult(
execution_info=data_types.ExecutionInfo(),
contexts=contexts,
is_execution_needed=False)
# 4. Registers execution in metadata.
execution = execution_publish_utils.register_execution(
metadata_handler=m,
execution_type=self._pipeline_node.node_info.type,
contexts=contexts,
input_artifacts=input_artifacts,
exec_properties=exec_properties)
# 5. Resolve output
output_artifacts = self._output_resolver.generate_output_artifacts(
execution.id)
      # If there is a custom driver, run it.
if self._driver_operator:
driver_output = self._driver_operator.run_driver(
data_types.ExecutionInfo(
input_dict=input_artifacts,
output_dict=output_artifacts,
exec_properties=exec_properties,
execution_output_uri=self._output_resolver.get_driver_output_uri(
)))
self._update_with_driver_output(driver_output, exec_properties,
output_artifacts)
    # We reconnect to MLMD here because the custom driver closes the MLMD
    # connection on returning.
with self._mlmd_connection as m:
# 6. Check cached result
cache_context = cache_utils.get_cache_context(
metadata_handler=m,
pipeline_node=self._pipeline_node,
pipeline_info=self._pipeline_info,
executor_spec=self._executor_spec,
input_artifacts=input_artifacts,
output_artifacts=output_artifacts,
parameters=exec_properties)
contexts.append(cache_context)
cached_outputs = cache_utils.get_cached_outputs(
metadata_handler=m, cache_context=cache_context)
# 7. Should cache be used?
if (self._pipeline_node.execution_options.caching_options.enable_cache and
cached_outputs):
        # Publishes the cached result.
execution_publish_utils.publish_cached_execution(
metadata_handler=m,
contexts=contexts,
execution_id=execution.id,
output_artifacts=cached_outputs)
        logging.info('A cached execution %d is used.', execution.id)
return _PrepareExecutionResult(
execution_info=data_types.ExecutionInfo(execution_id=execution.id),
execution_metadata=execution,
contexts=contexts,
is_execution_needed=False)
pipeline_run_id = (
self._pipeline_runtime_spec.pipeline_run_id.field_value.string_value)
# 8. Going to trigger executor.
    logging.info('Going to run a new execution %d', execution.id)
return _PrepareExecutionResult(
execution_info=data_types.ExecutionInfo(
execution_id=execution.id,
input_dict=input_artifacts,
output_dict=output_artifacts,
exec_properties=exec_properties,
execution_output_uri=self._output_resolver
.get_executor_output_uri(execution.id),
stateful_working_dir=(
self._output_resolver.get_stateful_working_directory()),
tmp_dir=self._output_resolver.make_tmp_dir(execution.id),
pipeline_node=self._pipeline_node,
pipeline_info=self._pipeline_info,
pipeline_run_id=pipeline_run_id),
execution_metadata=execution,
contexts=contexts,
is_execution_needed=True)
def _run_executor(
self, execution_info: data_types.ExecutionInfo
) -> execution_result_pb2.ExecutorOutput:
"""Executes underlying component implementation."""
    logging.info('Going to run a new execution: %s', execution_info)
outputs_utils.make_output_dirs(execution_info.output_dict)
try:
executor_output = self._executor_operator.run_executor(execution_info)
code = executor_output.execution_result.code
if code != 0:
result_message = executor_output.execution_result.result_message
err = (f'Execution {execution_info.execution_id} '
f'failed with error code {code} and '
f'error message {result_message}')
logging.error(err)
raise _ExecutionFailedError(err, executor_output)
return executor_output
except Exception: # pylint: disable=broad-except
outputs_utils.remove_output_dirs(execution_info.output_dict)
raise
def _publish_successful_execution(
self, execution_id: int, contexts: List[metadata_store_pb2.Context],
output_dict: Dict[Text, List[types.Artifact]],
executor_output: execution_result_pb2.ExecutorOutput) -> None:
"""Publishes succeeded execution result to ml metadata."""
with self._mlmd_connection as m:
execution_publish_utils.publish_succeeded_execution(
metadata_handler=m,
execution_id=execution_id,
contexts=contexts,
output_artifacts=output_dict,
executor_output=executor_output)
def _publish_failed_execution(
self,
execution_id: int,
contexts: List[metadata_store_pb2.Context],
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
"""Publishes failed execution to ml metadata."""
with self._mlmd_connection as m:
execution_publish_utils.publish_failed_execution(
metadata_handler=m,
execution_id=execution_id,
contexts=contexts,
executor_output=executor_output)
def _clean_up_stateless_execution_info(
self, execution_info: data_types.ExecutionInfo):
logging.info('Cleaning up stateless execution info.')
# Clean up tmp dir
fileio.rmtree(execution_info.tmp_dir)
def _clean_up_stateful_execution_info(
self, execution_info: data_types.ExecutionInfo):
"""Post execution clean up."""
logging.info('Cleaning up stateful execution info.')
outputs_utils.remove_stateful_working_dir(
execution_info.stateful_working_dir)
def _update_with_driver_output(self,
driver_output: driver_output_pb2.DriverOutput,
exec_properties: Dict[Text, Any],
output_dict: Dict[Text, List[types.Artifact]]):
"""Updates output_dict with driver output."""
for key, artifact_list in driver_output.output_artifacts.items():
python_artifact_list = []
      # We assume the original output dict includes at least one output
      # artifact and that all output artifacts share the same type.
artifact_type = output_dict[key][0].artifact_type
for proto_artifact in artifact_list.artifacts:
python_artifact = types.Artifact(artifact_type)
python_artifact.set_mlmd_artifact(proto_artifact)
python_artifact_list.append(python_artifact)
output_dict[key] = python_artifact_list
for key, value in driver_output.exec_properties.items():
exec_properties[key] = getattr(value, value.WhichOneof('value'))
def launch(self) -> Optional[metadata_store_pb2.Execution]:
"""Executes the component, includes driver, executor and publisher.
Returns:
The metadata of this execution that is registered in MLMD. It can be None
if the driver decides not to run the execution.
Raises:
Exception: If the executor fails.
"""
logging.info('Running launcher for %s', self._pipeline_node)
if self._system_node_handler:
      # If this is a system node, run it and return directly.
return self._system_node_handler.run(self._mlmd_connection,
self._pipeline_node,
self._pipeline_info,
self._pipeline_runtime_spec)
    # Runs as a normal node.
prepare_execution_result = self._prepare_execution()
(execution_info, contexts,
is_execution_needed) = (prepare_execution_result.execution_info,
prepare_execution_result.contexts,
prepare_execution_result.is_execution_needed)
if is_execution_needed:
try:
executor_output = self._run_executor(execution_info)
except Exception as e: # pylint: disable=broad-except
execution_output = (
e.executor_output if isinstance(e, _ExecutionFailedError) else None)
self._publish_failed_execution(execution_info.execution_id, contexts,
execution_output)
logging.error('Execution %d failed.', execution_info.execution_id)
raise
finally:
self._clean_up_stateless_execution_info(execution_info)
logging.info('Execution %d succeeded.', execution_info.execution_id)
self._clean_up_stateful_execution_info(execution_info)
    logging.info('Publishing output artifacts %s for execution %s',
execution_info.output_dict, execution_info.execution_id)
self._publish_successful_execution(execution_info.execution_id, contexts,
execution_info.output_dict,
executor_output)
return prepare_execution_result.execution_metadata | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/launcher.py | 0.727007 | 0.30013 | launcher.py | pypi |
"""Portable libraries for event related APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Tuple
from ml_metadata.proto import metadata_store_pb2
_VALID_OUTPUT_EVENT_TYPES = frozenset([
metadata_store_pb2.Event.OUTPUT, metadata_store_pb2.Event.INTERNAL_OUTPUT,
metadata_store_pb2.Event.DECLARED_OUTPUT
])
_VALID_INPUT_EVENT_TYPES = frozenset([
metadata_store_pb2.Event.INPUT, metadata_store_pb2.Event.INTERNAL_INPUT,
metadata_store_pb2.Event.DECLARED_INPUT
])
def is_valid_output_event(event: metadata_store_pb2.Event,
expected_output_key: Optional[Text] = None) -> bool:
"""Evaluates whether an event is an output event with the right output key.
Args:
event: The event to evaluate.
expected_output_key: The expected output key.
Returns:
A bool value indicating result
"""
if expected_output_key:
return (len(event.path.steps) == 2 and # Valid event should have 2 steps.
event.path.steps[0].key == expected_output_key and
event.type in _VALID_OUTPUT_EVENT_TYPES)
else:
return event.type in _VALID_OUTPUT_EVENT_TYPES
def is_valid_input_event(event: metadata_store_pb2.Event,
expected_input_key: Optional[Text] = None) -> bool:
"""Evaluates whether an event is an input event with the right input key.
Args:
event: The event to evaluate.
expected_input_key: The expected input key.
Returns:
A bool value indicating result
"""
if expected_input_key:
return (len(event.path.steps) == 2 and # Valid event should have 2 steps.
event.path.steps[0].key == expected_input_key and
event.type in _VALID_INPUT_EVENT_TYPES)
else:
return event.type in _VALID_INPUT_EVENT_TYPES
def generate_event(
event_type: metadata_store_pb2.Event.Type,
key: Text,
index: int,
artifact_id: Optional[int] = None,
execution_id: Optional[int] = None) -> metadata_store_pb2.Event:
"""Generates README.ml-pipelines-sdk.md MLMD event given type, key and index.
Args:
event_type: The type of the event. e.g., INPUT, OUTPUT, etc.
    key: The key of the input or output channel. Usually a key can uniquely
      identify a channel of a TFX node.
    index: The index of the artifact in a channel. For example, a trainer
      might take more than one Examples artifact in one of its input channels.
      We need to distinguish each artifact when creating events.
artifact_id: Optional artifact id for the event.
execution_id: Optional execution id for the event.
Returns:
A metadata_store_pb2.Event message.
"""
event = metadata_store_pb2.Event()
event.type = event_type
# The order matters, we always use the first step to store key and the second
# step to store index.
event.path.steps.add().key = key
event.path.steps.add().index = index
if artifact_id:
event.artifact_id = artifact_id
if execution_id:
event.execution_id = execution_id
return event
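# Example (illustrative sketch; the ids below are hypothetical): generating an
# OUTPUT event for the second artifact in an 'examples' channel and checking
# its validity.
#
#   event = generate_event(
#       event_type=metadata_store_pb2.Event.OUTPUT,
#       key='examples',
#       index=1,
#       artifact_id=42,
#       execution_id=7)
#   assert is_valid_output_event(event, expected_output_key='examples')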
def get_artifact_path(event: metadata_store_pb2.Event) -> Tuple[Text, int]:
"""Gets the artifact path from the event.
This is useful for reconstructing the artifact dict (mapping from key to an
ordered list of artifacts) for an execution. The key and index of an artifact
are expected to be stored in the event in two steps where the first step is
the key and second is the index of the artifact within the list.
Args:
event: The event from which to extract path to the artifact.
Returns:
A tuple (<artifact key>, <artifact index>).
Raises:
ValueError: If there are not exactly 2 steps in the path corresponding to
the key and index of the artifact.
"""
if len(event.path.steps) != 2:
raise ValueError(
'Expected exactly two steps corresponding to key and index in event: {}'
.format(event))
return (event.path.steps[0].key, event.path.steps[1].index) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/mlmd/event_lib.py | 0.956624 | 0.519887 | event_lib.py | pypi |
"""Portable libraries for context related APIs."""
from typing import List, Text
from absl import logging
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import common_utils
from tfx.proto.orchestration import pipeline_pb2
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
CONTEXT_TYPE_EXECUTION_CACHE = 'execution_cache'
def _generate_context_proto(
metadata_handler: metadata.Metadata,
context_spec: pipeline_pb2.ContextSpec) -> metadata_store_pb2.Context:
"""Generates metadata_pb2.Context based on the ContextSpec message.
Args:
metadata_handler: A handler to access MLMD store.
    context_spec: A pipeline_pb2.ContextSpec message that instructs the
      registration of a context.
Returns:
A metadata_store_pb2.Context message.
Raises:
RuntimeError: When actual property type does not match provided metadata
type schema.
"""
context_type = common_utils.register_type_if_not_exist(
metadata_handler, context_spec.type)
context_name = data_types_utils.get_value(context_spec.name)
assert isinstance(context_name, Text), 'context name should be string.'
result = metadata_store_pb2.Context(
type_id=context_type.id, name=context_name)
for k, v in context_spec.properties.items():
if k in context_type.properties:
actual_property_type = data_types_utils.get_metadata_value_type(v)
if context_type.properties.get(k) == actual_property_type:
data_types_utils.set_metadata_value(result.properties[k], v)
else:
raise RuntimeError(
'Property type %s different from provided metadata type property type %s for key %s'
% (actual_property_type, context_type.properties.get(k), k))
else:
data_types_utils.set_metadata_value(result.custom_properties[k], v)
return result
def _register_context_if_not_exist(
metadata_handler: metadata.Metadata,
context_spec: pipeline_pb2.ContextSpec,
) -> metadata_store_pb2.Context:
"""Registers README.ml-pipelines-sdk.md context if not exist, otherwise returns the existing one.
Args:
metadata_handler: A handler to access MLMD store.
    context_spec: A pipeline_pb2.ContextSpec message that instructs the
      registration of a context.
Returns:
An MLMD context.
"""
context_type_name = context_spec.type.name
context_name = data_types_utils.get_value(context_spec.name)
context = metadata_handler.store.get_context_by_type_and_name(
type_name=context_type_name, context_name=context_name)
if context is not None:
return context
logging.debug('Failed to get context of type %s and name %s',
context_type_name, context_name)
# If Context is not found, try to register it.
context = _generate_context_proto(
metadata_handler=metadata_handler, context_spec=context_spec)
try:
[context_id] = metadata_handler.store.put_contexts([context])
context.id = context_id
# This might happen in cases we have parallel executions of nodes.
except mlmd.errors.AlreadyExistsError:
logging.debug('Context %s already exists.', context_name)
context = metadata_handler.store.get_context_by_type_and_name(
type_name=context_type_name, context_name=context_name)
assert context is not None, ('Context is missing for %s while put_contexts '
'reports that it existed.') % (
context_name)
logging.debug('ID of context %s is %s.', context_spec, context.id)
return context
def register_context_if_not_exists(
metadata_handler: metadata.Metadata,
context_type_name: Text,
context_name: Text,
) -> metadata_store_pb2.Context:
"""Registers README.ml-pipelines-sdk.md context if not exist, otherwise returns the existing one.
This is README.ml-pipelines-sdk.md simplified wrapper around the method above which only takes context
type and context name.
Args:
metadata_handler: A handler to access MLMD store.
context_type_name: The name of the context type.
context_name: The name of the context.
Returns:
An MLMD context.
"""
context_spec = pipeline_pb2.ContextSpec(
name=pipeline_pb2.Value(
field_value=metadata_store_pb2.Value(string_value=context_name)),
type=metadata_store_pb2.ContextType(name=context_type_name))
return _register_context_if_not_exist(
metadata_handler=metadata_handler, context_spec=context_spec)
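# Example (illustrative sketch; the connection config and names are
# hypothetical): registering a pipeline-level context by type and name.
#
#   with metadata.Metadata(connection_config) as m:
#     context = register_context_if_not_exists(
#         metadata_handler=m,
#         context_type_name='pipeline',
#         context_name='my_pipeline')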
def prepare_contexts(
metadata_handler: metadata.Metadata,
node_contexts: pipeline_pb2.NodeContexts,
) -> List[metadata_store_pb2.Context]:
"""Creates the contexts given specification.
Context types will be registered if not already exist.
Args:
metadata_handler: A handler to access MLMD store.
    node_contexts: A pipeline_pb2.NodeContexts message that instructs the
      registration of the contexts.
Returns:
A list of metadata_store_pb2.Context messages.
"""
return [
_generate_context_proto(
metadata_handler=metadata_handler, context_spec=context_spec)
for context_spec in node_contexts.contexts
] | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/mlmd/context_lib.py | 0.888484 | 0.33444 | context_lib.py | pypi |
"""Common MLMD utility libraries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import TypeVar
from absl import logging
from tfx.orchestration import metadata
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
MetadataType = TypeVar('MetadataType', metadata_store_pb2.ArtifactType,
metadata_store_pb2.ContextType,
metadata_store_pb2.ExecutionType)
def register_type_if_not_exist(
metadata_handler: metadata.Metadata,
metadata_type: MetadataType,
) -> MetadataType:
"""Registers README.ml-pipelines-sdk.md metadata type if not exists.
Uses existing type if schema is superset of what is needed. Otherwise tries
to register new metadata type.
Args:
metadata_handler: A handler to access MLMD store.
    metadata_type: The metadata type to register if it does not exist.
Returns:
A MetadataType with id
Raises:
    RuntimeError: If the new metadata type conflicts with the existing schema
      in MLMD.
ValueError: If metadata type is not expected.
"""
if metadata_type.id:
return metadata_type
if isinstance(metadata_type, metadata_store_pb2.ArtifactType):
get_type_handler = metadata_handler.store.get_artifact_type
put_type_handler = metadata_handler.store.put_artifact_type
elif isinstance(metadata_type, metadata_store_pb2.ContextType):
get_type_handler = metadata_handler.store.get_context_type
put_type_handler = metadata_handler.store.put_context_type
elif isinstance(metadata_type, metadata_store_pb2.ExecutionType):
get_type_handler = metadata_handler.store.get_execution_type
put_type_handler = metadata_handler.store.put_execution_type
else:
raise ValueError('Unexpected value type: %s.' % type(metadata_type))
try:
# Types can be evolved by adding new fields in newer releases.
# Here when upserting types:
    # a) we enable `can_add_fields` so that type updates made in the current
    # release are backward compatible with older releases;
    # b) we enable `can_omit_fields` so that the current release is forward
    # compatible with any type updates made by future releases.
type_id = put_type_handler(
metadata_type, can_add_fields=True, can_omit_fields=True)
    logging.debug('Registering a metadata type with id %s.', type_id)
metadata_type = get_type_handler(metadata_type.name)
return metadata_type
except mlmd.errors.AlreadyExistsError:
existing_type = get_type_handler(metadata_type.name)
assert existing_type is not None, (
'Not expected to get None when getting type %s.' % metadata_type.name)
warning_str = (
'Conflicting properties comparing with existing metadata type '
'with the same type name. Existing type: '
'%s, New type: %s') % (existing_type, metadata_type)
logging.warning(warning_str)
raise RuntimeError(warning_str) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/portable/mlmd/common_utils.py | 0.914324 | 0.194559 | common_utils.py | pypi |
r"""This module defines the entrypoint for the PythonExecutorOperator in TFX.
This library is intended to serve as the entrypoint for a binary that packages
the python executors in a pipeline. The resulting binary is called by the TFX
launcher and should not be called directly.
"""
from absl import flags
from absl import logging
from tfx.dsl.io import fileio
from tfx.orchestration import metadata
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import python_driver_operator
from tfx.orchestration.portable import python_executor_operator
from tfx.orchestration.python_execution_binary import python_execution_binary_utils
from tfx.proto.orchestration import driver_output_pb2
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from google.protobuf import text_format
FLAGS = flags.FLAGS
EXECUTION_INVOCATION_FLAG = flags.DEFINE_string(
'tfx_execution_info_b64', None, 'url safe base64 encoded binary '
'tfx.orchestration.ExecutionInvocation proto')
EXECUTABLE_SPEC_FLAG = flags.DEFINE_string(
'tfx_python_class_executable_spec_b64', None,
'tfx.orchestration.executable_spec.PythonClassExecutableSpec proto')
MLMD_CONNECTION_CONFIG_FLAG = flags.DEFINE_string(
'tfx_mlmd_connection_config_b64', None,
    'wrapper proto containing MLMD connection config. If set, this '
    'indicates a driver execution')
def _run_executor(
executable_spec: executable_spec_pb2.PythonClassExecutableSpec,
execution_info: data_types.ExecutionInfo
) -> execution_result_pb2.ExecutorOutput:
operator = python_executor_operator.PythonExecutorOperator(executable_spec)
return operator.run_executor(execution_info)
def _run_driver(
executable_spec: executable_spec_pb2.PythonClassExecutableSpec,
mlmd_connection_config: metadata.ConnectionConfigType,
execution_info: data_types.ExecutionInfo) -> driver_output_pb2.DriverOutput:
operator = python_driver_operator.PythonDriverOperator(
executable_spec, metadata.Metadata(mlmd_connection_config))
return operator.run_driver(execution_info)
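# Example invocation (illustrative; the flag values are base64 encoded
# serialized protos, typically produced with python_execution_binary_utils,
# and are elided here):
#
#   python -m tfx.orchestration.python_execution_binary.entrypoint \
#     --tfx_execution_info_b64=<b64> \
#     --tfx_python_class_executable_spec_b64=<b64>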
def main(_):
flags.mark_flag_as_required(EXECUTION_INVOCATION_FLAG.name)
flags.mark_flag_as_required(EXECUTABLE_SPEC_FLAG.name)
execution_info = python_execution_binary_utils.deserialize_execution_info(
EXECUTION_INVOCATION_FLAG.value)
python_class_executable_spec = (
python_execution_binary_utils.deserialize_executable_spec(
EXECUTABLE_SPEC_FLAG.value))
logging.info('execution_info = %r\n', execution_info)
logging.info('python_class_executable_spec = %s\n',
text_format.MessageToString(python_class_executable_spec))
  # MLMD connection config being set indicates a driver execution instead of
  # an executor execution, as accessing MLMD is not supported for executors.
if MLMD_CONNECTION_CONFIG_FLAG.value:
mlmd_connection_config = (
python_execution_binary_utils.deserialize_mlmd_connection_config(
MLMD_CONNECTION_CONFIG_FLAG.value))
run_result = _run_driver(python_class_executable_spec,
mlmd_connection_config, execution_info)
else:
run_result = _run_executor(python_class_executable_spec, execution_info)
if run_result:
with fileio.open(execution_info.execution_output_uri, 'wb') as f:
f.write(run_result.SerializeToString()) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/python_execution_binary/entrypoint.py | 0.702224 | 0.441191 | entrypoint.py | pypi |
r"""Shared IR serialization logic used by TFleX python executor binary."""
import base64
from tfx.orchestration import metadata
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_invocation_pb2
def deserialize_execution_info(
execution_info_b64: str) -> data_types.ExecutionInfo:
"""De-serializes the ExecutionInfo class from README.ml-pipelines-sdk.md url safe base64 encoded binary string."""
execution_info_proto = execution_invocation_pb2.ExecutionInvocation.FromString(
base64.urlsafe_b64decode(execution_info_b64))
return data_types.ExecutionInfo.from_proto(execution_info_proto)
def deserialize_mlmd_connection_config(
mlmd_connection_config_b64: str) -> metadata.ConnectionConfigType:
"""De-serializes an MLMD connection config from base64 flag."""
mlmd_connection_config = (
execution_invocation_pb2.MLMDConnectionConfig.FromString(
base64.b64decode(mlmd_connection_config_b64)))
return getattr(mlmd_connection_config,
mlmd_connection_config.WhichOneof('connection_config'))
def deserialize_executable_spec(
executable_spec_b64: str) -> executable_spec_pb2.PythonClassExecutableSpec:
"""De-serializes an executable spec from base64 flag."""
return executable_spec_pb2.PythonClassExecutableSpec.FromString(
base64.b64decode(executable_spec_b64))
def serialize_mlmd_connection_config(
connection_config: metadata.ConnectionConfigType) -> str:
"""Serializes an MLMD connection config into README.ml-pipelines-sdk.md base64 flag of its wrapper."""
mlmd_wrapper = execution_invocation_pb2.MLMDConnectionConfig()
for name, descriptor in (execution_invocation_pb2.MLMDConnectionConfig
.DESCRIPTOR.fields_by_name.items()):
if descriptor.message_type.full_name == connection_config.DESCRIPTOR.full_name:
getattr(mlmd_wrapper, name).CopyFrom(connection_config)
break
return base64.b64encode(mlmd_wrapper.SerializeToString()).decode('ascii')
def serialize_executable_spec(
executable_spec: executable_spec_pb2.PythonClassExecutableSpec) -> str:
"""Serializes an executable spec into README.ml-pipelines-sdk.md base64 flag."""
return base64.b64encode(executable_spec.SerializeToString()).decode('ascii')
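# Example (illustrative sketch): a serialize/deserialize round trip for an
# executable spec, as used when passing specs through command line flags. The
# class path below is hypothetical.
#
#   spec = executable_spec_pb2.PythonClassExecutableSpec(
#       class_path='my.module.MyExecutor')
#   flag_value = serialize_executable_spec(spec)
#   assert deserialize_executable_spec(flag_value) == spec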
def serialize_execution_info(execution_info: data_types.ExecutionInfo) -> str:
"""Serializes the ExecutionInfo class from README.ml-pipelines-sdk.md base64 flag."""
execution_info_proto = execution_info.to_proto()
return base64.b64encode(
execution_info_proto.SerializeToString()).decode('ascii') | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/python_execution_binary/python_execution_binary_utils.py | 0.848753 | 0.351228 | python_execution_binary_utils.py | pypi |
"""Definition of Airflow TFX runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from typing import Any, Dict, Optional, Text, Union
import absl
from airflow import models
from tfx.orchestration import pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.airflow import airflow_component
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
class AirflowPipelineConfig(pipeline_config.PipelineConfig):
"""Pipeline config for AirflowDagRunner."""
def __init__(self, airflow_dag_config: Dict[Text, Any] = None, **kwargs):
"""Creates an instance of AirflowPipelineConfig.
Args:
airflow_dag_config: Configs of Airflow DAG model. See
https://airflow.apache.org/_api/airflow/models/dag/index.html#airflow.models.dag.DAG
for the full spec.
**kwargs: keyword args for PipelineConfig.
"""
super(AirflowPipelineConfig, self).__init__(**kwargs)
self.airflow_dag_config = airflow_dag_config or {}
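# Example (illustrative sketch; the schedule and pipeline object are
# hypothetical): constructing a runner with Airflow DAG settings and
# producing a DAG from a logical pipeline.
#
#   runner = AirflowDagRunner(AirflowPipelineConfig(airflow_dag_config={
#       'schedule_interval': '@daily',
#       'start_date': datetime.datetime(2019, 1, 1),
#   }))
#   dag = runner.run(my_tfx_pipeline)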
class AirflowDagRunner(tfx_runner.TfxRunner):
"""Tfx runner on Airflow."""
def __init__(self,
config: Optional[Union[Dict[Text, Any],
AirflowPipelineConfig]] = None):
"""Creates an instance of AirflowDagRunner.
Args:
config: Optional Airflow pipeline config for customizing the launching of
each component.
"""
if config and not isinstance(config, AirflowPipelineConfig):
absl.logging.warning(
          'Passing config as a dict type is going to be deprecated in 0.1.16. '
          'Use AirflowPipelineConfig type instead.',
PendingDeprecationWarning)
config = AirflowPipelineConfig(airflow_dag_config=config)
super(AirflowDagRunner, self).__init__(config)
def run(self, tfx_pipeline: pipeline.Pipeline):
"""Deploys given logical pipeline on Airflow.
Args:
tfx_pipeline: Logical pipeline containing pipeline args and components.
Returns:
An Airflow DAG.
"""
# Merge airflow-specific configs with pipeline args
airflow_dag = models.DAG(
dag_id=tfx_pipeline.pipeline_info.pipeline_name,
**self._config.airflow_dag_config)
if 'tmp_dir' not in tfx_pipeline.additional_pipeline_args:
tmp_dir = os.path.join(tfx_pipeline.pipeline_info.pipeline_root, '.temp',
'')
tfx_pipeline.additional_pipeline_args['tmp_dir'] = tmp_dir
component_impl_map = {}
for tfx_component in tfx_pipeline.components:
(component_launcher_class,
component_config) = config_utils.find_component_launch_info(
self._config, tfx_component)
current_airflow_component = airflow_component.AirflowComponent(
airflow_dag,
component=tfx_component,
component_launcher_class=component_launcher_class,
pipeline_info=tfx_pipeline.pipeline_info,
enable_cache=tfx_pipeline.enable_cache,
metadata_connection_config=tfx_pipeline.metadata_connection_config,
beam_pipeline_args=tfx_pipeline.beam_pipeline_args,
additional_pipeline_args=tfx_pipeline.additional_pipeline_args,
component_config=component_config)
component_impl_map[tfx_component] = current_airflow_component
for upstream_node in tfx_component.upstream_nodes:
      assert upstream_node in component_impl_map, ('Components are not in '
                                                   'topological order')
current_airflow_component.set_upstream(
component_impl_map[upstream_node])
return airflow_dag | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/airflow/airflow_dag_runner.py | 0.856137 | 0.249013 | airflow_dag_runner.py | pypi |
"""Definition for Airflow component for TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from typing import Any, Dict, List, Text, Type
from airflow import models
from airflow.operators import python_operator
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration.config import base_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.utils import telemetry_utils
from ml_metadata.proto import metadata_store_pb2
def _airflow_component_launcher(
component: base_node.BaseNode, component_launcher_class: Type[
base_component_launcher.BaseComponentLauncher],
pipeline_info: data_types.PipelineInfo, driver_args: data_types.DriverArgs,
metadata_connection_config: metadata_store_pb2.ConnectionConfig,
beam_pipeline_args: List[Text], additional_pipeline_args: Dict[Text, Any],
component_config: base_component_config.BaseComponentConfig,
**kwargs) -> None:
"""Helper function to launch TFX component execution.
  This helper function will be called with Airflow env objects, which contain
  the run_id that we need to pass into the TFX ComponentLauncher.
Args:
component: TFX BaseComponent instance. This instance holds all inputs and
outputs placeholders as well as component properties.
component_launcher_class: The class of the launcher to launch the component.
pipeline_info: A data_types.PipelineInfo instance that holds pipeline
properties
driver_args: Component specific args for driver.
metadata_connection_config: Configuration for how to connect to metadata.
beam_pipeline_args: Pipeline arguments for Beam powered Components.
additional_pipeline_args: A dict of additional pipeline args.
component_config: Component config to launch the component.
**kwargs: Context arguments that will be passed in by Airflow, including:
- ti: TaskInstance object from which we can get run_id of the running
pipeline.
For more details, please refer to the code:
https://github.com/apache/airflow/blob/master/airflow/operators/python_operator.py
"""
# Populate run id from Airflow task instance.
pipeline_info.run_id = kwargs['ti'].get_dagrun().run_id
launcher = component_launcher_class.create(
component=component,
pipeline_info=pipeline_info,
driver_args=driver_args,
metadata_connection=metadata.Metadata(metadata_connection_config),
beam_pipeline_args=beam_pipeline_args,
additional_pipeline_args=additional_pipeline_args,
component_config=component_config)
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'airflow'}):
launcher.launch()
class AirflowComponent(python_operator.PythonOperator):
"""Airflow-specific TFX Component.
  This class wraps a component run into its own PythonOperator in Airflow.
"""
def __init__(self, parent_dag: models.DAG, component: base_node.BaseNode,
component_launcher_class: Type[
base_component_launcher.BaseComponentLauncher],
pipeline_info: data_types.PipelineInfo, enable_cache: bool,
metadata_connection_config: metadata_store_pb2.ConnectionConfig,
beam_pipeline_args: List[Text],
additional_pipeline_args: Dict[Text, Any],
component_config: base_component_config.BaseComponentConfig):
"""Constructs an Airflow implementation of TFX component.
Args:
parent_dag: An AirflowPipeline instance as the pipeline DAG.
component: An instance of base_node.BaseNode that holds all
        properties of a logical component.
component_launcher_class: The class of the launcher to launch the
component.
pipeline_info: An instance of data_types.PipelineInfo that holds pipeline
properties.
enable_cache: Whether or not cache is enabled for this component run.
metadata_connection_config: A config proto for metadata connection.
beam_pipeline_args: Pipeline arguments for Beam powered Components.
additional_pipeline_args: Additional pipeline args.
component_config: Component config to launch the component.
"""
# Prepare parameters to create TFX worker.
driver_args = data_types.DriverArgs(enable_cache=enable_cache)
super(AirflowComponent, self).__init__(
task_id=component.id,
provide_context=True,
python_callable=functools.partial(
_airflow_component_launcher,
component=component,
component_launcher_class=component_launcher_class,
pipeline_info=pipeline_info,
driver_args=driver_args,
metadata_connection_config=metadata_connection_config,
beam_pipeline_args=beam_pipeline_args,
additional_pipeline_args=additional_pipeline_args,
component_config=component_config),
dag=parent_dag) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/airflow/airflow_component.py | 0.916885 | 0.24907 | airflow_component.py | pypi |
"""Definition of Beam TFX runner."""
import datetime
import os
from typing import Any, Iterable, List, Optional, Text, Union
from absl import logging
import apache_beam as beam
from tfx.dsl.compiler import compiler
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration import pipeline as pipeline_py
from tfx.orchestration.beam.legacy import beam_dag_runner as legacy_beam_dag_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.orchestration.portable import tfx_runner
from tfx.proto.orchestration import local_deployment_config_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import telemetry_utils
from google.protobuf import any_pb2
from google.protobuf import message
# TODO(jyzhao): confirm it's re-executable, add test case.
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(Any)
class PipelineNodeAsDoFn(beam.DoFn):
"""Wrap node as beam DoFn."""
def __init__(self, pipeline_node: pipeline_pb2.PipelineNode,
mlmd_connection_config: metadata.ConnectionConfigType,
pipeline_info: pipeline_pb2.PipelineInfo,
pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec,
executor_spec: Optional[message.Message],
custom_driver_spec: Optional[message.Message],
deployment_config: Optional[message.Message]):
"""Initializes the PipelineNodeAsDoFn.
Args:
      pipeline_node: The specification of the node that this launcher launches.
mlmd_connection_config: ML metadata connection config.
pipeline_info: The information of the pipeline that this node runs in.
pipeline_runtime_spec: The runtime information of the pipeline that this
node runs in.
executor_spec: Specification for the executor of the node. This is
expected for all nodes. This will be used to determine the specific
ExecutorOperator class to be used to execute and will be passed into
ExecutorOperator.
custom_driver_spec: Specification for custom driver. This is expected only
for advanced use cases.
deployment_config: Deployment Config for the pipeline.
"""
self._pipeline_node = pipeline_node
self._mlmd_connection_config = mlmd_connection_config
self._pipeline_info = pipeline_info
self._pipeline_runtime_spec = pipeline_runtime_spec
self._executor_spec = executor_spec
self._custom_driver_spec = custom_driver_spec
self._node_id = pipeline_node.node_info.id
self._deployment_config = deployment_config
def process(self, element: Any, *signals: Iterable[Any]) -> None:
"""Executes node based on signals.
Args:
      element: A signal element to trigger the node.
*signals: side input signals indicate completeness of upstream nodes.
"""
for signal in signals:
assert not list(signal), 'Signal PCollection should be empty.'
logging.info('node %s is running.', self._node_id)
self._run_node()
logging.info('node %s is finished.', self._node_id)
def _run_node(self) -> None:
platform_config = self._extract_platform_config(self._deployment_config,
self._node_id)
launcher.Launcher(
pipeline_node=self._pipeline_node,
mlmd_connection=metadata.Metadata(self._mlmd_connection_config),
pipeline_info=self._pipeline_info,
pipeline_runtime_spec=self._pipeline_runtime_spec,
executor_spec=self._executor_spec,
platform_config=platform_config,
custom_driver_spec=self._custom_driver_spec).launch()
def _extract_platform_config(
self,
deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
node_id: str) -> Optional[message.Message]:
platform_config = deployment_config.node_level_platform_configs.get(node_id)
return (getattr(platform_config, platform_config.WhichOneof('config'))
if platform_config else None)
class BeamDagRunner(tfx_runner.TfxRunner):
"""Tfx runner on Beam."""
_PIPELINE_NODE_DO_FN_CLS = PipelineNodeAsDoFn
def __new__(
cls,
beam_orchestrator_args: Optional[List[Text]] = None,
config: Optional[pipeline_config.PipelineConfig] = None):
"""Initializes BeamDagRunner as README.ml-pipelines-sdk.md TFX orchestrator.
Create the legacy BeamDagRunner object if any of the legacy
`beam_orchestrator_args` or `config` arguments are passed. A migration
guide will be provided in README.ml-pipelines-sdk.md future TFX version for users of these arguments.
Args:
beam_orchestrator_args: Deprecated beam args for the beam orchestrator.
Note that this is different from the beam_pipeline_args within
additional_pipeline_args, which is for beam pipelines in components. If
this option is used, the legacy non-IR-based BeamDagRunner will be
constructed.
config: Deprecated optional pipeline config for customizing the launching
of each component. Defaults to pipeline config that supports
InProcessComponentLauncher and DockerComponentLauncher. If this option
is used, the legacy non-IR-based BeamDagRunner will be constructed.
Returns:
Legacy or IR-based BeamDagRunner object.
"""
if beam_orchestrator_args or config:
logging.info(
'Using the legacy BeamDagRunner since `beam_orchestrator_args` or '
'`config` argument was passed.')
return legacy_beam_dag_runner.BeamDagRunner(
beam_orchestrator_args=beam_orchestrator_args, config=config)
else:
return super(BeamDagRunner, cls).__new__(cls)
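  # Example (illustrative sketch; `my_tfx_pipeline` is a hypothetical logical
  # pipeline): the runner compiles the pipeline to IR and runs each node as a
  # Beam DoFn.
  #
  #   BeamDagRunner().run(my_tfx_pipeline)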
def _extract_platform_config(
self,
deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
node_id: str) -> Optional[message.Message]:
platform_config = deployment_config.node_level_platform_configs.get(node_id)
return (getattr(platform_config, platform_config.WhichOneof('config'))
if platform_config else None)
def _build_local_platform_config(
self, node_id: str,
spec: any_pb2.Any) -> local_deployment_config_pb2.LocalPlatformConfig:
"""Builds LocalPlatformConfig given the any proto from IntermediateDeploymentConfig."""
result = local_deployment_config_pb2.LocalPlatformConfig()
if spec.Is(result.docker_platform_config.DESCRIPTOR):
spec.Unpack(result.docker_platform_config)
else:
raise ValueError(
'Platform config of {} is expected to be of one of the '
'types of tfx.orchestration.deployment_config.LocalPlatformConfig.config '
'but got type {}'.format(node_id, spec.type_url))
return result
def _extract_deployment_config(
self, pipeline: pipeline_pb2.Pipeline
) -> local_deployment_config_pb2.LocalDeploymentConfig:
"""Extracts the proto.Any pipeline.deployment_config to LocalDeploymentConfig."""
return runner_utils.extract_local_deployment_config(pipeline)
def _extract_executor_spec(
self,
deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
node_id: str) -> Optional[message.Message]:
return runner_utils.extract_executor_spec(deployment_config, node_id)
def _extract_custom_driver_spec(
self,
deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
node_id: str
) -> Optional[message.Message]:
return runner_utils.extract_custom_driver_spec(deployment_config, node_id)
def _connection_config_from_deployment_config(self,
deployment_config: Any) -> Any:
return deployment_config.metadata_connection_config
def run(self, pipeline: Union[pipeline_pb2.Pipeline,
pipeline_py.Pipeline]) -> None:
"""Deploys given logical pipeline on Beam.
Args:
pipeline: Logical pipeline in IR format.
"""
# For CLI, while creating or updating pipeline, pipeline_args are extracted
# and hence we avoid deploying the pipeline.
if 'TFX_JSON_EXPORT_PIPELINE_ARGS_PATH' in os.environ:
return
if isinstance(pipeline, pipeline_py.Pipeline):
c = compiler.Compiler()
pipeline = c.compile(pipeline)
run_id = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')
    # Substitute the runtime parameter to be a concrete run_id
runtime_parameter_utils.substitute_runtime_parameter(
pipeline, {
constants.PIPELINE_RUN_ID_PARAMETER_NAME: run_id,
})
deployment_config = self._extract_deployment_config(pipeline)
connection_config = self._connection_config_from_deployment_config(
deployment_config)
logging.info('Running pipeline:\n %s', pipeline)
logging.info('Using deployment config:\n %s', deployment_config)
logging.info('Using connection config:\n %s', connection_config)
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):
with beam.Pipeline() as p:
        # Used for triggering the node DoFns.
root = p | 'CreateRoot' >> beam.Create([None])
# Stores mapping of node to its signal.
signal_map = {}
# pipeline.nodes are in topological order.
for node in pipeline.nodes:
# TODO(b/160882349): Support subpipeline
pipeline_node = node.pipeline_node
node_id = pipeline_node.node_info.id
executor_spec = self._extract_executor_spec(deployment_config,
node_id)
custom_driver_spec = self._extract_custom_driver_spec(
deployment_config, node_id)
# Signals from upstream nodes.
signals_to_wait = []
for upstream_node in pipeline_node.upstream_nodes:
assert upstream_node in signal_map, ('Nodes are not in '
'topological order')
signals_to_wait.append(signal_map[upstream_node])
logging.info('Node %s depends on %s.', node_id,
[s.producer.full_label for s in signals_to_wait])
          # Each signal is an empty PCollection. AsIter ensures a node will
# be triggered after upstream nodes are finished.
signal_map[node_id] = (
root
| 'Run[%s]' % node_id >> beam.ParDo(
self._PIPELINE_NODE_DO_FN_CLS(
pipeline_node=pipeline_node,
mlmd_connection_config=connection_config,
pipeline_info=pipeline.pipeline_info,
pipeline_runtime_spec=pipeline.runtime_spec,
executor_spec=executor_spec,
custom_driver_spec=custom_driver_spec,
deployment_config=deployment_config),
*[beam.pvalue.AsIter(s) for s in signals_to_wait]))
logging.info('Node %s is scheduled.', node_id) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/beam/beam_dag_runner.py | 0.808219 | 0.296947 | beam_dag_runner.py | pypi |
"""Definition of Beam TFX runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import Any, Iterable, List, Optional, Text, Type
import absl
import apache_beam as beam
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import docker_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.utils import telemetry_utils
# TODO(jyzhao): confirm it's re-executable, add test case.
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(Any)
class _ComponentAsDoFn(beam.DoFn):
"""Wrap component as beam DoFn."""
def __init__(self, component: base_node.BaseNode,
component_launcher_class: Type[
base_component_launcher.BaseComponentLauncher],
component_config: base_component_config.BaseComponentConfig,
tfx_pipeline: pipeline.Pipeline):
"""Initialize the _ComponentAsDoFn.
Args:
      component: The component to be executed.
component_launcher_class: The class of the launcher to launch the
component.
component_config: component config to launch the component.
tfx_pipeline: Logical pipeline that contains pipeline related information.
"""
driver_args = data_types.DriverArgs(enable_cache=tfx_pipeline.enable_cache)
metadata_connection = metadata.Metadata(
tfx_pipeline.metadata_connection_config)
self._component_launcher = component_launcher_class.create(
component=component,
pipeline_info=tfx_pipeline.pipeline_info,
driver_args=driver_args,
metadata_connection=metadata_connection,
beam_pipeline_args=tfx_pipeline.beam_pipeline_args,
additional_pipeline_args=tfx_pipeline.additional_pipeline_args,
component_config=component_config)
self._component_id = component.id
def process(self, element: Any, *signals: Iterable[Any]) -> None:
"""Executes component based on signals.
Args:
      element: A signal element to trigger the component.
*signals: side input signals indicate completeness of upstream components.
"""
for signal in signals:
assert not list(signal), 'Signal PCollection should be empty.'
self._run_component()
def _run_component(self) -> None:
absl.logging.info('Component %s is running.', self._component_id)
self._component_launcher.launch()
absl.logging.info('Component %s is finished.', self._component_id)
class BeamDagRunner(tfx_runner.TfxRunner):
"""Tfx runner on Beam."""
def __init__(self,
beam_orchestrator_args: Optional[List[Text]] = None,
config: Optional[pipeline_config.PipelineConfig] = None):
"""Initializes BeamDagRunner as README.ml-pipelines-sdk.md TFX orchestrator.
Args:
beam_orchestrator_args: beam args for the beam orchestrator. Note that
this is different from the beam_pipeline_args within
additional_pipeline_args, which is for beam pipelines in components.
config: Optional pipeline config for customizing the launching of each
component. Defaults to pipeline config that supports
InProcessComponentLauncher and DockerComponentLauncher.
"""
if config is None:
config = pipeline_config.PipelineConfig(
supported_launcher_classes=[
in_process_component_launcher.InProcessComponentLauncher,
docker_component_launcher.DockerComponentLauncher,
],
)
super(BeamDagRunner, self).__init__(config)
self._beam_orchestrator_args = beam_orchestrator_args
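  # Example (illustrative sketch; `my_tfx_pipeline` is a hypothetical logical
  # pipeline, and `--direct_num_workers` is a standard Beam DirectRunner
  # option):
  #
  #   BeamDagRunner(
  #       beam_orchestrator_args=['--direct_num_workers=4']).run(
  #           my_tfx_pipeline)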
def run(self, tfx_pipeline: pipeline.Pipeline) -> None:
"""Deploys given logical pipeline on Beam.
Args:
tfx_pipeline: Logical pipeline containing pipeline args and components.
"""
# For CLI, while creating or updating pipeline, pipeline_args are extracted
# and hence we avoid executing the pipeline.
if 'TFX_JSON_EXPORT_PIPELINE_ARGS_PATH' in os.environ:
return
tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):
with beam.Pipeline(argv=self._beam_orchestrator_args) as p:
        # Used for triggering the component DoFns.
root = p | 'CreateRoot' >> beam.Create([None])
# Stores mapping of component to its signal.
signal_map = {}
# pipeline.components are in topological order.
for component in tfx_pipeline.components:
component_id = component.id
# Signals from upstream components.
signals_to_wait = []
if component.upstream_nodes:
for upstream_node in component.upstream_nodes:
              assert upstream_node in signal_map, ('Components are not in '
                                                   'topological order')
signals_to_wait.append(signal_map[upstream_node])
absl.logging.info('Component %s depends on %s.', component_id,
[s.producer.full_label for s in signals_to_wait])
(component_launcher_class,
component_config) = config_utils.find_component_launch_info(
self._config, component)
          # Each signal is an empty PCollection. AsIter ensures a component
          # will
# be triggered after upstream components are finished.
signal_map[component] = (
root
| 'Run[%s]' % component_id >> beam.ParDo(
_ComponentAsDoFn(component, component_launcher_class,
component_config, tfx_pipeline),
*[beam.pvalue.AsIter(s) for s in signals_to_wait]))
absl.logging.info('Component %s is scheduled.', component_id) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/beam/legacy/beam_dag_runner.py | 0.813461 | 0.237454 | beam_dag_runner.py | pypi |
"""V2 Kubeflow DAG Runner."""
import datetime
import json
import os
from typing import Any, Dict, List, Optional, Text
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow.v2 import pipeline_builder
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.utils import deprecation_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
from google.protobuf import json_format
_KUBEFLOW_TFX_CMD = (
'python', '-m',
'tfx.orchestration.kubeflow.v2.container.kubeflow_v2_run_executor')
# Current schema version for the API proto.
_SCHEMA_VERSION = '1.0.0'
# Default TFX container image/commands to use in KubeflowV2DagRunner.
_KUBEFLOW_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
version_utils.get_image_version())
def _get_current_time():
"""Gets the current timestamp."""
return datetime.datetime.now()
class KubeflowV2DagRunnerConfig(pipeline_config.PipelineConfig):
"""Runtime configuration specific to execution on Kubeflow pipelines."""
def __init__(self,
project_id: Text,
display_name: Optional[Text] = None,
default_image: Optional[Text] = None,
default_commands: Optional[List[Text]] = None,
**kwargs):
"""Constructs README.ml-pipelines-sdk.md Kubeflow V2 runner config.
Args:
project_id: GCP project ID to be used.
display_name: Optional human-readable pipeline name. Defaults to the
pipeline name passed into `KubeflowV2DagRunner.run()`.
default_image: The default TFX image to be used if not overridden by
per-component specification.
default_commands: Optionally specifies the commands of the provided
container image. When not provided, the default `ENTRYPOINT` specified
in the docker image is used. Note: the commands here refers to the K8S
container command, which maps to Docker entrypoint field. If one
supplies command but no args are provided for the container, the
container will be invoked with the provided command, ignoring the
`ENTRYPOINT` and `CMD` defined in the Dockerfile. One can find more
details regarding the difference between K8S and Docker conventions at
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
**kwargs: Additional args passed to base PipelineConfig.
"""
super(KubeflowV2DagRunnerConfig, self).__init__(**kwargs)
self.project_id = project_id
self.display_name = display_name
self.default_image = default_image or _KUBEFLOW_TFX_IMAGE
if default_commands is None:
self.default_commands = _KUBEFLOW_TFX_CMD
else:
self.default_commands = default_commands
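# Example (minimal sketch): a typical config. The project id, name and image
# below are illustrative placeholders, not values used by this module.
#
#   config = KubeflowV2DagRunnerConfig(
#       project_id='my-gcp-project',
#       display_name='my-pipeline',
#       default_image='gcr.io/my-gcp-project/my-tfx-image:latest')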
class KubeflowV2DagRunner(tfx_runner.TfxRunner):
"""Kubeflow V2 pipeline runner.
Builds a pipeline job spec in JSON format based on a TFX pipeline DSL object.
"""
def __init__(self,
config: KubeflowV2DagRunnerConfig,
output_dir: Optional[Text] = None,
output_filename: Optional[Text] = None):
"""Constructs an KubeflowV2DagRunner for compiling pipelines.
Args:
config: A KubeflowV2DagRunnerConfig object to specify runtime
configuration when running the pipeline in Kubeflow.
output_dir: An optional output directory into which to output the pipeline
definition files. Defaults to the current working directory.
output_filename: An optional output file name for the pipeline definition
file. The file output format will be a JSON-serialized PipelineJob pb
message. Defaults to 'pipeline.json'.
"""
if not isinstance(config, KubeflowV2DagRunnerConfig):
raise TypeError('config must be of type KubeflowV2DagRunnerConfig.')
super(KubeflowV2DagRunner, self).__init__()
self._config = config
self._output_dir = output_dir or os.getcwd()
self._output_filename = output_filename or 'pipeline.json'
def run(self,
pipeline: tfx_pipeline.Pipeline,
parameter_values: Optional[Dict[Text, Any]] = None,
write_out: Optional[bool] = True) -> Dict[Text, Any]:
"""Compiles README.ml-pipelines-sdk.md pipeline DSL object into pipeline file.
Args:
pipeline: TFX pipeline object.
parameter_values: mapping from runtime parameter names to their values.
write_out: set to True to actually write out the file to the place
designated by output_dir and output_filename. Otherwise return the
JSON-serialized pipeline job spec.
Returns:
Returns the JSON pipeline job spec.
Raises:
RuntimeError: if trying to write out to a place occupied by an existing
file.
"""
# TODO(b/166343606): Support user-provided labels.
# TODO(b/169095387): Deprecate .run() method in favor of the unified API
# client.
display_name = (
self._config.display_name or pipeline.pipeline_info.pipeline_name)
pipeline_spec = pipeline_builder.PipelineBuilder(
tfx_pipeline=pipeline,
default_image=self._config.default_image,
default_commands=self._config.default_commands).build()
pipeline_spec.sdk_version = 'tfx-{}'.format(version.__version__)
pipeline_spec.schema_version = _SCHEMA_VERSION
runtime_config = pipeline_builder.RuntimeConfigBuilder(
pipeline_info=pipeline.pipeline_info,
parameter_values=parameter_values).build()
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'kubeflow_v2'}):
result = pipeline_pb2.PipelineJob(
display_name=display_name or pipeline.pipeline_info.pipeline_name,
labels=telemetry_utils.get_labels_dict(),
runtime_config=runtime_config)
result.pipeline_spec.update(json_format.MessageToDict(pipeline_spec))
pipeline_json_dict = json_format.MessageToDict(result)
if write_out:
if fileio.exists(self._output_dir) and not fileio.isdir(self._output_dir):
raise RuntimeError('Output path: %s points to a file.' %
self._output_dir)
if not fileio.exists(self._output_dir):
fileio.makedirs(self._output_dir)
with fileio.open(
os.path.join(self._output_dir, self._output_filename), 'wb') as f:
f.write(json.dumps(pipeline_json_dict, sort_keys=True))
return pipeline_json_dict
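# Example (minimal sketch): compiling a pipeline into a JSON job spec.
# `my_pipeline` is a hypothetical tfx_pipeline.Pipeline assumed to exist.
#
#   runner = KubeflowV2DagRunner(
#       config=KubeflowV2DagRunnerConfig(project_id='my-gcp-project'),
#       output_dir='/tmp/pipeline_output')
#   job_spec = runner.run(my_pipeline, write_out=True)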
compile = deprecation_utils.deprecated_alias(
deprecated_name='compile', name='run', func_or_class=run) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/kubeflow/v2/kubeflow_v2_dag_runner.py | 0.818519 | 0.205655 | kubeflow_v2_dag_runner.py | pypi |
"""Utility methods for Kubeflow V2 pipeline compilation."""
# TODO(b/172080784): Add more tests for this module.
import json
import os
from typing import Any, Dict, List, Mapping, Optional, Text, Type, Union
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.orchestration.kubeflow.v2 import parameter_utils
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.types import artifact
from tfx.types import channel
from tfx.types import standard_artifacts
from tfx.types.experimental import simple_artifacts
from tfx.utils import json_utils
import yaml
from google.protobuf import json_format
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
# Key of TFX type path and name in artifact custom properties.
TFX_TYPE_KEY = 'tfx_type'
TYPE_NAME_KEY = 'type_name'
_SUPPORTED_STANDARD_ARTIFACT_TYPES = frozenset(
(standard_artifacts.ExampleAnomalies, standard_artifacts.ExampleStatistics,
standard_artifacts.Examples, standard_artifacts.HyperParameters,
standard_artifacts.InferenceResult, standard_artifacts.InfraBlessing,
standard_artifacts.Model, standard_artifacts.ModelBlessing,
standard_artifacts.ModelEvaluation, standard_artifacts.ModelRun,
standard_artifacts.PushedModel, standard_artifacts.Schema,
standard_artifacts.TransformGraph, standard_artifacts.TransformCache,
standard_artifacts.Float, standard_artifacts.Integer,
standard_artifacts.String, simple_artifacts.Metrics,
simple_artifacts.Statistics, simple_artifacts.Dataset,
simple_artifacts.File))
# TODO(b/156746891): Use IFTTT to sync import path with the definition in
# tfx.types.
TITLE_TO_CLASS_PATH = {
'tfx.ExampleAnomalies': 'tfx.types.standard_artifacts.ExampleAnomalies',
'tfx.ExampleStatistics': 'tfx.types.standard_artifacts.ExampleStatistics',
'tfx.Examples': 'tfx.types.standard_artifacts.Examples',
'tfx.HyperParameters': 'tfx.types.standard_artifacts.HyperParameters',
'tfx.InferenceResult': 'tfx.types.standard_artifacts.InferenceResult',
'tfx.InfraBlessing': 'tfx.types.standard_artifacts.InfraBlessing',
'tfx.Model': 'tfx.types.standard_artifacts.Model',
'tfx.ModelBlessing': 'tfx.types.standard_artifacts.ModelBlessing',
'tfx.ModelEvaluation': 'tfx.types.standard_artifacts.ModelEvaluation',
'tfx.ModelRun': 'tfx.types.standard_artifacts.ModelRun',
'tfx.PushedModel': 'tfx.types.standard_artifacts.PushedModel',
'tfx.Schema': 'tfx.types.standard_artifacts.Schema',
'tfx.TransformGraph': 'tfx.types.standard_artifacts.TransformGraph',
'tfx.TransformCache': 'tfx.types.standard_artifacts.TransformCache',
'tfx.Float': 'tfx.types.standard_artifacts.Float',
'tfx.Integer': 'tfx.types.standard_artifacts.Integer',
'tfx.String': 'tfx.types.standard_artifacts.String',
'tfx.Metrics': 'tfx.types.experimental.simple_artifacts.Metrics',
'tfx.Statistics': 'tfx.types.experimental.simple_artifacts.Statistics',
'tfx.Dataset': 'tfx.types.experimental.simple_artifacts.Dataset',
'tfx.File': 'tfx.types.experimental.simple_artifacts.File'
}
# Keywords used in artifact type YAML specs.
_YAML_INT_TYPE = 'int'
_YAML_STRING_TYPE = 'string'
_YAML_DOUBLE_TYPE = 'double'
def build_runtime_parameter_spec(
parameters: List[data_types.RuntimeParameter]
) -> Dict[str, pipeline_pb2.PipelineSpec.RuntimeParameter]:
"""Converts RuntimeParameters to mapping from names to proto messages."""
def to_message(parameter: data_types.RuntimeParameter):
"""Converts README.ml-pipelines-sdk.md RuntimeParameter to RuntimeParameter message."""
result = pipeline_pb2.PipelineSpec.RuntimeParameter()
# 1. Map the RuntimeParameter type to an enum in the proto definition.
if parameter.ptype == int or parameter.ptype == bool:
result.type = pipeline_pb2.PrimitiveType.INT
elif parameter.ptype == float:
result.type = pipeline_pb2.PrimitiveType.DOUBLE
elif parameter.ptype == Text:
result.type = pipeline_pb2.PrimitiveType.STRING
else:
raise TypeError(
'Unknown parameter type: {} found in parameter: {}'.format(
parameter.ptype, parameter.name))
# 2. Convert its default value.
default = value_converter(parameter.default)
if default is not None:
result.default_value.CopyFrom(default.constant_value)
return result
return {param.name: to_message(param) for param in parameters}
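# Example (minimal sketch): converting a single runtime parameter. The
# parameter below is illustrative only.
#
#   param = data_types.RuntimeParameter(name='num-steps', ptype=int, default=100)
#   spec_map = build_runtime_parameter_spec([param])
#   # spec_map['num-steps'].type == pipeline_pb2.PrimitiveType.INT
#   # spec_map['num-steps'].default_value.int_value == 100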
def build_input_parameter_spec(
dict_data: Dict[str, Any]
) -> Dict[str, pipeline_pb2.TaskInputsSpec.InputParameterSpec]:
"""Converts README.ml-pipelines-sdk.md dict into Kubeflow pipeline input parameter section."""
# Skip None value.
result = {}
for k, v in dict_data.items():
if v is not None:
result[k] = pipeline_pb2.TaskInputsSpec.InputParameterSpec(
runtime_value=value_converter(v))
return result
def _validate_properties_schema(
instance_schema: str,
properties: Optional[Mapping[str, artifact.PropertyType]] = None):
"""Validates the declared property types are consistent with the schema.
Args:
instance_schema: YAML string of the artifact property schema.
properties: The actual property schema of an Artifact Python class.
Raises:
KeyError: When actual property have additional properties than what's
specified in the YAML schema.
TypeError: When the same property is declared with different types in YAML
schema and the Artifact Python class.
"""
schema = yaml.safe_load(instance_schema)['properties'] or {}
properties = properties or {}
for k, v in properties.items():
if k not in schema:
raise KeyError('Actual property: {} not expected in artifact type schema:'
' {}'.format(k, schema))
# It's okay that we only validate the constant_value case, since
# RuntimeParameter's ptype should be validated during component
# instantiation.
# We only validate primitive-typed property for now because other types can
# have nested schema in the YAML spec as well.
if (schema[k]['type'] == _YAML_INT_TYPE and
v.type != artifact.PropertyType.INT or
schema[k]['type'] == _YAML_STRING_TYPE and
v.type != artifact.PropertyType.STRING or
schema[k]['type'] == _YAML_DOUBLE_TYPE and
v.type != artifact.PropertyType.FLOAT):
raise TypeError('Property type mismatched at {} for schema: {}. '
'Expected {} but got {}'.format(
k, schema, schema[k]['type'], v.type))
def build_output_artifact_spec(
channel_spec: channel.Channel
) -> pipeline_pb2.TaskOutputsSpec.OutputArtifactSpec:
"""Builds the Kubeflow pipeline output artifact spec from TFX channel spec."""
artifact_instance = channel_spec.type()
result = pipeline_pb2.TaskOutputsSpec.OutputArtifactSpec()
result.artifact_type.CopyFrom(
pipeline_pb2.ArtifactTypeSchema(
instance_schema=get_artifact_schema(artifact_instance)))
for k, v in convert_from_tfx_properties(
artifact_instance.mlmd_artifact.properties).items():
result.properties[k].CopyFrom(v)
_validate_properties_schema(
instance_schema=result.artifact_type.instance_schema,
properties=channel_spec.type.PROPERTIES)
for k, v in convert_from_tfx_properties(
artifact_instance.mlmd_artifact.custom_properties).items():
result.custom_properties[k].CopyFrom(v)
return result
def value_converter(
tfx_value: Any) -> Optional[pipeline_pb2.ValueOrRuntimeParameter]:
"""Converts TFX/MLMD values into Kubeflow pipeline ValueOrRuntimeParameter."""
if tfx_value is None:
return None
result = pipeline_pb2.ValueOrRuntimeParameter()
if isinstance(tfx_value, (int, float, str, Text)):
result.constant_value.CopyFrom(get_kubeflow_value(tfx_value))
elif isinstance(tfx_value, (Dict, List)):
result.constant_value.CopyFrom(
pipeline_pb2.Value(string_value=json.dumps(tfx_value)))
elif isinstance(tfx_value, data_types.RuntimeParameter):
# Attach the runtime parameter to the context.
parameter_utils.attach_parameter(tfx_value)
result.runtime_parameter = tfx_value.name
elif isinstance(tfx_value, metadata_store_pb2.Value):
if tfx_value.WhichOneof('value') == 'int_value':
result.constant_value.CopyFrom(
pipeline_pb2.Value(int_value=tfx_value.int_value))
elif tfx_value.WhichOneof('value') == 'double_value':
result.constant_value.CopyFrom(
pipeline_pb2.Value(double_value=tfx_value.double_value))
elif tfx_value.WhichOneof('value') == 'string_value':
result.constant_value.CopyFrom(
pipeline_pb2.Value(string_value=tfx_value.string_value))
elif isinstance(tfx_value, message.Message):
result.constant_value.CopyFrom(
pipeline_pb2.Value(
string_value=json_format.MessageToJson(
message=tfx_value, sort_keys=True)))
else:
# By default will attempt to encode the object using json_utils.dumps.
result.constant_value.CopyFrom(
pipeline_pb2.Value(string_value=json_utils.dumps(tfx_value)))
return result
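# Example (minimal sketch): the conversion rules above in action.
#
#   value_converter(3).constant_value.int_value         # -> 3
#   value_converter(1.5).constant_value.double_value    # -> 1.5
#   value_converter({'k': 'v'}).constant_value.string_value  # -> '{"k": "v"}'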
def get_kubeflow_value(
tfx_value: Union[int, float, str, Text]) -> Optional[pipeline_pb2.Value]:
"""Converts TFX/MLMD values into Kubeflow pipeline Value proto message."""
if tfx_value is None:
return None
result = pipeline_pb2.Value()
if isinstance(tfx_value, int):
result.int_value = tfx_value
elif isinstance(tfx_value, float):
result.double_value = tfx_value
elif isinstance(tfx_value, (str, Text)):
result.string_value = tfx_value
else:
raise TypeError('Got unknown type of value: {}'.format(tfx_value))
return result
def get_mlmd_value(
kubeflow_value: pipeline_pb2.Value) -> metadata_store_pb2.Value:
"""Converts Kubeflow pipeline Value pb message to MLMD Value."""
result = metadata_store_pb2.Value()
if kubeflow_value.WhichOneof('value') == 'int_value':
result.int_value = kubeflow_value.int_value
elif kubeflow_value.WhichOneof('value') == 'double_value':
result.double_value = kubeflow_value.double_value
elif kubeflow_value.WhichOneof('value') == 'string_value':
result.string_value = kubeflow_value.string_value
else:
raise TypeError('Got unknown type of value: {}'.format(kubeflow_value))
return result
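# Example (minimal sketch): round-tripping a primitive value between the
# Kubeflow pipeline Value proto and the MLMD Value proto.
#
#   kf_value = get_kubeflow_value(42)      # pipeline_pb2.Value(int_value=42)
#   mlmd_value = get_mlmd_value(kf_value)  # metadata_store_pb2.Value(int_value=42)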
def get_artifact_schema(artifact_instance: artifact.Artifact) -> Text:
"""Gets the YAML schema string associated with the artifact type."""
if isinstance(artifact_instance, tuple(_SUPPORTED_STANDARD_ARTIFACT_TYPES)):
# For supported first-party artifact types, get the built-in schema yaml per
# its type name.
schema_path = os.path.join(
os.path.dirname(__file__), 'artifact_types',
'{}.yaml'.format(artifact_instance.type_name))
return fileio.open(schema_path, 'rb').read()
else:
# Otherwise, fall back to the generic `Artifact` type schema.
# To recover the Python type object at runtime, the class import path will
# be encoded as the schema title.
# Read the generic artifact schema template.
schema_path = os.path.join(
os.path.dirname(__file__), 'artifact_types', 'Artifact.yaml')
data = yaml.safe_load(fileio.open(schema_path, 'rb').read())
# Encode class import path.
data['title'] = '%s.%s' % (artifact_instance.__class__.__module__,
artifact_instance.__class__.__name__)
return yaml.dump(data, sort_keys=False)
def get_artifact_title(artifact_type: Type[artifact.Artifact]) -> Text:
"""Gets the schema title from the artifact python class."""
if artifact_type in _SUPPORTED_STANDARD_ARTIFACT_TYPES:
return 'tfx.{}'.format(artifact_type.__name__)
return 'tfx.Artifact'
def convert_from_tfx_properties(
tfx_properties) -> Dict[Any, pipeline_pb2.ValueOrRuntimeParameter]:
"""Converts (custom) properties to mapping to ValueOrRuntimeParameter pb.
Args:
tfx_properties: A mapping field in a proto message, from string to
pipeline.Value.
Returns:
A mapping from string to pipeline_spec.ValueOrRuntimeParameter containing
the same information.
"""
return {k: value_converter(v) for k, v in tfx_properties.items()} | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/kubeflow/v2/compiler_utils.py | 0.702734 | 0.252822 | compiler_utils.py | pypi |
"""Builder for Kubeflow pipelines level proto spec."""
import re
from typing import Any, Dict, List, Optional, Text
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.orchestration.kubeflow.v2 import compiler_utils
from tfx.orchestration.kubeflow.v2 import parameter_utils
from tfx.orchestration.kubeflow.v2 import step_builder
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from google.protobuf import json_format
_LEGAL_NAME_PATTERN = re.compile(r'[a-z0-9][a-z0-9-]{0,127}')
def _check_name(name: Text) -> None:
"""Checks the user-provided pipeline name."""
if not _LEGAL_NAME_PATTERN.fullmatch(name):
raise ValueError('User provided pipeline name %s is illegal, please '
'follow the pattern of [a-z0-9][a-z0-9-]{0,127}.' % name)
class RuntimeConfigBuilder(object):
"""Kubeflow pipelines RuntimeConfig builder."""
def __init__(self, pipeline_info: data_types.PipelineInfo,
parameter_values: Dict[Text, Any]):
"""Creates README.ml-pipelines-sdk.md RuntimeConfigBuilder object.
Args:
pipeline_info: A TFX pipeline info object, containing pipeline root info.
parameter_values: mapping from runtime parameter names to their values.
"""
self._pipeline_root = pipeline_info.pipeline_root
self._parameter_values = parameter_values or {}
def build(self) -> pipeline_pb2.PipelineJob.RuntimeConfig:
"""Build README.ml-pipelines-sdk.md RuntimeConfig proto."""
return pipeline_pb2.PipelineJob.RuntimeConfig(
gcs_output_directory=self._pipeline_root,
parameters={
k: compiler_utils.get_kubeflow_value(v)
for k, v in self._parameter_values.items()
})
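# Example (minimal sketch): building a RuntimeConfig. `my_pipeline_info` is a
# hypothetical data_types.PipelineInfo whose pipeline_root is a GCS path.
#
#   runtime_config = RuntimeConfigBuilder(
#       pipeline_info=my_pipeline_info,
#       parameter_values={'num-steps': 100}).build()
#   # runtime_config.gcs_output_directory == my_pipeline_info.pipeline_root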
class PipelineBuilder(object):
"""Kubeflow pipelines spec builder.
Constructs a pipeline spec based on the TFX pipeline object.
"""
def __init__(self,
tfx_pipeline: pipeline.Pipeline,
default_image: Text,
default_commands: Optional[List[Text]] = None):
"""Creates README.ml-pipelines-sdk.md PipelineBuilder object.
A PipelineBuilder takes in README.ml-pipelines-sdk.md TFX pipeline object. Then
PipelineBuilder.build() outputs Kubeflow PipelineSpec proto.
Args:
tfx_pipeline: A TFX pipeline object.
default_image: Specifies the TFX container image used in CMLE container
tasks. Can be overridden by per-component specification.
default_commands: Optionally specifies the commands of the provided
container image. When not provided, the default `ENTRYPOINT` specified
in the docker image is used. Note: the commands here refers to the K8S
container command, which maps to Docker entrypoint field. If one
supplies command but no args are provided for the container, the
container will be invoked with the provided command, ignoring the
`ENTRYPOINT` and `CMD` defined in the Dockerfile. One can find more
details regarding the difference between K8S and Docker conventions at
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
"""
self._pipeline_info = tfx_pipeline.pipeline_info
self._pipeline = tfx_pipeline
self._default_image = default_image
self._default_commands = default_commands
def build(self) -> pipeline_pb2.PipelineSpec:
"""Build README.ml-pipelines-sdk.md pipeline PipelineSpec."""
_check_name(self._pipeline_info.pipeline_name)
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
pipeline_info = pipeline_pb2.PipelineInfo(
name=self._pipeline_info.pipeline_name)
tasks = []
# Map from (producer component id, output key) to (new producer component
# id, output key)
channel_redirect_map = {}
with parameter_utils.ParameterContext() as pc:
for component in self._pipeline.components:
# Here the topological order of components is required.
# If a channel redirection is needed, the redirect mapping is expected to be
# available because the upstream node (which is the cause for
# redirecting) is processed before the downstream consumer nodes.
built_tasks = step_builder.StepBuilder(
node=component,
deployment_config=deployment_config,
image=self._default_image,
image_cmds=self._default_commands,
beam_pipeline_args=self._pipeline.beam_pipeline_args,
enable_cache=self._pipeline.enable_cache,
pipeline_info=self._pipeline_info,
channel_redirect_map=channel_redirect_map).build()
tasks.extend(built_tasks)
result = pipeline_pb2.PipelineSpec(
pipeline_info=pipeline_info,
tasks=tasks,
runtime_parameters=compiler_utils.build_runtime_parameter_spec(
pc.parameters))
result.deployment_spec.update(json_format.MessageToDict(deployment_config))
return result | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/kubeflow/v2/pipeline_builder.py | 0.943458 | 0.31567 | pipeline_builder.py | pypi |
"""Driver for file-based ExampleGen components in Kubeflow V2 runner."""
import argparse
import os
from typing import Any, Dict, List, Optional
from absl import logging
from tfx.components.example_gen import driver
from tfx.components.example_gen import utils
from tfx.dsl.io import fileio
from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.proto import example_gen_pb2
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.utils import proto_utils
from google.protobuf import json_format
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
def _run_driver(exec_properties: Dict[str, Any],
outputs_dict: Dict[str, List[artifact.Artifact]],
output_metadata_uri: str,
name_from_id: Optional[Dict[int, str]] = None) -> None:
"""Runs the driver, writing its output as README.ml-pipelines-sdk.md ExecutorOutput proto.
The main goal of this driver is to calculate the span and fingerprint of input
data, allowing for the executor invocation to be skipped if the ExampleGen
component has been previously run on the same data with the same
configuration. This span and fingerprint are added as new custom execution
properties to an ExecutorOutput proto and written to a GCS path. The CAIP
pipelines system reads this file and updates MLMD with the new execution
properties.
Args:
exec_properties:
These are required to contain the following properties:
'input_base_uri': A path from which files will be read and their
span/fingerprint calculated.
'input_config': A json-serialized tfx.proto.example_gen_pb2.InputConfig
proto message.
See https://www.tensorflow.org/tfx/guide/examplegen for more details.
'output_config': A json-serialized tfx.proto.example_gen_pb2.OutputConfig
proto message.
See https://www.tensorflow.org/tfx/guide/examplegen for more details.
outputs_dict: The mapping of the output artifacts.
output_metadata_uri: A path at which an ExecutorOutput message will be
written with updated execution properties and output artifacts. The CAIP
Pipelines service will update the task's properties and artifacts prior to
running the executor.
name_from_id: Optional. Mapping from the converted int-typed id to str-typed
runtime artifact name, which should be unique.
"""
if name_from_id is None:
name_from_id = {}
logging.set_verbosity(logging.INFO)
logging.info('exec_properties = %s\noutput_metadata_uri = %s',
exec_properties, output_metadata_uri)
input_base_uri = exec_properties[utils.INPUT_BASE_KEY]
input_config = example_gen_pb2.Input()
proto_utils.json_to_proto(exec_properties[utils.INPUT_CONFIG_KEY],
input_config)
# TODO(b/161734559): Support range config.
fingerprint, select_span, version = utils.calculate_splits_fingerprint_span_and_version(
input_base_uri, input_config.splits)
logging.info('Calculated span: %s', select_span)
logging.info('Calculated fingerprint: %s', fingerprint)
exec_properties[utils.SPAN_PROPERTY_NAME] = select_span
exec_properties[utils.FINGERPRINT_PROPERTY_NAME] = fingerprint
exec_properties[utils.VERSION_PROPERTY_NAME] = version
if utils.EXAMPLES_KEY not in outputs_dict:
raise ValueError('Example artifact was missing in the ExampleGen outputs.')
example_artifact = artifact_utils.get_single_instance(
outputs_dict[utils.EXAMPLES_KEY])
driver.update_output_artifact(
exec_properties=exec_properties,
output_artifact=example_artifact.mlmd_artifact)
# Log the output metadata file
output_metadata = pipeline_pb2.ExecutorOutput()
output_metadata.parameters[
utils.FINGERPRINT_PROPERTY_NAME].string_value = fingerprint
output_metadata.parameters[utils.SPAN_PROPERTY_NAME].string_value = str(
select_span)
output_metadata.parameters[
utils.INPUT_CONFIG_KEY].string_value = json_format.MessageToJson(
input_config)
output_metadata.artifacts[utils.EXAMPLES_KEY].artifacts.add().CopyFrom(
kubeflow_v2_entrypoint_utils.to_runtime_artifact(example_artifact,
name_from_id))
fileio.makedirs(os.path.dirname(output_metadata_uri))
with fileio.open(output_metadata_uri, 'wb') as f:
f.write(json_format.MessageToJson(output_metadata, sort_keys=True))
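# Example (minimal sketch): this driver is normally invoked as a container
# entrypoint. The JSON payload below is an abbreviated, illustrative
# ExecutorInput message, not a complete one.
#
#   python -m tfx.orchestration.kubeflow.v2.file_based_example_gen.driver \
#     --json_serialized_invocation_args='{"inputs": {...}, "outputs": {...}}'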
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'--json_serialized_invocation_args',
type=str,
required=True,
help='JSON-serialized metadata for this execution.')
args, _ = parser.parse_known_args(argv)
executor_input = pipeline_pb2.ExecutorInput()
json_format.Parse(
args.json_serialized_invocation_args,
executor_input,
ignore_unknown_fields=True)
name_from_id = {}
exec_properties = kubeflow_v2_entrypoint_utils.parse_execution_properties(
executor_input.inputs.parameters)
outputs_dict = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict(
executor_input.outputs.artifacts, name_from_id)
_run_driver(exec_properties, outputs_dict, executor_input.outputs.output_file,
name_from_id)
if __name__ == '__main__':
app.run(main=main) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/kubeflow/v2/file_based_example_gen/driver.py | 0.7324 | 0.269064 | driver.py | pypi |
"""Entrypoint for invoking TFX components in Kubeflow V2 runner."""
import argparse
import os
from typing import List
from absl import logging
from tfx.components.evaluator import executor as evaluator_executor
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.utils import import_utils
from google.protobuf import json_format
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
# TODO(b/166202742): Consolidate container entrypoint with Kubeflow runner.
# TODO(b/154046602): Consider put this function into tfx/orchestration, and
# unify the code paths to call into component executors.
def _run_executor(args: argparse.Namespace, beam_args: List[str]) -> None:
"""Selects README.ml-pipelines-sdk.md particular executor and run it based on name.
Args:
args:
--executor_class_path: The import path of the executor class.
--json_serialized_invocation_args: Full JSON-serialized parameters for
this execution.
beam_args: Optional parameter that maps to the optional_pipeline_args
parameter in the pipeline, which provides additional configuration options
for apache-beam and tensorflow.logging.
For more about the beam arguments please refer to:
https://cloud.google.com/dataflow/docs/guides/specifying-exec-params
"""
logging.set_verbosity(logging.INFO)
# Rehydrate inputs/outputs/exec_properties from the serialized metadata.
executor_input = pipeline_pb2.ExecutorInput()
json_format.Parse(
args.json_serialized_invocation_args,
executor_input,
ignore_unknown_fields=True)
inputs_dict = executor_input.inputs.artifacts
outputs_dict = executor_input.outputs.artifacts
inputs_parameter = executor_input.inputs.parameters
name_from_id = {}
inputs = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict(
inputs_dict, name_from_id)
outputs = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict(
outputs_dict, name_from_id)
exec_properties = kubeflow_v2_entrypoint_utils.parse_execution_properties(
inputs_parameter)
logging.info('Executor %s do: inputs: %s, outputs: %s, exec_properties: %s',
args.executor_class_path, inputs, outputs, exec_properties)
executor_cls = import_utils.import_class_by_path(args.executor_class_path)
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_args, unique_id='')
executor = executor_cls(executor_context)
logging.info('Starting executor')
executor.Do(inputs, outputs, exec_properties)
# TODO(b/169583143): Remove this workaround when TFX migrates to use str-typed
# id/name to identify artifacts.
# Convert ModelBlessing artifact to use managed MLMD resource name.
if (issubclass(executor_cls, evaluator_executor.Executor) and
BLESSING_KEY in outputs):
# Parse the parent prefix for managed MLMD resource name.
kubeflow_v2_entrypoint_utils.refactor_model_blessing(
artifact_utils.get_single_instance(outputs[BLESSING_KEY]),
name_from_id)
# Log the output metadata to a file so that it can be picked up by MP.
metadata_uri = executor_input.outputs.output_file
executor_output = pipeline_pb2.ExecutorOutput()
for k, v in kubeflow_v2_entrypoint_utils.translate_executor_output(
outputs, name_from_id).items():
executor_output.artifacts[k].CopyFrom(v)
fileio.makedirs(os.path.dirname(metadata_uri))
with fileio.open(metadata_uri, 'wb') as f:
f.write(json_format.MessageToJson(executor_output))
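# Example (minimal sketch): a typical container invocation of this entrypoint.
# The executor class path and JSON payload are illustrative placeholders.
#
#   python -m tfx.orchestration.kubeflow.v2.container.kubeflow_v2_run_executor \
#     --executor_class_path=tfx.components.trainer.executor.GenericExecutor \
#     --json_serialized_invocation_args='{"inputs": {...}, "outputs": {...}}'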
def main(argv):
"""Parses the arguments for _run_executor() then invokes it.
Args:
argv: Unparsed arguments for run_executor.py. Known argument names include
--executor_class_path: Python class of executor in format of
<module>.<class>.
--json_serialized_invocation_args: Full JSON-serialized parameters for
this execution. The remaining part of the arguments will be parsed as
the beam args used by each component executors. Some commonly used beam
args are as follows:
--runner: The beam pipeline runner environment. Can be DirectRunner (for
running locally) or DataflowRunner (for running on GCP Dataflow
service).
--project: The GCP project ID. Needed when runner==DataflowRunner.
--direct_num_workers: Number of threads or subprocesses executing the
workload.
For more about the beam arguments please refer to:
https://cloud.google.com/dataflow/docs/guides/specifying-exec-params
Returns:
None
Raises:
None
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--executor_class_path',
type=str,
required=True,
help='Python class of executor in format of <module>.<class>.')
parser.add_argument(
'--json_serialized_invocation_args',
type=str,
required=True,
help='JSON-serialized metadata for this execution.')
args, beam_args = parser.parse_known_args(argv)
_run_executor(args, beam_args)
if __name__ == '__main__':
app.run(main=main) | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/kubeflow/v2/container/kubeflow_v2_run_executor.py | 0.624294 | 0.334019 | kubeflow_v2_run_executor.py | pypi |
"""Component that launches CAIP custom training job with flexible interface."""
from typing import Any, Dict, List, Optional, Text
from tfx.dsl.component.experimental import component_utils
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.kubeflow.v2.components.experimental import ai_platform_training_executor
from tfx.types import channel_utils
from tfx.types import component_spec
from tfx.utils import json_utils
def create_ai_platform_training(
name: Text,
project_id: Text,
region: Optional[Text] = None,
job_id: Optional[Text] = None,
image_uri: Optional[Text] = None,
args: Optional[List[placeholders.CommandlineArgumentType]] = None,
# TODO(jxzheng): support Python training spec
scale_tier: Optional[Text] = None,
training_input: Optional[Dict[Text, Any]] = None,
labels: Optional[Dict[Text, Text]] = None,
inputs: Optional[Dict[Text, Any]] = None,
outputs: Optional[Dict[Text, Any]] = None,
parameters: Optional[Dict[Text, Any]] = None,
) -> base_component.BaseComponent:
"""Creates README.ml-pipelines-sdk.md pipeline step that launches README.ml-pipelines-sdk.md AIP training job.
The generated TFX component will have a component spec specified dynamically,
through inputs/outputs/parameters in the following format:
- inputs: A mapping from input name to the upstream channel connected. The
artifact type of the channel will be automatically inferred.
- outputs: A mapping from output name to the associated artifact type.
- parameters: A mapping from execution property names to its associated value.
Only primitive typed values are supported. Note that RuntimeParameter is
not supported yet.
For example:
```
create_ai_platform_training(
...
inputs: {
# Assuming there is an upstream node example_gen, with an output
# 'examples' of the type Examples.
'examples': example_gen.outputs['examples'],
},
outputs: {
'model': standard_artifacts.Model,
},
parameters: {
'n_steps': 100,
'optimizer': 'sgd',
}
...
)
```
will generate a component instance with a component spec equivalent to:
```
class MyComponentSpec(ComponentSpec):
INPUTS = {
'examples': ChannelParameter(type=standard_artifacts.Examples)
}
OUTPUTS = {
'model': ChannelParameter(type=standard_artifacts.Model)
}
PARAMETERS = {
'n_steps': ExecutionParameter(type=int),
'optimizer': ExecutionParameter(type=str)
}
```
with its input 'examples' connected to the example_gen output, and
execution properties specified as 100 and 'sgd', respectively.
Example usage of the component:
```
# A single node training job.
my_train = create_ai_platform_training(
name='my_training_step',
project_id='my-project',
region='us-central1',
image_uri='gcr.io/my-project/caip-training-test:latest',
args=[
'--examples',
placeholders.InputUriPlaceholder('examples'),
'--n-steps',
placeholders.InputValuePlaceholder('n_step'),
'--output-location',
placeholders.OutputUriPlaceholder('model')
],
scale_tier='BASIC_GPU',
inputs={'examples': example_gen.outputs['examples']},
outputs={
'model': standard_artifacts.Model
},
parameters={'n_step': 100}
)
# More complex setting can be expressed by providing training_input
# directly.
my_distributed_train = create_ai_platform_training(
name='my_training_step',
project_id='my-project',
training_input={
'scaleTier':
'CUSTOM',
'region':
'us-central1',
'masterType': 'n1-standard-8',
'masterConfig': {
'imageUri': 'gcr.io/my-project/my-dist-training:latest'
},
'workerType': 'n1-standard-8',
'workerCount': 8,
'workerConfig': {
'imageUri': 'gcr.io/my-project/my-dist-training:latest'
},
'args': [
'--examples',
placeholders.InputUriPlaceholder('examples'),
'--n-steps',
placeholders.InputValuePlaceholder('n_step'),
'--output-location',
placeholders.OutputUriPlaceholder('model')
]
},
inputs={'examples': example_gen.outputs['examples']},
outputs={'model': Model},
parameters={'n_step': 100}
)
```
Args:
name: name of the component. This is needed to construct the component spec
and component class dynamically as well.
project_id: the GCP project under which the AIP training job will be
running.
region: GCE region where the AIP training job will be running.
job_id: the unique ID of the job. Defaults to 'tfx_%Y%m%d%H%M%S'.
image_uri: the GCR location of the container image, which will be used to
execute the training program. If the same field is specified in
training_input, the latter overrides image_uri.
args: command line arguments that will be passed into the training program.
Users can use placeholder semantics as in
tfx.dsl.component.experimental.container_component to wire the args with
component inputs/outputs/parameters.
scale_tier: Cloud ML resource requested by the job. See
https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#ScaleTier
training_input: full training job spec. This field overrides other
specifications if applicable. This field follows the
[TrainingInput](https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#traininginput)
schema.
labels: user-specified label attached to the job.
inputs: the dict of component inputs.
outputs: the dict of component outputs.
parameters: the dict of component parameters, aka, execution properties.
Returns:
A component instance that represents the AIP job in the DSL.
Raises:
ValueError: when image_uri is missing and masterConfig is not specified in
training_input, or when region is missing and training_input
does not provide region either.
TypeError: when non-primitive parameters are specified.
"""
training_input = training_input or {}
if scale_tier and not training_input.get('scale_tier'):
training_input['scaleTier'] = scale_tier
if not training_input.get('masterConfig'):
# If no replica config is specified, create a default one.
if not image_uri:
raise ValueError('image_uri is required when masterConfig is not '
'explicitly specified in training_input.')
training_input['masterConfig'] = {'imageUri': image_uri}
# Note: A custom entrypoint can be set to training_input['masterConfig']
# through key 'container_command'.
training_input['args'] = args
if not training_input.get('region'):
if not region:
raise ValueError('region is required when it is not set in '
'training_input.')
training_input['region'] = region
# Squash training_input, project, job_id, and labels into an exec property
# namely 'aip_training_config'.
aip_training_config = {
ai_platform_training_executor.PROJECT_CONFIG_KEY: project_id,
ai_platform_training_executor.TRAINING_INPUT_CONFIG_KEY: training_input,
ai_platform_training_executor.JOB_ID_CONFIG_KEY: job_id,
ai_platform_training_executor.LABELS_CONFIG_KEY: labels,
}
aip_training_config_str = json_utils.dumps(aip_training_config)
# Construct the component spec.
if inputs is None:
inputs = {}
if outputs is None:
outputs = {}
if parameters is None:
parameters = {}
input_channel_parameters = {}
output_channel_parameters = {}
output_channels = {}
execution_parameters = {
ai_platform_training_executor.CONFIG_KEY:
component_spec.ExecutionParameter(type=(str, Text))
}
for input_name, single_channel in inputs.items():
# Infer the type of input channels based on the channels passed in.
# TODO(b/155804245) Sanitize the names so that they're valid python names
input_channel_parameters[input_name] = (
component_spec.ChannelParameter(type=single_channel.type))
for output_name, channel_type in outputs.items():
# TODO(b/155804245) Sanitize the names so that they're valid python names
output_channel_parameters[output_name] = (
component_spec.ChannelParameter(type=channel_type))
artifact = channel_type()
channel = channel_utils.as_channel([artifact])
output_channels[output_name] = channel
# TODO(jxzheng): Support RuntimeParameter as parameters.
for param_name, single_parameter in parameters.items():
# Infer the type of parameters based on the parameters passed in.
# TODO(b/155804245) Sanitize the names so that they're valid python names
if not isinstance(single_parameter, (int, float, Text, bytes)):
raise TypeError(
'Parameter can only be int/float/str/bytes, got {}'.format(
type(single_parameter)))
execution_parameters[param_name] = (
component_spec.ExecutionParameter(type=type(single_parameter)))
default_init_args = {
**inputs,
**output_channels,
**parameters, ai_platform_training_executor.CONFIG_KEY:
aip_training_config_str
}
tfx_component_class = component_utils.create_tfx_component_class(
name=name,
tfx_executor_spec=executor_spec.ExecutorClassSpec(
ai_platform_training_executor.AiPlatformTrainingExecutor),
input_channel_parameters=input_channel_parameters,
output_channel_parameters=output_channel_parameters,
execution_parameters=execution_parameters,
default_init_args=default_init_args)
return tfx_component_class() | /rflow_ml_pipelines_sdk-1.1.18-py3-none-any.whl/tfx/orchestration/kubeflow/v2/components/experimental/ai_platform_training_component.py | 0.837055 | 0.725223 | ai_platform_training_component.py | pypi |
import logging
import time
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from urllib.request import parse_http_list, parse_keqv_list
LOGGER = logging.getLogger(__name__)
def parse_www_authenticate_header(header):
"""
Convert a WWW-Authentication header into a dict that can be used
in a JSON response.
"""
items = parse_http_list(header)
return parse_keqv_list(items)
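# Example (minimal sketch): parsing a WWW-Authenticate header value.
#
#   parse_www_authenticate_header(
#       'error="invalid_token", error_description="The token expired"')
#   # -> {'error': 'invalid_token',
#   #     'error_description': 'The token expired'}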
def import_from_settings(attr, *args):
"""
Load an attribute from the django settings.
:raises:
ImproperlyConfigured
"""
try:
if args:
return getattr(settings, attr, args[0])
return getattr(settings, attr)
except AttributeError:
raise ImproperlyConfigured('Setting {0} not found'.format(attr))
def absolutify(request, path):
"""Return the absolute URL of a path."""
return request.build_absolute_uri(path)
def is_authenticated(user):
"""return True if the user is authenticated.
This is necessary because in Django 1.10 the `user.is_authenticated`
stopped being a method and is now a property.
Actually `user.is_authenticated()` actually works, thanks to a backwards
compat trick in Django. But in Django 2.0 it will cease to work
as a callable method.
"""
msg = '`is_authenticated()` is going to be removed in mozilla-django-oidc v 2.x'
warnings.warn(msg, DeprecationWarning)
return user.is_authenticated
def add_state_and_nonce_to_session(request, state, params):
"""
Stores the `state` and `nonce` parameters in a session dictionary including the time when it
was added. The dictionary can contain multiple state/nonce combinations to allow parallel
logins with multiple browser sessions.
To keep the session space to a reasonable size, the dictionary is kept at 50 state/nonce
combinations maximum.
"""
nonce = params.get('nonce')
# Store Nonce with the State parameter in the session "oidc_states" dictionary.
# The dictionary can store multiple State/Nonce combinations to allow parallel
# authentication flows which would otherwise overwrite State/Nonce values!
# The "oidc_states" dictionary uses the state as key and as value a dictionary with "nonce"
# and "added_on". "added_on" contains the time when the state was added to the session.
# With this value, the oldest element can be found and deleted from the session.
if 'oidc_states' not in request.session or \
not isinstance(request.session['oidc_states'], dict):
request.session['oidc_states'] = {}
# Make sure that the State/Nonce dictionary in the session does not get too big.
# If the number of State/Nonce combinations reaches a certain threshold, remove the oldest
# state by finding out which element has the oldest "added_on" time.
limit = import_from_settings('OIDC_MAX_STATES', 50)
if len(request.session['oidc_states']) >= limit:
LOGGER.info(
'User has {} or more "oidc_states" in their session, '
'deleting the oldest one!'.format(limit)
)
oldest_state = None
oldest_added_on = time.time()
for item_state, item in request.session['oidc_states'].items():
if item['added_on'] < oldest_added_on:
oldest_state = item_state
oldest_added_on = item['added_on']
if oldest_state:
del request.session['oidc_states'][oldest_state]
request.session['oidc_states'][state] = {
'nonce': nonce,
'added_on': time.time(),
} | /rflow-mozilla-django-oidc-2.0.0.tar.gz/rflow-mozilla-django-oidc-2.0.0/mozilla_django_oidc/utils.py | 0.695855 | 0.267656 | utils.py | pypi |
from rest_framework import generics, status
from rest_framework.response import Response
from . import serializers
from .authentication import AUTH_HEADER_TYPES
from .exceptions import InvalidToken, TokenError
class TokenViewBase(generics.GenericAPIView):
permission_classes = ()
authentication_classes = ()
serializer_class = None
www_authenticate_realm = 'api'
def get_authenticate_header(self, request):
return '{0} realm="{1}"'.format(
AUTH_HEADER_TYPES[0],
self.www_authenticate_realm,
)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
except TokenError as e:
raise InvalidToken(e.args[0])
return Response(serializer.validated_data, status=status.HTTP_200_OK)
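# Example (minimal sketch): wiring these views into a Django urls.py. The URL
# prefixes below are illustrative choices, not required by this module.
#
#   from django.urls import path
#   from rest_framework_simplejwt import views as jwt_views
#
#   urlpatterns = [
#       path('api/token/', jwt_views.TokenObtainPairView.as_view()),
#       path('api/token/refresh/', jwt_views.TokenRefreshView.as_view()),
#   ]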
class TokenObtainPairView(TokenViewBase):
"""
Takes a set of user credentials and returns an access and refresh JSON web
token pair to prove the authentication of those credentials.
"""
serializer_class = serializers.TokenObtainPairSerializer
token_obtain_pair = TokenObtainPairView.as_view()
class TokenRefreshView(TokenViewBase):
"""
Takes a refresh type JSON web token and returns an access type JSON web
token if the refresh token is valid.
"""
serializer_class = serializers.TokenRefreshSerializer
token_refresh = TokenRefreshView.as_view()
class TokenObtainSlidingView(TokenViewBase):
"""
Takes a set of user credentials and returns a sliding JSON web token to
prove the authentication of those credentials.
"""
serializer_class = serializers.TokenObtainSlidingSerializer
token_obtain_sliding = TokenObtainSlidingView.as_view()
class TokenRefreshSlidingView(TokenViewBase):
"""
Takes a sliding JSON web token and returns a new, refreshed version if the
token's refresh period has not expired.
"""
serializer_class = serializers.TokenRefreshSlidingSerializer
token_refresh_sliding = TokenRefreshSlidingView.as_view()
class TokenVerifyView(TokenViewBase):
"""
Takes a token and indicates if it is valid. This view provides no
information about a token's fitness for a particular use.
"""
serializer_class = serializers.TokenVerifySerializer
token_verify = TokenVerifyView.as_view() | /rflow-mozilla-simplejwt-0.0.1.tar.gz/rflow-mozilla-simplejwt-0.0.1/src/rest_framework_simplejwt/views.py | 0.735167 | 0.170992 | views.py | pypi |
from datetime import timedelta
from uuid import uuid4
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.utils.module_loading import import_string
from .exceptions import TokenBackendError, TokenError
from .settings import api_settings
from .token_blacklist.models import BlacklistedToken, OutstandingToken
from .utils import (
aware_utcnow, datetime_from_epoch, datetime_to_epoch, format_lazy,
)
class Token:
"""
A class which validates and wraps an existing JWT or can be used to build a
new JWT.
"""
token_type = None
lifetime = None
def __init__(self, token=None, verify=True):
"""
!!!! IMPORTANT !!!! MUST raise a TokenError with a user-facing error
message if the given token is invalid, expired, or otherwise not safe
to use.
"""
if self.token_type is None or self.lifetime is None:
raise TokenError(_('Cannot create token with no type or lifetime'))
self.token = token
self.current_time = aware_utcnow()
# Set up token
if token is not None:
# An encoded token was provided
token_backend = self.get_token_backend()
# Decode token
try:
self.payload = token_backend.decode(token, verify=verify)
except TokenBackendError:
raise TokenError(_('Token is invalid or expired'))
if verify:
self.verify()
else:
# New token. Skip all the verification steps.
self.payload = {api_settings.TOKEN_TYPE_CLAIM: self.token_type}
# Set "exp" claim with default value
self.set_exp(from_time=self.current_time, lifetime=self.lifetime)
# Set "jti" claim
self.set_jti()
def __repr__(self):
return repr(self.payload)
def __getitem__(self, key):
return self.payload[key]
def __setitem__(self, key, value):
self.payload[key] = value
def __delitem__(self, key):
del self.payload[key]
def __contains__(self, key):
return key in self.payload
def get(self, key, default=None):
return self.payload.get(key, default)
def __str__(self):
"""
Signs and returns a token as a base64 encoded string.
"""
return self.get_token_backend().encode(self.payload)
def verify(self):
"""
Performs additional validation steps which were not performed when this
token was decoded. This method is part of the "public" API to indicate
the intention that it may be overridden in subclasses.
"""
# According to RFC 7519, the "exp" claim is OPTIONAL
# (https://tools.ietf.org/html/rfc7519#section-4.1.4). As a more
# correct behavior for authorization tokens, we require an "exp"
# claim. We don't want any zombie tokens walking around.
self.check_exp()
# Ensure token id is present
if api_settings.JTI_CLAIM not in self.payload:
raise TokenError(_('Token has no id'))
self.verify_token_type()
def verify_token_type(self):
"""
Ensures that the token type claim is present and has the correct value.
"""
try:
token_type = self.payload[api_settings.TOKEN_TYPE_CLAIM]
except KeyError:
raise TokenError(_('Token has no type'))
if self.token_type != token_type:
raise TokenError(_('Token has wrong type'))
def set_jti(self):
"""
Populates the configured jti claim of a token with a string where there
is a negligible probability that the same string will be chosen at a
later time.
See here:
https://tools.ietf.org/html/rfc7519#section-4.1.7
"""
self.payload[api_settings.JTI_CLAIM] = uuid4().hex
def set_exp(self, claim='exp', from_time=None, lifetime=None):
"""
Updates the expiration time of a token.
"""
if from_time is None:
from_time = self.current_time
if lifetime is None:
lifetime = self.lifetime
self.payload[claim] = datetime_to_epoch(from_time + lifetime)
def check_exp(self, claim='exp', current_time=None):
"""
Checks whether a timestamp value in the given claim has passed (since
the given datetime value in `current_time`). Raises a TokenError with
a user-facing error message if so.
"""
if current_time is None:
current_time = self.current_time
try:
claim_value = self.payload[claim]
except KeyError:
raise TokenError(format_lazy(_("Token has no '{}' claim"), claim))
claim_time = datetime_from_epoch(claim_value)
if claim_time <= current_time:
raise TokenError(format_lazy(_("Token '{}' claim has expired"), claim))
@classmethod
def for_user(cls, user):
"""
Returns an authorization token for the given user that will be provided
after authenticating the user's credentials.
"""
user_id = getattr(user, api_settings.USER_ID_FIELD)
if not isinstance(user_id, int):
user_id = str(user_id)
token = cls()
token[api_settings.USER_ID_CLAIM] = user_id
return token
_token_backend = None
def get_token_backend(self):
if self._token_backend is None:
self._token_backend = import_string(
"rest_framework_simplejwt.state.token_backend"
)
return self._token_backend
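# Example (minimal sketch): tokens are typically built for a user, given extra
# claims via dict-style access, and then serialized. `user` is a hypothetical
# Django user instance.
#
#   token = AccessToken.for_user(user)
#   token['custom_claim'] = 'value'  # claims support dict-style access
#   encoded = str(token)             # signs and base64-encodes the payload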
class BlacklistMixin:
"""
If the `rest_framework_simplejwt.token_blacklist` app was configured to be
used, tokens created from `BlacklistMixin` subclasses will insert
themselves into an outstanding token list and also check for their
membership in a token blacklist.
"""
if 'rest_framework_simplejwt.token_blacklist' in settings.INSTALLED_APPS:
def verify(self, *args, **kwargs):
self.check_blacklist()
super().verify(*args, **kwargs)
def check_blacklist(self):
"""
Checks if this token is present in the token blacklist. Raises
`TokenError` if so.
"""
jti = self.payload[api_settings.JTI_CLAIM]
if BlacklistedToken.objects.filter(token__jti=jti).exists():
raise TokenError(_('Token is blacklisted'))
def blacklist(self):
"""
Ensures this token is included in the outstanding token list and
adds it to the blacklist.
"""
jti = self.payload[api_settings.JTI_CLAIM]
exp = self.payload['exp']
# Ensure outstanding token exists with given jti
token, _ = OutstandingToken.objects.get_or_create(
jti=jti,
defaults={
'token': str(self),
'expires_at': datetime_from_epoch(exp),
},
)
return BlacklistedToken.objects.get_or_create(token=token)
@classmethod
def for_user(cls, user):
"""
Adds this token to the outstanding token list.
"""
token = super().for_user(user)
jti = token[api_settings.JTI_CLAIM]
exp = token['exp']
OutstandingToken.objects.create(
user=user,
jti=jti,
token=str(token),
created_at=token.current_time,
expires_at=datetime_from_epoch(exp),
)
return token
class SlidingToken(BlacklistMixin, Token):
token_type = 'sliding'
lifetime = api_settings.SLIDING_TOKEN_LIFETIME
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.token is None:
# Set sliding refresh expiration claim if new token
self.set_exp(
api_settings.SLIDING_TOKEN_REFRESH_EXP_CLAIM,
from_time=self.current_time,
lifetime=api_settings.SLIDING_TOKEN_REFRESH_LIFETIME,
)
class RefreshToken(BlacklistMixin, Token):
token_type = 'refresh'
lifetime = api_settings.REFRESH_TOKEN_LIFETIME
no_copy_claims = (
api_settings.TOKEN_TYPE_CLAIM,
'exp',
# Both of these claims are included even though they may be the same.
# It seems possible that a third party token might have a custom or
# namespaced JTI claim as well as a default "jti" claim. In that case,
# we wouldn't want to copy either one.
api_settings.JTI_CLAIM,
'jti',
)
@property
def access_token(self):
"""
Returns an access token created from this refresh token. Copies all
claims present in this refresh token to the new access token except
those claims listed in the `no_copy_claims` attribute.
"""
access = AccessToken()
# Use instantiation time of refresh token as relative timestamp for
# access token "exp" claim. This ensures that both a refresh and
# access token expire relative to the same time if they are created as
# a pair.
access.set_exp(from_time=self.current_time)
no_copy = self.no_copy_claims
for claim, value in self.payload.items():
if claim in no_copy:
continue
access[claim] = value
return access
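# Example (minimal sketch): issuing a refresh/access pair, mirroring the flow
# used when obtaining a token pair. `user` is a hypothetical user instance.
#
#   refresh = RefreshToken.for_user(user)
#   data = {'refresh': str(refresh), 'access': str(refresh.access_token)}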
class AccessToken(Token):
token_type = 'access'
lifetime = api_settings.ACCESS_TOKEN_LIFETIME
class UntypedToken(Token):
token_type = 'untyped'
lifetime = timedelta(seconds=0)
def verify_token_type(self):
"""
Untyped tokens do not verify the "token_type" claim. This is useful
when performing general validation of a token's signature and other
properties which do not relate to the token's intended use.
"""
pass | /rflow-mozilla-simplejwt-0.0.1.tar.gz/rflow-mozilla-simplejwt-0.0.1/src/rest_framework_simplejwt/tokens.py | 0.732592 | 0.151153 | tokens.py | pypi |
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import HTTP_HEADER_ENCODING, authentication
from .exceptions import AuthenticationFailed, InvalidToken, TokenError
from .settings import api_settings
AUTH_HEADER_TYPES = api_settings.AUTH_HEADER_TYPES
if not isinstance(api_settings.AUTH_HEADER_TYPES, (list, tuple)):
AUTH_HEADER_TYPES = (AUTH_HEADER_TYPES,)
AUTH_HEADER_TYPE_BYTES = set(
h.encode(HTTP_HEADER_ENCODING)
for h in AUTH_HEADER_TYPES
)
class JWTAuthentication(authentication.BaseAuthentication):
"""
An authentication plugin that authenticates requests through a JSON web
token provided in a request header.
"""
www_authenticate_realm = 'api'
media_type = 'application/json'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_model = get_user_model()
def authenticate(self, request):
header = self.get_header(request)
if header is None:
return None
raw_token = self.get_raw_token(header)
if raw_token is None:
return None
validated_token = self.get_validated_token(raw_token)
return self.get_user(validated_token), validated_token
def authenticate_header(self, request):
return '{0} realm="{1}"'.format(
AUTH_HEADER_TYPES[0],
self.www_authenticate_realm,
)
def get_header(self, request):
"""
Extracts the header containing the JSON web token from the given
request.
"""
header = request.META.get(api_settings.AUTH_HEADER_NAME)
if isinstance(header, str):
# Work around django test client oddness
header = header.encode(HTTP_HEADER_ENCODING)
return header
def get_raw_token(self, header):
"""
Extracts an unvalidated JSON web token from the given "Authorization"
header value.
"""
parts = header.split()
if len(parts) == 0:
# Empty AUTHORIZATION header sent
return None
if parts[0] not in AUTH_HEADER_TYPE_BYTES:
# Assume the header does not contain a JSON web token
return None
if len(parts) != 2:
raise AuthenticationFailed(
_('Authorization header must contain two space-delimited values'),
code='bad_authorization_header',
)
return parts[1]
def get_validated_token(self, raw_token):
"""
Validates an encoded JSON web token and returns a validated token
wrapper object.
"""
messages = []
for AuthToken in api_settings.AUTH_TOKEN_CLASSES:
try:
return AuthToken(raw_token)
except TokenError as e:
messages.append({'token_class': AuthToken.__name__,
'token_type': AuthToken.token_type,
'message': e.args[0]})
raise InvalidToken({
'detail': _('Given token not valid for any token type'),
'messages': messages,
})
def get_user(self, validated_token):
"""
Attempts to find and return a user using the given validated token.
"""
try:
user_id = validated_token[api_settings.USER_ID_CLAIM]
except KeyError:
raise InvalidToken(_('Token contained no recognizable user identification'))
try:
user = self.user_model.objects.get(**{api_settings.USER_ID_FIELD: user_id})
except self.user_model.DoesNotExist:
raise AuthenticationFailed(_('User not found'), code='user_not_found')
if not user.is_active:
raise AuthenticationFailed(_('User is inactive'), code='user_inactive')
return user
class JWTTokenUserAuthentication(JWTAuthentication):
def get_user(self, validated_token):
"""
Returns a stateless user object which is backed by the given validated
token.
"""
if api_settings.USER_ID_CLAIM not in validated_token:
# The TokenUser class assumes tokens will have a recognizable user
# identifier claim.
raise InvalidToken(_('Token contained no recognizable user identification'))
return api_settings.TOKEN_USER_CLASS(validated_token)
def default_user_authentication_rule(user):
# Prior to Django 1.10, inactive users could be authenticated with the
# default `ModelBackend`. As of Django 1.10, the `ModelBackend`
# prevents inactive users from authenticating. App designers can still
# allow inactive users to authenticate by opting for the new
# `AllowAllUsersModelBackend`. However, we explicitly prevent inactive
# users from authenticating to enforce a reasonable policy and provide
# sensible backwards compatibility with older Django versions.
return user is not None and user.is_active | /rflow-mozilla-simplejwt-0.0.1.tar.gz/rflow-mozilla-simplejwt-0.0.1/src/rest_framework_simplejwt/authentication.py | 0.715921 | 0.171824 | authentication.py | pypi |
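A sketch of how `JWTAuthentication` is typically wired into Django REST Framework via settings; the fragment below is illustrative:

```python
# settings.py (fragment) -- requests carrying an
# "Authorization: Bearer <token>" header are then authenticated by
# JWTAuthentication before reaching a view.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ),
}
```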
from django.utils.translation import gettext_lazy as _
import jwt
from jwt import InvalidAlgorithmError, InvalidTokenError, PyJWKClient, algorithms
from .exceptions import TokenBackendError
from .utils import format_lazy
ALLOWED_ALGORITHMS = (
'HS256',
'HS384',
'HS512',
'RS256',
'RS384',
'RS512',
)
class TokenBackend:
def __init__(
self,
algorithm,
signing_key=None,
verifying_key=None,
audience=None,
issuer=None,
jwk_url: str = None,
leeway=0,
):
self._validate_algorithm(algorithm)
self.algorithm = algorithm
self.signing_key = signing_key
self.audience = audience
self.issuer = issuer
self.jwks_client = PyJWKClient(jwk_url) if jwk_url else None
self.leeway = leeway
if algorithm.startswith("HS"):
self.verifying_key = signing_key
else:
self.verifying_key = verifying_key
def _validate_algorithm(self, algorithm):
"""
        Ensure that the nominated algorithm is recognized, and that
        cryptography is installed for those algorithms that require it.
"""
if algorithm not in ALLOWED_ALGORITHMS:
raise TokenBackendError(format_lazy(_("Unrecognized algorithm type '{}'"), algorithm))
if algorithm in algorithms.requires_cryptography and not algorithms.has_crypto:
raise TokenBackendError(format_lazy(_("You must have cryptography installed to use {}."), algorithm))
def get_verifying_key(self, token):
if self.algorithm.startswith("HS"):
return self.signing_key
if self.jwks_client:
return self.jwks_client.get_signing_key_from_jwt(token).key
return self.verifying_key
def encode(self, payload):
"""
Returns an encoded token for the given payload dictionary.
"""
jwt_payload = payload.copy()
if self.audience is not None:
jwt_payload['aud'] = self.audience
if self.issuer is not None:
jwt_payload['iss'] = self.issuer
token = jwt.encode(jwt_payload, self.signing_key, algorithm=self.algorithm)
if isinstance(token, bytes):
# For PyJWT <= 1.7.1
return token.decode('utf-8')
# For PyJWT >= 2.0.0a1
return token
def decode(self, token, verify=True):
"""
Performs a validation of the given token and returns its payload
dictionary.
Raises a `TokenBackendError` if the token is malformed, if its
signature check fails, or if its 'exp' claim indicates it has expired.
"""
try:
return jwt.decode(
token,
self.get_verifying_key(token),
algorithms=[self.algorithm],
verify=verify,
audience=self.audience,
issuer=self.issuer,
leeway=self.leeway,
options={
'verify_aud': self.audience is not None,
'verify_signature': verify,
},
)
except InvalidAlgorithmError as ex:
raise TokenBackendError(_('Invalid algorithm specified')) from ex
except InvalidTokenError:
raise TokenBackendError(_('Token is invalid or expired')) | /rflow-mozilla-simplejwt-0.0.1.tar.gz/rflow-mozilla-simplejwt-0.0.1/src/rest_framework_simplejwt/backends.py | 0.749546 | 0.155848 | backends.py | pypi |
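An encode/decode round-trip sketch for `TokenBackend` with a symmetric algorithm. The signing key and claims are placeholders; real deployments take the key from Django settings, and `decode` raises `TokenBackendError` on tampering or expiry:

```python
from rest_framework_simplejwt.backends import TokenBackend

backend = TokenBackend(algorithm='HS256', signing_key='not-a-real-secret')
payload = {'user_id': 42, 'exp': 2_000_000_000}  # far-future expiry for the demo
token = backend.encode(payload)
assert backend.decode(token)['user_id'] == 42
```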
from django.contrib.auth import models as auth_models
from django.db.models.manager import EmptyManager
from django.utils.functional import cached_property
from .compat import CallableFalse, CallableTrue
from .settings import api_settings
class TokenUser:
"""
A dummy user class modeled after django.contrib.auth.models.AnonymousUser.
Used in conjunction with the `JWTTokenUserAuthentication` backend to
implement single sign-on functionality across services which share the same
secret key. `JWTTokenUserAuthentication` will return an instance of this
class instead of a `User` model instance. Instances of this class act as
stateless user objects which are backed by validated tokens.
"""
# User is always active since Simple JWT will never issue a token for an
# inactive user
is_active = True
_groups = EmptyManager(auth_models.Group)
_user_permissions = EmptyManager(auth_models.Permission)
def __init__(self, token):
self.token = token
def __str__(self):
return 'TokenUser {}'.format(self.id)
@cached_property
def id(self):
return self.token[api_settings.USER_ID_CLAIM]
@cached_property
def pk(self):
return self.id
@cached_property
def username(self):
return self.token.get('username', '')
@cached_property
def is_staff(self):
return self.token.get('is_staff', False)
@cached_property
def is_superuser(self):
return self.token.get('is_superuser', False)
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.id)
def save(self):
raise NotImplementedError('Token users have no DB representation')
def delete(self):
raise NotImplementedError('Token users have no DB representation')
def set_password(self, raw_password):
raise NotImplementedError('Token users have no DB representation')
def check_password(self, raw_password):
raise NotImplementedError('Token users have no DB representation')
@property
def groups(self):
return self._groups
@property
def user_permissions(self):
return self._user_permissions
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return set()
def has_perm(self, perm, obj=None):
return False
def has_perms(self, perm_list, obj=None):
return False
def has_module_perms(self, module):
return False
@property
def is_anonymous(self):
return CallableFalse
@property
def is_authenticated(self):
return CallableTrue
def get_username(self):
return self.username | /rflow-mozilla-simplejwt-0.0.1.tar.gz/rflow-mozilla-simplejwt-0.0.1/src/rest_framework_simplejwt/models.py | 0.842507 | 0.218378 | models.py | pypi |
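A sketch of `TokenUser` in isolation, inside a configured Django project. A plain dict stands in for a validated token, since `TokenUser` only reads claims by key; `USER_ID_CLAIM` defaults to `'user_id'` in this package's settings:

```python
from rest_framework_simplejwt.models import TokenUser

claims = {'user_id': 42, 'username': 'alice', 'is_staff': False}
user = TokenUser(claims)  # a dict mimics a token's mapping interface
assert user.id == 42 and user.get_username() == 'alice'
assert user.is_authenticated  # CallableTrue: truthy and callable
```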
import logging
import time
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from urllib.request import parse_http_list, parse_keqv_list
LOGGER = logging.getLogger(__name__)
def parse_www_authenticate_header(header):
"""
Convert a WWW-Authentication header into a dict that can be used
in a JSON response.
"""
items = parse_http_list(header)
return parse_keqv_list(items)
def import_from_settings(attr, *args):
"""
Load an attribute from the django settings.
:raises:
ImproperlyConfigured
"""
try:
if args:
return getattr(settings, attr, args[0])
return getattr(settings, attr)
except AttributeError:
raise ImproperlyConfigured('Setting {0} not found'.format(attr))
def absolutify(request, path):
"""Return the absolute URL of a path."""
return request.build_absolute_uri(path)
def is_authenticated(user):
"""return True if the user is authenticated.
This is necessary because in Django 1.10 the `user.is_authenticated`
stopped being a method and is now a property.
Actually `user.is_authenticated()` actually works, thanks to a backwards
compat trick in Django. But in Django 2.0 it will cease to work
as a callable method.
"""
msg = '`is_authenticated()` is going to be removed in mozilla-django-oidc v 2.x'
warnings.warn(msg, DeprecationWarning)
return user.is_authenticated
def add_state_and_nonce_to_session(request, state, params):
"""
Stores the `state` and `nonce` parameters in a session dictionary including the time when it
was added. The dictionary can contain multiple state/nonce combinations to allow parallel
logins with multiple browser sessions.
To keep the session space to a reasonable size, the dictionary is kept at 50 state/nonce
combinations maximum.
"""
nonce = params.get('nonce')
# Store Nonce with the State parameter in the session "oidc_states" dictionary.
# The dictionary can store multiple State/Nonce combinations to allow parallel
# authentication flows which would otherwise overwrite State/Nonce values!
# The "oidc_states" dictionary uses the state as key and as value a dictionary with "nonce"
# and "added_on". "added_on" contains the time when the state was added to the session.
# With this value, the oldest element can be found and deleted from the session.
if 'oidc_states' not in request.session or \
not isinstance(request.session['oidc_states'], dict):
request.session['oidc_states'] = {}
# Make sure that the State/Nonce dictionary in the session does not get too big.
    # If the number of State/Nonce combinations reaches a certain threshold,
    # remove the oldest state by finding the element with the oldest
    # "added_on" time.
limit = import_from_settings('OIDC_MAX_STATES', 50)
if len(request.session['oidc_states']) >= limit:
LOGGER.info(
            'User has {} or more "oidc_states" in their session, '
'deleting the oldest one!'.format(limit)
)
oldest_state = None
oldest_added_on = time.time()
for item_state, item in request.session['oidc_states'].items():
if item['added_on'] < oldest_added_on:
oldest_state = item_state
oldest_added_on = item['added_on']
if oldest_state:
del request.session['oidc_states'][oldest_state]
request.session['oidc_states'][state] = {
'nonce': nonce,
'added_on': time.time(),
} | /rflow-mozilla-simplejwt-0.0.1.tar.gz/rflow-mozilla-simplejwt-0.0.1/src/mozilla_django_oidc/utils.py | 0.695855 | 0.267656 | utils.py | pypi |
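For example, `parse_www_authenticate_header` turns a challenge header into a dict:

```python
from mozilla_django_oidc.utils import parse_www_authenticate_header

header = 'error="invalid_token", error_description="The token expired"'
print(parse_www_authenticate_header(header))
# -> {'error': 'invalid_token', 'error_description': 'The token expired'}
```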
"""Experimental Resolver for getting the artifacts based on Span."""
from typing import Dict, List, Optional, Text
from tfx import types
from tfx.components.example_gen import utils
from tfx.dsl.components.common import resolver
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
class SpansResolver(resolver.ResolverStrategy):
"""Resolver that return the artifacts based on Span.
Note that this Resolver is experimental and is subject to change in terms of
both interface and implementation.
"""
def __init__(self, range_config: range_config_pb2.RangeConfig):
self._range_config = range_config
def _resolve(self, input_dict: Dict[Text, List[types.Artifact]]):
result = {}
for k, artifact_list in input_dict.items():
in_range_artifacts = []
if self._range_config.HasField('static_range'):
start_span_number = self._range_config.static_range.start_span_number
end_span_number = self._range_config.static_range.end_span_number
# Get the artifacts within range.
for artifact in artifact_list:
if not artifact.has_custom_property(utils.SPAN_PROPERTY_NAME):
            raise RuntimeError('Span does not exist for %s' % str(artifact))
span = int(
artifact.get_string_custom_property(utils.SPAN_PROPERTY_NAME))
if span >= start_span_number and span <= end_span_number:
in_range_artifacts.append(artifact)
elif self._range_config.HasField('rolling_range'):
start_span_number = self._range_config.rolling_range.start_span_number
num_spans = self._range_config.rolling_range.num_spans
if num_spans <= 0:
          raise ValueError('num_spans should be a positive number.')
most_recent_span = -1
# Get most recent span number.
for artifact in artifact_list:
if not artifact.has_custom_property(utils.SPAN_PROPERTY_NAME):
            raise RuntimeError('Span does not exist for %s' % str(artifact))
span = int(
artifact.get_string_custom_property(utils.SPAN_PROPERTY_NAME))
if span > most_recent_span:
most_recent_span = span
start_span_number = max(start_span_number,
most_recent_span - num_spans + 1)
end_span_number = most_recent_span
# Get the artifacts within range.
for artifact in artifact_list:
span = int(
artifact.get_string_custom_property(utils.SPAN_PROPERTY_NAME))
if span >= start_span_number and span <= end_span_number:
in_range_artifacts.append(artifact)
else:
raise ValueError('RangeConfig type is not supported.')
      result[k] = sorted(
          in_range_artifacts,
          # Compare spans numerically; lexicographic order breaks at span 10.
          key=lambda a: int(
              a.get_string_custom_property(utils.SPAN_PROPERTY_NAME)),
          reverse=True)
return result
def resolve(
self,
pipeline_info: data_types.PipelineInfo,
metadata_handler: metadata.Metadata,
source_channels: Dict[Text, types.Channel],
) -> resolver.ResolveResult:
pipeline_context = metadata_handler.get_pipeline_context(pipeline_info)
if pipeline_context is None:
      raise RuntimeError('Pipeline context absent for %s' % pipeline_info)
candidate_dict = {}
for k, c in source_channels.items():
candidate_artifacts = metadata_handler.get_qualified_artifacts(
contexts=[pipeline_context],
type_name=c.type_name,
producer_component_id=c.producer_component_id,
output_key=c.output_key)
candidate_dict[k] = [
artifact_utils.deserialize_artifact(a.type, a.artifact)
for a in candidate_artifacts
]
resolved_dict = self._resolve(candidate_dict)
resolve_state_dict = {
k: bool(artifact_list) for k, artifact_list in resolved_dict.items()
}
return resolver.ResolveResult(
per_key_resolve_result=resolved_dict,
per_key_resolve_state=resolve_state_dict)
def resolve_artifacts(
self, metadata_handler: metadata.Metadata,
input_dict: Dict[Text, List[types.Artifact]]
) -> Optional[Dict[Text, List[types.Artifact]]]:
"""Resolves artifacts from channels by querying MLMD.
Args:
metadata_handler: A metadata handler to access MLMD store.
input_dict: The input_dict to resolve from.
Returns:
      If `min_count` for every input is met, returns a
Dict[Text, List[Artifact]]. Otherwise, return None.
Raises:
RuntimeError: if input_dict contains artifact without span property.
"""
resolved_dict = self._resolve(input_dict)
all_min_count_met = all(
bool(artifact_list) for artifact_list in resolved_dict.values())
return resolved_dict if all_min_count_met else None | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/experimental/spans_resolver.py | 0.945851 | 0.257654 | spans_resolver.py | pypi |
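A sketch of constructing the strategy above to keep the two most recent spans per channel; field names follow `range_config.proto`, and the import path matches this file's location:

```python
from tfx.dsl.experimental.spans_resolver import SpansResolver
from tfx.proto import range_config_pb2

# Keep the two most recent spans of each input channel.
range_config = range_config_pb2.RangeConfig(
    rolling_range=range_config_pb2.RollingRange(num_spans=2))
strategy = SpansResolver(range_config=range_config)
# strategy.resolve_artifacts(metadata_handler, input_dict) then returns the
# in-range artifacts per key, or None if any key resolves to an empty list.
```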
"""Utils for TFX component types. Intended for internal usage only."""
from typing import Any, Callable, Dict, Optional, Text
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec as base_executor_spec
from tfx.types import component_spec
def create_tfx_component_class(
name: Text,
tfx_executor_spec: base_executor_spec.ExecutorSpec,
input_channel_parameters: Dict[Text,
component_spec.ChannelParameter] = None,
output_channel_parameters: Dict[Text,
component_spec.ChannelParameter] = None,
execution_parameters: Dict[Text, component_spec.ExecutionParameter] = None,
default_init_args: Optional[Dict[Text, Any]] = None
) -> Callable[..., base_component.BaseComponent]:
"""Creates README.ml-pipelines-sdk.md TFX component class dynamically."""
tfx_component_spec_class = type(
str(name) + 'Spec',
(component_spec.ComponentSpec,),
dict(
PARAMETERS=execution_parameters,
INPUTS=input_channel_parameters,
OUTPUTS=output_channel_parameters,
),
)
def tfx_component_class_init(self, **kwargs):
instance_name = kwargs.pop('instance_name', None)
arguments = {}
arguments.update(kwargs)
    arguments.update(default_init_args or {})  # default_init_args may be None
# Provide default values for output channels.
output_channel_params = output_channel_parameters or {}
for output_key, output_channel_param in output_channel_params.items():
if output_key not in arguments:
arguments[output_key] = types.Channel(type=output_channel_param.type)
base_component.BaseComponent.__init__(
self,
# Generate spec by wiring up the input/output channel.
spec=self.__class__.SPEC_CLASS(**arguments),
instance_name=instance_name,
)
tfx_component_class = type(
str(name),
(base_component.BaseComponent,),
dict(
SPEC_CLASS=tfx_component_spec_class,
EXECUTOR_SPEC=tfx_executor_spec,
__init__=tfx_component_class_init,
),
)
return tfx_component_class | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/component/experimental/component_utils.py | 0.880393 | 0.37419 | component_utils.py | pypi |
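A minimal sketch of `create_tfx_component_class`, with a no-op executor defined purely for illustration:

```python
from tfx.dsl.component.experimental import component_utils
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.types import component_spec
from tfx.types import standard_artifacts

class _NoOpExecutor(base_executor.BaseExecutor):

  def Do(self, input_dict, output_dict, exec_properties):
    pass  # illustrative no-op

MyComponent = component_utils.create_tfx_component_class(
    name='MyComponent',
    tfx_executor_spec=executor_spec.ExecutorClassSpec(_NoOpExecutor),
    input_channel_parameters={
        'examples':
            component_spec.ChannelParameter(type=standard_artifacts.Examples),
    },
    output_channel_parameters={
        'stats':
            component_spec.ChannelParameter(
                type=standard_artifacts.ExampleStatistics),
    },
    execution_parameters={},
    default_init_args={},
)
```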
"""Functions for creating container components."""
from typing import Any, Callable, Dict, List, Text
from tfx.dsl.component.experimental import component_utils
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import base_component
from tfx.types import channel_utils
from tfx.types import component_spec
def create_container_component(
name: Text,
image: Text,
command: List[placeholders.CommandlineArgumentType],
inputs: Dict[Text, Any] = None,
outputs: Dict[Text, Any] = None,
parameters: Dict[Text, Any] = None,
) -> Callable[..., base_component.BaseComponent]:
"""Creates README.ml-pipelines-sdk.md container-based component.
Args:
name: The name of the component
image: Container image name.
    command: Container entrypoint command-line. Not executed within a shell. The
command-line can use placeholder objects that will be replaced at the
compilation time. The placeholder objects can be imported from
tfx.dsl.component.experimental.placeholders. Note that Jinja templates are
not supported.
    inputs: A dict mapping component input names to artifact types.
    outputs: A dict mapping component output names to artifact types.
    parameters: A dict mapping component parameter names to primitive types.
Returns:
    Component that can be instantiated and used inside a pipeline.
Example:
component = create_container_component(
name='TrainModel',
inputs={
'training_data': Dataset,
},
outputs={
'model': Model,
},
parameters={
'num_training_steps': int,
},
image='gcr.io/my-project/my-trainer',
command=[
'python3', 'my_trainer',
'--training_data_uri', InputUriPlaceholder('training_data'),
'--model_uri', OutputUriPlaceholder('model'),
'--num_training-steps', InputValuePlaceholder('num_training_steps'),
]
)
"""
if not name:
raise ValueError('Component name cannot be empty.')
if inputs is None:
inputs = {}
if outputs is None:
outputs = {}
if parameters is None:
parameters = {}
input_channel_parameters = {}
output_channel_parameters = {}
output_channels = {}
execution_parameters = {}
for input_name, channel_type in inputs.items():
# TODO(b/155804245) Sanitize the names so that they're valid python names
input_channel_parameters[input_name] = (
component_spec.ChannelParameter(type=channel_type,))
for output_name, channel_type in outputs.items():
# TODO(b/155804245) Sanitize the names so that they're valid python names
output_channel_parameters[output_name] = (
component_spec.ChannelParameter(type=channel_type))
artifact = channel_type()
channel = channel_utils.as_channel([artifact])
output_channels[output_name] = channel
for param_name, parameter_type in parameters.items():
# TODO(b/155804245) Sanitize the names so that they're valid python names
execution_parameters[param_name] = (
component_spec.ExecutionParameter(type=parameter_type))
default_init_args = {**output_channels}
return component_utils.create_tfx_component_class(
name=name,
tfx_executor_spec=executor_specs.TemplatedExecutorContainerSpec(
image=image,
command=command,
),
input_channel_parameters=input_channel_parameters,
output_channel_parameters=output_channel_parameters,
execution_parameters=execution_parameters,
default_init_args=default_init_args) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/component/experimental/container_component.py | 0.840521 | 0.356895 | container_component.py | pypi |
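A second, self-contained sketch: a component that copies its input artifact's payload to its output location. The image and shell command are illustrative placeholders:

```python
from tfx.dsl.component.experimental.container_component import (
    create_container_component)
from tfx.dsl.component.experimental.placeholders import (
    InputUriPlaceholder, OutputUriPlaceholder)
from tfx.types import standard_artifacts

Copier = create_container_component(
    name='Copier',
    image='alpine:3',  # illustrative image
    command=[
        'sh', '-c', 'cp -r "$0"/. "$1"/',
        InputUriPlaceholder('data'),
        OutputUriPlaceholder('copy'),
    ],
    inputs={'data': standard_artifacts.Examples},
    outputs={'copy': standard_artifacts.Examples},
)
# Inside a pipeline, instances take channel keyword arguments, e.g.
# Copier(data=example_gen.outputs['examples']).
```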
"""Command-line placeholders for use in container component definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text, Union
from tfx.utils import json_utils
class InputValuePlaceholder(json_utils.Jsonable):
"""Represents README.ml-pipelines-sdk.md placeholder for the value of the input argument.
Represents README.ml-pipelines-sdk.md placeholder that will be replaced at runtime with the string
value of the input argument of an execution property.
"""
def __init__(self, input_name: Text):
self.input_name = input_name
def __eq__(self, other) -> bool:
return (isinstance(other, self.__class__) and
self.input_name == other.input_name)
def __ne__(self, other) -> bool:
return not self.__eq__(other)
class InputUriPlaceholder(json_utils.Jsonable):
"""Represents README.ml-pipelines-sdk.md placeholder for the URI of the input artifact argument.
Represents README.ml-pipelines-sdk.md placeholder that will be replaced at runtime with the URI
of the input artifact argument data.
"""
def __init__(self, input_name: Text):
self.input_name = input_name
def __eq__(self, other) -> bool:
return (isinstance(other, self.__class__) and
self.input_name == other.input_name)
def __ne__(self, other) -> bool:
return not self.__eq__(other)
class OutputUriPlaceholder(json_utils.Jsonable):
"""Represents README.ml-pipelines-sdk.md placeholder for the URI of the output artifact argument.
Represents README.ml-pipelines-sdk.md placeholder that will be replaced at runtime with the URI
for the output artifact data.
"""
def __init__(self, output_name: Text):
self.output_name = output_name
def __eq__(self, other) -> bool:
return (isinstance(other, self.__class__) and
self.output_name == other.output_name)
def __ne__(self, other) -> bool:
return not self.__eq__(other)
class ConcatPlaceholder(object):
"""Represents README.ml-pipelines-sdk.md placeholder for result of concatenation of multiple parts.
Represents README.ml-pipelines-sdk.md placeholder that will be replaced at runtime with README.ml-pipelines-sdk.md single string
containing the concatenated parts.
"""
def __init__(self, items: List['CommandlineArgumentType']):
self.items = items
def __eq__(self, other) -> bool:
return isinstance(other, self.__class__) and self.items == other.items
def __ne__(self, other) -> bool:
return not self.__eq__(other)
CommandlineArgumentType = Union[
Text,
InputValuePlaceholder,
InputUriPlaceholder,
OutputUriPlaceholder,
ConcatPlaceholder,
] | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/component/experimental/placeholders.py | 0.936008 | 0.315225 | placeholders.py | pypi |
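For instance, `ConcatPlaceholder` lets one command-line flag interleave literal text with runtime-resolved values:

```python
from tfx.dsl.component.experimental.placeholders import (
    ConcatPlaceholder, InputUriPlaceholder)

# Rendered at compilation time as '--input=<uri of training_data>'.
arg = ConcatPlaceholder(['--input=', InputUriPlaceholder('training_data')])
```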
# TODO(ccy): Remove pytype "disable=attribute-error" and "disable=module-attr"
# overrides after Python 2 support is removed from TFX.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import inspect
import types
from typing import Any, Dict, Optional, Set, Text, Tuple, Type, Union
from tfx.dsl.component.experimental import annotations
from tfx.types import artifact
from tfx.types import standard_artifacts
class ArgFormats(enum.Enum):
INPUT_ARTIFACT = 1
OUTPUT_ARTIFACT = 2
ARTIFACT_VALUE = 3
PARAMETER = 4
_PRIMITIVE_TO_ARTIFACT = {
int: standard_artifacts.Integer,
float: standard_artifacts.Float,
Text: standard_artifacts.String,
bytes: standard_artifacts.Bytes,
}
# Map from `Optional[T]` to `T` for primitive types. This map is a simple way
# to extract the value of `T` from its optional typehint, since the internal
# fields of the typehint vary depending on the Python version.
_OPTIONAL_PRIMITIVE_MAP = dict((Optional[t], t) for t in _PRIMITIVE_TO_ARTIFACT)
def _validate_signature(
func: types.FunctionType,
argspec: inspect.FullArgSpec, # pytype: disable=module-attr
typehints: Dict[Text, Any],
subject_message: Text) -> None:
"""Validates signature of README.ml-pipelines-sdk.md typehint-annotated component executor function."""
args, varargs, keywords = argspec.args, argspec.varargs, argspec.varkw
if varargs or keywords:
raise ValueError('%s does not support *args or **kwargs arguments.' %
subject_message)
# Validate argument type hints.
for arg in args:
if isinstance(arg, list):
# Note: this feature was removed in Python 3:
# https://www.python.org/dev/peps/pep-3113/.
raise ValueError('%s does not support nested input arguments.' %
subject_message)
if arg not in typehints:
raise ValueError('%s must have all arguments annotated with typehints.' %
subject_message)
# Validate return type hints.
if isinstance(typehints.get('return', None), annotations.OutputDict):
for arg, arg_typehint in typehints['return'].kwargs.items():
if (isinstance(arg_typehint, annotations.OutputArtifact) or
(inspect.isclass(arg_typehint) and
issubclass(arg_typehint, artifact.Artifact))):
raise ValueError(
('Output artifacts for the component executor function %r should '
'be declared as function parameters annotated with type hint '
            '`tfx.types.annotations.OutputArtifact[T]` where T is a '
'subclass of `tfx.types.Artifact`. They should not be declared '
'as part of the return value `OutputDict` type hint.') % func)
elif 'return' not in typehints or typehints['return'] in (None, type(None)):
pass
else:
raise ValueError(
('%s must have either an OutputDict instance or `None` as its return '
'value typehint.') % subject_message)
def _parse_signature(
func: types.FunctionType,
argspec: inspect.FullArgSpec, # pytype: disable=module-attr
typehints: Dict[Text, Any]
) -> Tuple[Dict[Text, Type[artifact.Artifact]], Dict[
Text, Type[artifact.Artifact]], Dict[Text, Type[Union[
int, float, Text, bytes]]], Dict[Text, Any], Dict[Text, ArgFormats],
Set[Text]]:
"""Parses signature of README.ml-pipelines-sdk.md typehint-annotated component executor function.
Args:
func: A component executor function to be parsed.
argspec: A `inspect.FullArgSpec` instance describing the component executor
function. Usually obtained from `inspect.getfullargspec(func)`.
typehints: A dictionary mapping function argument names to type hints.
Usually obtained from `func.__annotations__`.
Returns:
    inputs: A dictionary mapping each input name to its artifact type (as a
      subclass of `tfx.types.Artifact`).
    outputs: A dictionary mapping each output name to its artifact type (as a
      subclass of `tfx.types.Artifact`).
    parameters: A dictionary mapping each parameter name to its primitive type
      (one of `int`, `float`, `Text` and `bytes`).
    arg_formats: Dictionary representing the input arguments of the given
      component executor function. Each entry's key is the argument's string
      name; each entry's value is the format of the argument to be passed into
      the function (given by a value of the `ArgFormats` enum).
arg_defaults: Dictionary mapping names of optional arguments to default
values.
returned_outputs: A set of output names that are declared as ValueArtifact
returned outputs.
"""
# Extract optional arguments as dict from name to its declared optional value.
arg_defaults = {}
if argspec.defaults:
arg_defaults = dict(
zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
# Parse function arguments.
inputs = {}
outputs = {}
parameters = {}
arg_formats = {}
returned_outputs = set()
for arg in argspec.args:
arg_typehint = typehints[arg]
    # If the typehint is `Optional[T]` for a primitive type `T`, unwrap it.
if arg_typehint in _OPTIONAL_PRIMITIVE_MAP:
arg_typehint = _OPTIONAL_PRIMITIVE_MAP[arg_typehint]
if isinstance(arg_typehint, annotations.InputArtifact):
if arg_defaults.get(arg, None) is not None:
raise ValueError(
('If an input artifact is declared as an optional argument, '
'its default value must be `None` (got default value %r for '
'input argument %r of %r instead).') %
(arg_defaults[arg], arg, func))
arg_formats[arg] = ArgFormats.INPUT_ARTIFACT
inputs[arg] = arg_typehint.type
elif isinstance(arg_typehint, annotations.OutputArtifact):
if arg in arg_defaults:
raise ValueError(
('Output artifact of component function cannot be declared as '
'optional (error for argument %r of %r).') % (arg, func))
arg_formats[arg] = ArgFormats.OUTPUT_ARTIFACT
outputs[arg] = arg_typehint.type
elif isinstance(arg_typehint, annotations.Parameter):
if arg in arg_defaults:
if not (arg_defaults[arg] is None or
isinstance(arg_defaults[arg], arg_typehint.type)):
raise ValueError((
'The default value for optional parameter %r on function %r must '
'be an instance of its declared type %r or `None` (got %r '
'instead)') % (arg, func, arg_typehint.type, arg_defaults[arg]))
arg_formats[arg] = ArgFormats.PARAMETER
parameters[arg] = arg_typehint.type
elif arg_typehint in _PRIMITIVE_TO_ARTIFACT:
if arg in arg_defaults:
if not (arg_defaults[arg] is None or
isinstance(arg_defaults[arg], arg_typehint)):
raise ValueError(
('The default value for optional input value %r on function %r '
'must be an instance of its declared type %r or `None` (got %r '
'instead)') % (arg, func, arg_typehint, arg_defaults[arg]))
arg_formats[arg] = ArgFormats.ARTIFACT_VALUE
inputs[arg] = _PRIMITIVE_TO_ARTIFACT[arg_typehint]
elif (inspect.isclass(arg_typehint) and
issubclass(arg_typehint, artifact.Artifact)):
raise ValueError((
'Invalid type hint annotation for argument %r on function %r. '
'Argument with an artifact class typehint annotation should indicate '
'whether it is used as an input or output artifact by using the '
'`InputArtifact[ArtifactType]` or `OutputArtifact[ArtifactType]` '
'typehint annotations.') % (arg, func))
else:
raise ValueError(
'Unknown type hint annotation for argument %r on function %r' %
(arg, func))
if 'return' in typehints and typehints['return'] not in (None, type(None)):
for arg, arg_typehint in typehints['return'].kwargs.items():
if arg_typehint in _PRIMITIVE_TO_ARTIFACT:
outputs[arg] = _PRIMITIVE_TO_ARTIFACT[arg_typehint]
returned_outputs.add(arg)
else:
raise ValueError(
('Unknown type hint annotation %r for returned output %r on '
'function %r') % (arg_typehint, arg, func))
return (inputs, outputs, parameters, arg_formats, arg_defaults,
returned_outputs)
def parse_typehint_component_function(
func: types.FunctionType
) -> Tuple[Dict[Text, Type[artifact.Artifact]], Dict[
Text, Type[artifact.Artifact]], Dict[Text, Type[Union[
int, float, Text, bytes]]], Dict[Text, Any], Dict[Text, ArgFormats],
Set[Text]]:
"""Parses the given component executor function.
  This method parses a typehint-annotated Python function that is intended to
  be used as a component, and returns the information needed about the
  interface (inputs / outputs / returned output values) of that component, as
  well as a list of argument names and formats for the parameters that
should be passed when calling `func(*args)`.
Args:
func: A component executor function to be parsed.
Returns:
    inputs: A dictionary mapping each input name to its artifact type (as a
      subclass of `tfx.types.Artifact`).
    outputs: A dictionary mapping each output name to its artifact type (as a
      subclass of `tfx.types.Artifact`).
    parameters: A dictionary mapping each parameter name to its primitive type
      (one of `int`, `float`, `Text` and `bytes`).
    arg_formats: Dictionary representing the input arguments of the given
      component executor function. Each entry's key is the argument's string
      name; each entry's value is the format of the argument to be passed into
      the function (given by a value of the `ArgFormats` enum).
arg_defaults: Dictionary mapping names of optional arguments to default
values.
returned_outputs: A set of output names that are declared as ValueArtifact
returned outputs.
"""
# Check input argument type.
if not isinstance(func, types.FunctionType):
raise ValueError(
        'Expected a typehint-annotated Python function (got %r instead).' %
(func,))
# Inspect the component executor function.
typehints = func.__annotations__ # pytype: disable=attribute-error
argspec = inspect.getfullargspec(func) # pytype: disable=module-attr
  subject_message = 'Component declared as a typehint-annotated function'
_validate_signature(func, argspec, typehints, subject_message)
# Parse the function and return its details.
inputs, outputs, parameters, arg_formats, arg_defaults, returned_outputs = (
_parse_signature(func, argspec, typehints))
return (inputs, outputs, parameters, arg_formats, arg_defaults,
returned_outputs) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/component/experimental/function_parser.py | 0.656878 | 0.288203 | function_parser.py | pypi |
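A sketch of a function this parser accepts, followed by the parse call; artifact types come from `tfx.types.standard_artifacts` and the executor body is elided:

```python
from tfx.dsl.component.experimental import function_parser
from tfx.dsl.component.experimental.annotations import (
    InputArtifact, OutputArtifact, OutputDict, Parameter)
from tfx.types import standard_artifacts

def train(
    examples: InputArtifact[standard_artifacts.Examples],
    model: OutputArtifact[standard_artifacts.Model],
    num_steps: Parameter[int],
    learning_rate: float = 1e-3,
) -> OutputDict(loss=float):
  ...  # executor body elided

(inputs, outputs, parameters, arg_formats, arg_defaults,
 returned_outputs) = function_parser.parse_typehint_component_function(train)
# inputs: {'examples': Examples, 'learning_rate': Float}
# outputs: {'model': Model, 'loss': Float}; returned_outputs: {'loss'}
# parameters: {'num_steps': int}; arg_defaults: {'learning_rate': 0.001}
```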
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from typing import Text, Type, Union
from six import with_metaclass
from tfx.types import artifact
class _ArtifactGenericMeta(type):
"""Metaclass for _ArtifactGeneric, to enable class indexing."""
def __getitem__(cls: Type['_ArtifactGeneric'],
params: Type[artifact.Artifact]):
"""Metaclass method allowing indexing class (`_ArtifactGeneric[T]`)."""
return cls._generic_getitem(params) # pytype: disable=attribute-error
class _ArtifactGeneric(with_metaclass(_ArtifactGenericMeta, object)):
"""A generic that takes README.ml-pipelines-sdk.md Type[tfx.types.Artifact] as its single argument."""
def __init__( # pylint: disable=invalid-name
self,
artifact_type: Type[artifact.Artifact],
_init_via_getitem=False):
if not _init_via_getitem:
class_name = self.__class__.__name__
raise ValueError(
          ('%s should be instantiated via the syntax `%s[T]`, where T is a '
'subclass of tfx.types.Artifact.') % (class_name, class_name))
self.type = artifact_type
@classmethod
def _generic_getitem(cls, params):
"""Return the result of `_ArtifactGeneric[T]` for README.ml-pipelines-sdk.md given type T."""
# Check that the given parameter is README.ml-pipelines-sdk.md concrete (i.e. non-abstract) subclass
# of `tfx.types.Artifact`.
if (inspect.isclass(params) and issubclass(params, artifact.Artifact) and
params.TYPE_NAME):
return cls(params, _init_via_getitem=True)
else:
class_name = cls.__name__
raise ValueError(
          ('Generic type `%s[T]` expects the single parameter T to be a '
'concrete subclass of `tfx.types.Artifact` (got %r instead).') %
(class_name, params))
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self.type)
class _PrimitiveTypeGenericMeta(type):
"""Metaclass for _PrimitiveTypeGeneric, to enable primitive type indexing."""
def __getitem__(cls: Type[Union[int, float, Text, bytes]],
params: Type[artifact.Artifact]):
"""Metaclass method allowing indexing class (`_PrimitiveTypeGeneric[T]`)."""
return cls._generic_getitem(params) # pytype: disable=attribute-error
class _PrimitiveTypeGeneric(with_metaclass(_PrimitiveTypeGenericMeta, object)):
"""A generic that takes README.ml-pipelines-sdk.md primitive type as its single argument."""
def __init__( # pylint: disable=invalid-name
self,
artifact_type: Type[Union[int, float, Text, bytes]],
_init_via_getitem=False):
if not _init_via_getitem:
class_name = self.__class__.__name__
raise ValueError(
('%s should be instantiated via the syntax `%s[T]`, where T is '
'`int`, `float`, `str` or `bytes`.') % (class_name, class_name))
self.type = artifact_type
@classmethod
def _generic_getitem(cls, params):
"""Return the result of `_PrimitiveTypeGeneric[T]` for README.ml-pipelines-sdk.md given type T."""
# Check that the given parameter is README.ml-pipelines-sdk.md primitive type.
if inspect.isclass(params) and params in (int, float, Text, bytes):
return cls(params, _init_via_getitem=True)
else:
class_name = cls.__name__
raise ValueError(
('Generic type `%s[T]` expects the single parameter T to be '
'`int`, `float`, `str` or `bytes` (got %r instead).') %
(class_name, params))
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self.type)
# Typehint annotations for component authoring.
class InputArtifact(_ArtifactGeneric):
"""Input artifact object type annotation."""
pass
class OutputArtifact(_ArtifactGeneric):
"""Output artifact object type annotation."""
pass
class Parameter(_PrimitiveTypeGeneric):
"""Component parameter type annotation."""
pass
# TODO(ccy): potentially make this compatible with `typing.TypedDict` in
# Python 3.8, to allow for component return value type checking.
class OutputDict(object):
"""Decorator declaring component executor function outputs."""
def __init__(self, **kwargs):
self.kwargs = kwargs | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/component/experimental/annotations.py | 0.890112 | 0.158109 | annotations.py | pypi |
"""Executor specifications for components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from typing import List, Optional, Text, Union
from tfx import types
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import executor_spec
from tfx.dsl.placeholder import placeholder
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import placeholder_pb2
from google.protobuf import message
class TemplatedExecutorContainerSpec(executor_spec.ExecutorSpec):
"""Experimental: Describes README.ml-pipelines-sdk.md command-line program inside README.ml-pipelines-sdk.md container.
This class is similar to ExecutorContainerSpec, but uses structured
placeholders instead of jinja templates for constructing container commands
  based on input and output artifact metadata. See placeholders.py for a list of
supported placeholders.
The spec includes the container image name and the command line
  (entrypoint plus arguments) for a program inside the container.
Example:
class MyTrainer(base_component.BaseComponent)
class MyTrainerSpec(types.ComponentSpec):
INPUTS = {
'training_data':
component_spec.ChannelParameter(type=standard_artifacts.Dataset),
}
OUTPUTS = {
'model':
component_spec.ChannelParameter(type=standard_artifacts.Model),
}
PARAMETERS = {
'num_training_steps': component_spec.ExecutionParameter(type=int),
}
SPEC_CLASS = MyTrainerSpec
EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
image='gcr.io/my-project/my-trainer',
command=[
'python3', 'my_trainer',
'--training_data_uri', InputUriPlaceholder('training_data'),
'--model_uri', OutputUriPlaceholder('model'),
'--num_training-steps', InputValuePlaceholder('num_training_steps'),
]
)
Attributes:
image: Container image name.
    command: Container entrypoint command-line. Not executed within a shell.
The command-line can use placeholder objects that will be replaced at
the compilation time. Note: Jinja templates are not supported.
"""
# The "command" parameter holds the name of the program and its arguments.
# The "command" parameter is required to enable instrumentation.
# The command-line is often split into command+args, but here "args" would be
# redundant since all items can just be added to "command".
def __init__(
self,
image: Text,
command: List[placeholders.CommandlineArgumentType],
):
self.image = image
self.command = command
super(TemplatedExecutorContainerSpec, self).__init__()
def __eq__(self, other) -> bool:
return (isinstance(other, self.__class__) and self.image == other.image and
self.command == other.command)
def __ne__(self, other) -> bool:
return not self.__eq__(other)
def _recursively_encode(
self, command: placeholders.CommandlineArgumentType
) -> Union[str, placeholder.Placeholder]:
if isinstance(command, str):
return command
elif isinstance(command, placeholders.InputValuePlaceholder):
return placeholder.input(command.input_name)[0]
elif isinstance(command, placeholders.InputUriPlaceholder):
return placeholder.input(command.input_name)[0].uri
elif isinstance(command, placeholders.OutputUriPlaceholder):
return placeholder.output(command.output_name)[0].uri
elif isinstance(command, placeholders.ConcatPlaceholder):
      # operator.add will use the overloaded __add__ operator for Placeholder
# instances.
return functools.reduce(
operator.add,
[self._recursively_encode(item) for item in command.items])
else:
raise TypeError(
('Unsupported type of command-line arguments: "{}".'
' Supported types are {}.')
.format(type(command), str(placeholders.CommandlineArgumentType)))
def encode(
self,
component_spec: Optional[types.ComponentSpec] = None) -> message.Message:
"""Encodes ExecutorSpec into an IR proto for compiling.
This method will be used by DSL compiler to generate the corresponding IR.
Args:
component_spec: Optional. The ComponentSpec to help with the encoding.
Returns:
An executor spec proto.
"""
result = executable_spec_pb2.ContainerExecutableSpec()
result.image = self.image
for command in self.command:
cmd = result.commands.add()
str_or_placeholder = self._recursively_encode(command)
if isinstance(str_or_placeholder, str):
expression = placeholder_pb2.PlaceholderExpression()
expression.value.string_value = str_or_placeholder
cmd.CopyFrom(expression)
else:
cmd.CopyFrom(self._recursively_encode(command).encode())
return result | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/component/experimental/executor_specs.py | 0.91103 | 0.238329 | executor_specs.py | pypi |
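Encoding a spec yields a `ContainerExecutableSpec` proto; a minimal sketch with an illustrative image name:

```python
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental import placeholders

spec = executor_specs.TemplatedExecutorContainerSpec(
    image='gcr.io/example/echo',  # illustrative image name
    command=['echo', placeholders.InputValuePlaceholder('message')],
)
container_spec_pb = spec.encode()
# container_spec_pb.image == 'gcr.io/example/echo'; each command element is
# a PlaceholderExpression, resolved at runtime.
```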
"""Utilities to evaluate and resolve Placeholders."""
import base64
import re
from typing import Any, Callable, Dict, Union
from absl import logging
import attr
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import placeholder_pb2
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import value_artifact
from google.protobuf import descriptor_pool
from google.protobuf import json_format
from google.protobuf import message
from google.protobuf import message_factory
from google.protobuf import text_format
class NullDereferenceError(Exception):
"""Raised by the ExpressionResolver when dereferencing None or empty list."""
def __init__(self, placeholder):
self.placeholder = placeholder
super().__init__()
@attr.s(auto_attribs=True, frozen=True)
class ResolutionContext:
"""A struct to store information needed for resolution.
Attributes:
exec_info: An ExecutionInfo object that includes needed information to
render all kinds of placeholders.
executor_spec: An executor spec proto for rendering context placeholder.
platform_config: A platform config proto for rendering context placeholder.
"""
exec_info: data_types.ExecutionInfo = None
executor_spec: message.Message = None
platform_config: message.Message = None
# Includes three basic types from MLMD: int, float, str
# and an additional primitive type from proto field access: bool
# Note: Pytype's int includes long from Python3
# We do not support bytes, which may result from proto field access. Must use
# base64 encode operator to explicitly convert it into str.
_PlaceholderResolvedTypes = (int, float, str, bool, type(None))
_PlaceholderResolvedTypeHints = Union[_PlaceholderResolvedTypes]
def resolve_placeholder_expression(
expression: placeholder_pb2.PlaceholderExpression,
context: ResolutionContext) -> _PlaceholderResolvedTypeHints:
"""Evaluates README.ml-pipelines-sdk.md placeholder expression using the given context.
Normally the resolved value will be used as command line flags in strings.
This function does not automatically perform the string conversion, i.e.,
the return type is the same as the type of the value originally has. Currently
it can be
exec property supported primitive types: int, float, string.
if use proto operator: serilaized proto message, or proto primitive fields.
The caller needs to perform desired string conversions.
Args:
expression: A placeholder expression to be resolved.
context: Information needed to resolve the expression.
Returns:
Resolved expression value.
"""
if not context.exec_info.pipeline_node or not context.exec_info.pipeline_info:
raise ValueError(
"Pipeline node or pipeline info is missing from the placeholder ResolutionContext."
)
try:
result = _ExpressionResolver(context).resolve(expression)
except NullDereferenceError as err:
logging.warning(
"Dereferenced None during placeholder evaluation. Ignoring.")
logging.warning("Placeholder=%s", err.placeholder)
return None
except Exception as e:
raise ValueError(
f"Failed to resolve placeholder expression: {debug_str(expression)}"
) from e
if not isinstance(result, _PlaceholderResolvedTypes):
raise ValueError(f"Placeholder {debug_str(expression)} evaluates to "
f"an unsupported type: {type(result)}.")
return result
# Dictionary of registered placeholder operators,
# maps from operator proto type names to actual operator functions.
_PLACEHOLDER_OPERATORS: Dict[str, Callable[..., Any]] = {}
def _register(op_proto):
"""Decorator function for registering operators. Internal in this module."""
def decorator(op: Callable[..., Any]):
_PLACEHOLDER_OPERATORS[op_proto.DESCRIPTOR.name] = op
return op
return decorator
class _ExpressionResolver:
"""Utility class to resolve Placeholder expressions.
  Placeholder expression is defined as a proto structure
  placeholder_pb2.PlaceholderExpression. It can be resolved with
  ResolutionContext to a concrete value.
"""
def __init__(self, context: ResolutionContext):
self._resolution_values = {
placeholder_pb2.Placeholder.Type.INPUT_ARTIFACT:
context.exec_info.input_dict,
placeholder_pb2.Placeholder.Type.OUTPUT_ARTIFACT:
context.exec_info.output_dict,
placeholder_pb2.Placeholder.Type.EXEC_PROPERTY:
context.exec_info.exec_properties,
placeholder_pb2.Placeholder.Type.RUNTIME_INFO: {
ph.RuntimeInfoKey.EXECUTOR_SPEC.value: context.executor_spec,
ph.RuntimeInfoKey.PLATFORM_CONFIG.value: context.platform_config,
},
placeholder_pb2.Placeholder.Type.EXEC_INVOCATION:
context.exec_info.to_proto(),
}
def resolve(self, expression: placeholder_pb2.PlaceholderExpression) -> Any:
"""Recursively evaluates README.ml-pipelines-sdk.md placeholder expression."""
if expression.HasField("value"):
return getattr(expression.value, expression.value.WhichOneof("value"))
elif expression.HasField("placeholder"):
return self._resolve_placeholder(expression.placeholder)
elif expression.HasField("operator"):
return self._resolve_placeholder_operator(expression.operator)
else:
raise ValueError("Unexpected placeholder expression type: "
f"{expression.WhichOneof('expression_type')}.")
def _resolve_placeholder(self,
placeholder: placeholder_pb2.Placeholder) -> Any:
"""Evaluates README.ml-pipelines-sdk.md placeholder using the contexts."""
try:
context = self._resolution_values[placeholder.type]
except KeyError as e:
raise KeyError(
f"Unsupported placeholder type: {placeholder.type}.") from e
# Handle the special case of EXEC_INVOCATION placeholders, which don't take
    # a key.
if placeholder.type == placeholder_pb2.Placeholder.Type.EXEC_INVOCATION:
return context
# Handle remaining placeholder types.
try:
return context[placeholder.key]
except KeyError as e:
# Handle placeholders that access README.ml-pipelines-sdk.md missing optional channel or exec
# property. In both cases the requested key will not be present in the
      # context. However this means we cannot distinguish between a correct
# placeholder with an optional value vs. an incorrect placeholder.
# TODO(b/172001324): Handle this at compile time.
raise NullDereferenceError(placeholder)
def _resolve_placeholder_operator(
self, placeholder_operator: placeholder_pb2.PlaceholderExpressionOperator
) -> Any:
"""Evaluates README.ml-pipelines-sdk.md placeholder operator by dispatching to the operator methods."""
operator_name = placeholder_operator.WhichOneof("operator_type")
operator_pb = getattr(placeholder_operator, operator_name)
try:
operator_fn = _PLACEHOLDER_OPERATORS[operator_pb.DESCRIPTOR.name]
except KeyError as e:
raise KeyError(
f"Unsupported placeholder operator: {operator_pb.DESCRIPTOR.name}."
) from e
return operator_fn(self, operator_pb)
@_register(placeholder_pb2.ArtifactUriOperator)
def _resolve_artifact_uri_operator(
self, op: placeholder_pb2.ArtifactUriOperator) -> str:
"""Evaluates the artifact URI operator."""
resolved_artifact = self.resolve(op.expression)
if resolved_artifact is None:
raise NullDereferenceError(op.expression)
if not isinstance(resolved_artifact, artifact.Artifact):
raise ValueError("ArtifactUriOperator expects the expression "
"to evaluate to an artifact. "
f"Got {type(resolved_artifact)}")
if op.split:
return artifact_utils.get_split_uri([resolved_artifact], op.split)
else:
return resolved_artifact.uri
@_register(placeholder_pb2.ArtifactValueOperator)
def _resolve_artifact_value_operator(
self, op: placeholder_pb2.ArtifactValueOperator) -> str:
"""Evaluates the artifact value operator."""
resolved_artifact = self.resolve(op.expression)
if resolved_artifact is None:
raise NullDereferenceError(op.expression)
if not isinstance(resolved_artifact, value_artifact.ValueArtifact):
raise ValueError("ArtifactValueOperator expects the expression "
"to evaluate to README.ml-pipelines-sdk.md value artifact."
f"Got {type(resolved_artifact)}")
return resolved_artifact.read()
@_register(placeholder_pb2.ConcatOperator)
def _resolve_concat_operator(self, op: placeholder_pb2.ConcatOperator) -> str:
"""Evaluates the concat operator."""
parts = []
for e in op.expressions:
value = self.resolve(e)
if value is None:
raise NullDereferenceError(e)
parts.append(value)
return "".join(str(part) for part in parts)
@_register(placeholder_pb2.IndexOperator)
def _resolve_index_operator(self, op: placeholder_pb2.IndexOperator) -> Any:
"""Evaluates the index operator."""
value = self.resolve(op.expression)
if value is None or not value:
raise NullDereferenceError(op.expression)
try:
return value[op.index]
except (TypeError, IndexError) as e:
raise ValueError(
f"IndexOperator failed to access the given index {op.index}.") from e
@_register(placeholder_pb2.Base64EncodeOperator)
def _resolve_base64_encode_operator(
self, op: placeholder_pb2.Base64EncodeOperator) -> str:
"""Evaluates the Base64 encode operator."""
value = self.resolve(op.expression)
if value is None:
raise NullDereferenceError(op.expression)
if isinstance(value, str):
return base64.urlsafe_b64encode(value.encode()).decode("ascii")
elif isinstance(value, bytes):
return base64.urlsafe_b64encode(value).decode("ascii")
else:
raise ValueError(
f"Failed to Base64 encode {value} of type {type(value)}.")
@_register(placeholder_pb2.ProtoOperator)
def _resolve_proto_operator(
self,
op: placeholder_pb2.ProtoOperator) -> Union[int, float, str, bool, bytes]:
"""Evaluates the proto operator."""
raw_message = self.resolve(op.expression)
if raw_message is None:
raise NullDereferenceError(op.expression)
if isinstance(raw_message, str):
# We need descriptor pool to parse encoded raw messages.
pool = descriptor_pool.Default()
for file_descriptor in op.proto_schema.file_descriptors.file:
pool.Add(file_descriptor)
message_descriptor = pool.FindMessageTypeByName(
op.proto_schema.message_type)
factory = message_factory.MessageFactory(pool)
message_type = factory.GetPrototype(message_descriptor)
value = message_type()
json_format.Parse(raw_message, value, descriptor_pool=pool)
elif isinstance(raw_message, message.Message):
# Message such as platform config should not be encoded.
value = raw_message
else:
raise ValueError(
f"Got unsupported value type for proto operator: {type(raw_message)}."
)
if op.proto_field_path:
for field in op.proto_field_path:
if field.startswith("."):
try:
value = getattr(value, field[1:])
except AttributeError:
raise ValueError("While evaluting placeholder proto operator, "
f"got unknown proto field {field}.")
continue
map_key = re.findall(r"\[['\"](.+)['\"]\]", field)
if len(map_key) == 1:
try:
value = value[map_key[0]]
except KeyError:
raise ValueError("While evaluting placeholder proto operator, "
f"got unknown map field {field}.")
continue
index = re.findall(r"\[(\d+)\]", field)
if index and str.isdecimal(index[0]):
try:
value = value[int(index[0])]
except IndexError:
raise ValueError("While evaluting placeholder proto operator, "
f"got unknown index field {field}.")
continue
raise ValueError(f"Got unsupported proto field path: {field}")
# Non-message primitive values are returned directly.
if isinstance(value, (int, float, str, bool, bytes)):
return value
if not isinstance(value, message.Message):
raise ValueError(f"Got unsupported value type {type(value)} "
"from accessing proto field path.")
# For message-typed values, we need to consider serialization format.
if op.serialization_format:
if op.serialization_format == placeholder_pb2.ProtoOperator.JSON:
return json_format.MessageToJson(
message=value, sort_keys=True, preserving_proto_field_name=True)
if op.serialization_format == placeholder_pb2.ProtoOperator.TEXT_FORMAT:
return text_format.MessageToString(value)
if op.serialization_format == placeholder_pb2.ProtoOperator.BINARY:
return value.SerializeToString()
raise ValueError(
"Proto operator resolves to README.ml-pipelines-sdk.md proto message value. A serialization "
"format is needed to render it.")
def debug_str(expression: placeholder_pb2.PlaceholderExpression) -> str:
"""Gets the debug string of README.ml-pipelines-sdk.md placeholder expression proto.
Args:
expression: A placeholder expression proto.
Returns:
Debug string of the placeholder expression.
"""
if expression.HasField("value"):
value_field_name = expression.value.WhichOneof("value")
return f"\"{getattr(expression.value, value_field_name)}\""
if expression.HasField("placeholder"):
placeholder_pb = expression.placeholder
ph_names_map = {
placeholder_pb2.Placeholder.INPUT_ARTIFACT: "input",
placeholder_pb2.Placeholder.OUTPUT_ARTIFACT: "output",
placeholder_pb2.Placeholder.EXEC_PROPERTY: "exec_property",
placeholder_pb2.Placeholder.RUNTIME_INFO: "runtime_info",
placeholder_pb2.Placeholder.EXEC_INVOCATION: "execution_invocation"
}
ph_name = ph_names_map[placeholder_pb.type]
if placeholder_pb.key:
return f"{ph_name}(\"{placeholder_pb.key}\")"
else:
return f"{ph_name}()"
if expression.HasField("operator"):
operator_name = expression.operator.WhichOneof("operator_type")
operator_pb = getattr(expression.operator, operator_name)
if operator_name == "artifact_uri_op":
sub_expression_str = debug_str(operator_pb.expression)
if operator_pb.split:
return f"{sub_expression_str}.split_uri(\"{operator_pb.split}\")"
else:
return f"{sub_expression_str}.uri"
if operator_name == "artifact_value_op":
sub_expression_str = debug_str(operator_pb.expression)
return f"{sub_expression_str}.value"
if operator_name == "concat_op":
expression_str = " + ".join(debug_str(e) for e in operator_pb.expressions)
return f"({expression_str})"
if operator_name == "index_op":
sub_expression_str = debug_str(operator_pb.expression)
return f"{sub_expression_str}[{operator_pb.index}]"
if operator_name == "proto_op":
sub_expression_str = debug_str(operator_pb.expression)
field_path = "".join(operator_pb.proto_field_path)
expression_str = f"{sub_expression_str}{field_path}"
if operator_pb.serialization_format:
format_str = placeholder_pb2.ProtoOperator.SerializationFormat.Name(
operator_pb.serialization_format)
return f"{expression_str}.serialize({format_str})"
return expression_str
if operator_name == "base64_encode_op":
sub_expression_str = debug_str(operator_pb.expression)
return f"{sub_expression_str}.b64encode()"
return "Unkown placeholder operator"
return "Unknown placeholder expression" | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/compiler/placeholder_utils.py | 0.852307 | 0.348839 | placeholder_utils.py | pypi |
"""Utility functions for DSL Compiler."""
# TODO(b/149535307): Remove __future__ imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Optional, Text, Type
from tfx import types
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
def set_runtime_parameter_pb(
pb: pipeline_pb2.RuntimeParameter,
name: Text,
ptype: Type[types.Property],
default_value: Optional[types.Property] = None
) -> pipeline_pb2.RuntimeParameter:
"""Helper function to fill README.ml-pipelines-sdk.md RuntimeParameter proto.
Args:
pb: A RuntimeParameter proto to be filled in.
name: Name to be set at pb.name.
ptype: The Python type to be set at pb.type.
default_value: Optional. If provided, it will be pb.default_value.
Returns:
A RuntimeParameter proto filled with provided values.
"""
pb.name = name
if ptype == int:
pb.type = pipeline_pb2.RuntimeParameter.Type.INT
if default_value:
pb.default_value.int_value = default_value
elif ptype == float:
pb.type = pipeline_pb2.RuntimeParameter.Type.DOUBLE
if default_value:
pb.default_value.double_value = default_value
elif ptype == str:
pb.type = pipeline_pb2.RuntimeParameter.Type.STRING
if default_value:
pb.default_value.string_value = default_value
else:
raise ValueError("Got unsupported runtime parameter type: {}".format(ptype))
return pb
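# Illustrative usage sketch (the parameter name and default below are
# hypothetical; float and str ptypes fill double_value/string_value the
# same way):
#
#   param_pb = pipeline_pb2.RuntimeParameter()
#   set_runtime_parameter_pb(param_pb, name='num_steps', ptype=int,
#                            default_value=100)
#   assert param_pb.type == pipeline_pb2.RuntimeParameter.Type.INT
#   assert param_pb.default_value.int_value == 100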
def resolve_execution_mode(tfx_pipeline: pipeline.Pipeline):
"""Resolves execution mode for README.ml-pipelines-sdk.md tfx pipeline.
Args:
tfx_pipeline: README.ml-pipelines-sdk.md TFX pipeline python object assembled by SDK.
Returns:
README.ml-pipelines-sdk.md proto enum reflecting the execution mode of the pipeline.
Raises:
RuntimeError: when execution mode is ASYNC while `enable_cache` is true.
ValueError: when seeing unrecognized execution mode.
"""
if tfx_pipeline.execution_mode == pipeline.ExecutionMode.SYNC:
return pipeline_pb2.Pipeline.ExecutionMode.SYNC
elif tfx_pipeline.execution_mode == pipeline.ExecutionMode.ASYNC:
if tfx_pipeline.enable_cache:
raise RuntimeError(
"Caching is README.ml-pipelines-sdk.md feature only available to synchronous execution pipelines."
)
return pipeline_pb2.Pipeline.ExecutionMode.ASYNC
else:
raise ValueError(
f"Got unsupported execution mode: {tfx_pipeline.execution_mode}")
def is_resolver(node: base_node.BaseNode) -> bool:
"""Helper function to check if README.ml-pipelines-sdk.md TFX node is README.ml-pipelines-sdk.md Resolver."""
return isinstance(node, resolver.Resolver)
def is_importer(node: base_node.BaseNode) -> bool:
"""Helper function to check if README.ml-pipelines-sdk.md TFX node is an Importer."""
return isinstance(node, importer.Importer)
def ensure_topological_order(nodes: List[base_node.BaseNode]) -> bool:
"""Helper function to check if nodes are topologically sorted."""
visited = set()
for node in nodes:
for upstream_node in node.upstream_nodes:
if upstream_node not in visited:
return False
visited.add(node)
return True | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/compiler/compiler_utils.py | 0.736116 | 0.290012 | compiler_utils.py | pypi |
"""Compiles README.ml-pipelines-sdk.md TFX pipeline into README.ml-pipelines-sdk.md TFX DSL IR proto."""
import json
import re
from typing import cast, Iterable, List, Mapping
from tfx import types
from tfx.dsl.compiler import compiler_utils
from tfx.dsl.compiler import constants
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.orchestration import data_types
from tfx.orchestration import data_types_utils
from tfx.orchestration import pipeline
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import json_utils
from ml_metadata.proto import metadata_store_pb2
class _CompilerContext(object):
"""Encapsulates resources needed to compile README.ml-pipelines-sdk.md pipeline."""
def __init__(self, pipeline_info: data_types.PipelineInfo,
execution_mode: pipeline_pb2.Pipeline.ExecutionMode,
topological_order: Mapping[str, int]):
self.pipeline_info = pipeline_info
self.execution_mode = execution_mode
self.node_pbs = {}
self._topological_order = topological_order
@classmethod
def from_tfx_pipeline(cls, tfx_pipeline: pipeline.Pipeline):
topological_order = {}
for i, node in enumerate(tfx_pipeline.components, start=1):
topological_order[node.id] = i
return cls(
pipeline_info=tfx_pipeline.pipeline_info,
execution_mode=compiler_utils.resolve_execution_mode(tfx_pipeline),
topological_order=topological_order)
def topologically_sorted(self, tfx_nodes: Iterable[base_node.BaseNode]):
return sorted(tfx_nodes, key=lambda node: self._topological_order[node.id])
@property
def is_sync_mode(self):
return self.execution_mode == pipeline_pb2.Pipeline.SYNC
@property
def is_async_mode(self):
return self.execution_mode == pipeline_pb2.Pipeline.ASYNC
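# Illustrative sketch (node names are hypothetical): the context preserves
# the pipeline's authoring order for arbitrary node subsets.
#
#   ctx = _CompilerContext.from_tfx_pipeline(my_pipeline)
#   # my_pipeline.components == [example_gen, trainer, pusher]
#   ctx.topologically_sorted([pusher, example_gen])
#   # -> [example_gen, pusher]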
class Compiler(object):
"""Compiles README.ml-pipelines-sdk.md TFX pipeline or README.ml-pipelines-sdk.md component into README.ml-pipelines-sdk.md uDSL IR proto."""
def _compile_importer_node_outputs(self, tfx_node: base_node.BaseNode,
node_pb: pipeline_pb2.PipelineNode):
"""Compiles the outputs of an importer node."""
for key, value in tfx_node.outputs.items():
output_spec = node_pb.outputs.outputs[key]
artifact_type = value.type._get_artifact_type() # pylint: disable=protected-access
output_spec.artifact_spec.type.CopyFrom(artifact_type)
# Attach additional properties for artifacts produced by importer nodes.
for property_name, property_value in tfx_node.exec_properties[
importer.PROPERTIES_KEY].items():
_check_property_value_type(property_name, property_value, artifact_type)
value_field = output_spec.artifact_spec.additional_properties[
property_name].field_value
try:
data_types_utils.set_metadata_value(value_field, property_value)
except ValueError:
raise ValueError(
"Component {} got unsupported parameter {} with type {}.".format(
tfx_node.id, property_name, type(property_value)))
for property_name, property_value in tfx_node.exec_properties[
importer.CUSTOM_PROPERTIES_KEY].items():
value_field = output_spec.artifact_spec.additional_custom_properties[
property_name].field_value
try:
data_types_utils.set_metadata_value(value_field, property_value)
except ValueError:
raise ValueError(
"Component {} got unsupported parameter {} with type {}.".format(
tfx_node.id, property_name, type(property_value)))
def _compile_node(
self, tfx_node: base_node.BaseNode, compile_context: _CompilerContext,
deployment_config: pipeline_pb2.IntermediateDeploymentConfig,
enable_cache: bool,
) -> pipeline_pb2.PipelineNode:
"""Compiles an individual TFX node into README.ml-pipelines-sdk.md PipelineNode proto.
Args:
tfx_node: A TFX node.
compile_context: Resources needed to compile the node.
deployment_config: Intermediate deployment config to set. Will include
related specs for executors, drivers and platform specific configs.
      enable_cache: Whether cache is enabled for the node.
Raises:
TypeError: When supplied tfx_node has values of invalid type.
Returns:
A PipelineNode proto that encodes information of the node.
"""
node = pipeline_pb2.PipelineNode()
# Step 1: Node info
node.node_info.type.name = tfx_node.type
node.node_info.id = tfx_node.id
# Step 2: Node Context
# Context for the pipeline, across pipeline runs.
pipeline_context_pb = node.contexts.contexts.add()
pipeline_context_pb.type.name = constants.PIPELINE_CONTEXT_TYPE_NAME
pipeline_context_pb.name.field_value.string_value = compile_context.pipeline_info.pipeline_context_name
# Context for the current pipeline run.
if compile_context.is_sync_mode:
pipeline_run_context_pb = node.contexts.contexts.add()
pipeline_run_context_pb.type.name = constants.PIPELINE_RUN_CONTEXT_TYPE_NAME
compiler_utils.set_runtime_parameter_pb(
pipeline_run_context_pb.name.runtime_parameter,
constants.PIPELINE_RUN_ID_PARAMETER_NAME, str)
# Context for the node, across pipeline runs.
node_context_pb = node.contexts.contexts.add()
node_context_pb.type.name = constants.NODE_CONTEXT_TYPE_NAME
node_context_pb.name.field_value.string_value = "{}.{}".format(
compile_context.pipeline_info.pipeline_context_name, node.node_info.id)
# Pre Step 3: Alter graph topology if needed.
if compile_context.is_async_mode:
tfx_node_inputs = self._compile_resolver_config(
compile_context, tfx_node, node)
else:
tfx_node_inputs = tfx_node.inputs
# Step 3: Node inputs
for key, value in tfx_node_inputs.items():
input_spec = node.inputs.inputs[key]
channel = input_spec.channels.add()
if value.producer_component_id:
channel.producer_node_query.id = value.producer_component_id
# Here we rely on pipeline.components to be topologically sorted.
assert value.producer_component_id in compile_context.node_pbs, (
"producer component should have already been compiled.")
producer_pb = compile_context.node_pbs[value.producer_component_id]
for producer_context in producer_pb.contexts.contexts:
if (not compiler_utils.is_resolver(tfx_node) or
producer_context.name.runtime_parameter.name !=
constants.PIPELINE_RUN_CONTEXT_TYPE_NAME):
context_query = channel.context_queries.add()
context_query.type.CopyFrom(producer_context.type)
context_query.name.CopyFrom(producer_context.name)
else:
# Caveat: portable core requires every channel to have at least one
          # Context. But for cases like system nodes and producer-consumer
          # pipelines, a channel may not have contexts at all. In these cases,
# we want to use the pipeline level context as the input channel
# context.
context_query = channel.context_queries.add()
context_query.type.CopyFrom(pipeline_context_pb.type)
context_query.name.CopyFrom(pipeline_context_pb.name)
artifact_type = value.type._get_artifact_type() # pylint: disable=protected-access
channel.artifact_query.type.CopyFrom(artifact_type)
channel.artifact_query.type.ClearField("properties")
if value.output_key:
channel.output_key = value.output_key
# TODO(b/158712886): Calculate min_count based on if inputs are optional.
# min_count = 0 stands for optional input and 1 stands for required input.
# Step 3.1: Special treatment for Resolver node.
if compiler_utils.is_resolver(tfx_node):
assert compile_context.is_sync_mode
node.inputs.resolver_config.resolver_steps.extend(
_convert_to_resolver_steps(tfx_node))
# Step 4: Node outputs
if isinstance(tfx_node, base_component.BaseComponent):
for key, value in tfx_node.outputs.items():
output_spec = node.outputs.outputs[key]
artifact_type = value.type._get_artifact_type() # pylint: disable=protected-access
output_spec.artifact_spec.type.CopyFrom(artifact_type)
for prop_key, prop_value in value.additional_properties.items():
_check_property_value_type(prop_key, prop_value,
output_spec.artifact_spec.type)
data_types_utils.set_metadata_value(
output_spec.artifact_spec.additional_properties[prop_key]
.field_value, prop_value)
for prop_key, prop_value in value.additional_custom_properties.items():
data_types_utils.set_metadata_value(
output_spec.artifact_spec.additional_custom_properties[prop_key]
.field_value, prop_value)
# TODO(b/170694459): Refactor special nodes as plugins.
    # Step 4.1: Special treatment for Importer node
if compiler_utils.is_importer(tfx_node):
self._compile_importer_node_outputs(tfx_node, node)
# Step 5: Node parameters
if not compiler_utils.is_resolver(tfx_node):
for key, value in tfx_node.exec_properties.items():
if value is None:
continue
        # Ignore following two properties for an importer node, because they are
# already attached to the artifacts produced by the importer node.
if compiler_utils.is_importer(tfx_node) and (
key == importer.PROPERTIES_KEY or
key == importer.CUSTOM_PROPERTIES_KEY):
continue
parameter_value = node.parameters.parameters[key]
# Order matters, because runtime parameter can be in serialized string.
if isinstance(value, data_types.RuntimeParameter):
compiler_utils.set_runtime_parameter_pb(
parameter_value.runtime_parameter, value.name, value.ptype,
value.default)
elif isinstance(value, str) and re.search(
data_types.RUNTIME_PARAMETER_PATTERN, value):
runtime_param = json.loads(value)
compiler_utils.set_runtime_parameter_pb(
parameter_value.runtime_parameter, runtime_param.name,
runtime_param.ptype, runtime_param.default)
else:
try:
data_types_utils.set_metadata_value(parameter_value.field_value,
value)
except ValueError:
raise ValueError(
"Component {} got unsupported parameter {} with type {}."
.format(tfx_node.id, key, type(value)))
# Step 6: Executor spec and optional driver spec for components
if isinstance(tfx_node, base_component.BaseComponent):
executor_spec = tfx_node.executor_spec.encode(
component_spec=tfx_node.spec)
deployment_config.executor_specs[tfx_node.id].Pack(executor_spec)
# TODO(b/163433174): Remove specialized logic once generalization of
# driver spec is done.
if tfx_node.driver_class != base_driver.BaseDriver:
driver_class_path = "{}.{}".format(tfx_node.driver_class.__module__,
tfx_node.driver_class.__name__)
driver_spec = executable_spec_pb2.PythonClassExecutableSpec()
driver_spec.class_path = driver_class_path
deployment_config.custom_driver_specs[tfx_node.id].Pack(driver_spec)
# Step 7: Upstream/Downstream nodes
# Note: the order of tfx_node.upstream_nodes is inconsistent from
# run to run. We sort them so that compiler generates consistent results.
# For ASYNC mode upstream/downstream node information is not set as
# compiled IR graph topology can be different from that on pipeline
# authoring time; for example ResolverNode is removed.
if compile_context.is_sync_mode:
node.upstream_nodes.extend(
sorted(node.id for node in tfx_node.upstream_nodes))
node.downstream_nodes.extend(
sorted(node.id for node in tfx_node.downstream_nodes))
# Step 8: Node execution options
node.execution_options.caching_options.enable_cache = enable_cache
# Step 9: Per-node platform config
if isinstance(tfx_node, base_component.BaseComponent):
tfx_component = cast(base_component.BaseComponent, tfx_node)
if tfx_component.platform_config:
deployment_config.node_level_platform_configs[tfx_node.id].Pack(
tfx_component.platform_config)
return node
def _compile_resolver_config(self, context: _CompilerContext,
tfx_node: base_node.BaseNode,
node: pipeline_pb2.PipelineNode):
"""Compiles upstream ResolverNodes as README.ml-pipelines-sdk.md ResolverConfig.
Iteratively reduces upstream resolver nodes into README.ml-pipelines-sdk.md resolver config of the
current node until no upstream resolver node remains.
Each iteration will consume one upstream resolver node, and convert it to
the equivalent resolver steps and corresponding input channels.
For example consider the following diagram:
+--------------+ +------------+
| Upstream A | | Upstream B |
+--------------+ +------------+
    a| |b |i <-- output key
| | |
c| |d |
v v |
+----+----+----+ |
| ResolverNode | |
| cls=Foo | +----+
+--------------+ |
c| |d <---- | ----- output key of the ResolverNode should be the
    | | | same as the input key of the Current Node.
c| |d |j <-- input key
v v v
++----+--------+-+
| Current Node |
| ResolverSteps: |
| - ... |
+----------------+
After one iteration, the ResolverNode would be replaced by the resolver
step of the downstream (current node).
+--------------+ +------------+
| Upstream A | | Upstream B |
+--------------+ +------------+
    a| |b |i
| | |
c| |d |j
v v v
+----+----+-------------+------+
| Current Node |
| ResolverSteps: |
| - Foo() |
| - ... |
+------------------------------+
Following things are done for each reduction iteration:
    * Pick an upstream resolver node (in a reversed topological order).
* Remove channels between resolver node and the current node.
* Rewire resolver node input channels as those of the current node.
* Convert the resolver node into corresponding resolver steps.
This only applies to the ASYNC mode pipeline compilation.
Args:
context: A compiler context.
tfx_node: A BaseNode instance.
node: A PipelineNode IR to compile ResolverConfig into.
Returns:
      The modified input channels of the given node.
"""
# This input_channels dict will be updated in the middle as the resolver
# nodes are reduced, and this updated input_channels should be used
    # afterwards instead of tfx_node.inputs.
input_channels = dict(tfx_node.inputs.get_all()) # Shallow copy.
resolver_steps = []
resolver_nodes = self._get_upstream_resolver_nodes(tfx_node)
# Reduce each resolver node into resolver steps in reversed topological
# order.
for resolver_node in reversed(context.topologically_sorted(resolver_nodes)):
resolver_channels = {
input_key: channel
for input_key, channel in input_channels.items()
if channel.producer_component_id == resolver_node.id
}
for input_key, channel in resolver_channels.items():
# CAVEAT: Currently resolver does not alter the input key, and we
# require the output key of the resolver (which is the same as the
# input key) to be consumed AS IS in the downstream node, whether it is
        # a resolver node or a TFX component node.
# TODO(b/178452031): New Resolver should properly handle key mismatch.
if input_key != channel.output_key:
raise ValueError(f"Downstream node input key ({input_key}) should be "
f"the same as the output key ({channel.output_key}) "
"of the resolver node.")
# Step 1.
# Remove channel between parent resolver node and the tfx_node.
del input_channels[input_key]
# Step 2.
# Rewire resolver node inputs to the tfx_node inputs.
for parent_input_key, channel in resolver_node.inputs.items():
if parent_input_key in input_channels:
if channel != input_channels[parent_input_key]:
raise ValueError(
f"Duplicated input key {parent_input_key} found while "
f"compiling {tfx_node.type}#{tfx_node.id}.")
else:
input_channels[parent_input_key] = channel
# Step 3.
# Convert resolver node into corresponding resolver steps.
resolver_steps.extend(
reversed(_convert_to_resolver_steps(resolver_node)))
if resolver_steps:
node.inputs.resolver_config.resolver_steps.extend(
reversed(resolver_steps))
return input_channels
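  # Illustrative sketch of one reduction above (names are hypothetical): a
  # resolver node feeding this node under key 'examples' is removed, its own
  # input channel is rewired onto this node under its original key, and the
  # strategy becomes a step roughly like:
  #
  #   ResolverStep(class_path='...LatestArtifactsStrategy',
  #                config_json='{...}', input_keys=['examples'])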
def _get_upstream_resolver_nodes(
self, tfx_node: base_node.BaseNode) -> List[base_node.BaseNode]:
"""Gets all transitive upstream resolver nodes in topological order."""
result = []
visit_queue = list(tfx_node.upstream_nodes)
seen = set(node.id for node in visit_queue)
while visit_queue:
node = visit_queue.pop()
if not compiler_utils.is_resolver(node):
continue
result.append(node)
for upstream_node in node.upstream_nodes:
if upstream_node.id not in seen:
          seen.add(upstream_node.id)
visit_queue.append(upstream_node)
return result
def compile(self, tfx_pipeline: pipeline.Pipeline) -> pipeline_pb2.Pipeline:
"""Compiles README.ml-pipelines-sdk.md tfx pipeline into uDSL proto.
Args:
tfx_pipeline: A TFX pipeline.
Returns:
A Pipeline proto that encodes all necessary information of the pipeline.
"""
context = _CompilerContext.from_tfx_pipeline(tfx_pipeline)
pipeline_pb = pipeline_pb2.Pipeline()
pipeline_pb.pipeline_info.id = context.pipeline_info.pipeline_name
pipeline_pb.execution_mode = context.execution_mode
compiler_utils.set_runtime_parameter_pb(
pipeline_pb.runtime_spec.pipeline_root.runtime_parameter,
constants.PIPELINE_ROOT_PARAMETER_NAME, str,
context.pipeline_info.pipeline_root)
if pipeline_pb.execution_mode == pipeline_pb2.Pipeline.ExecutionMode.SYNC:
compiler_utils.set_runtime_parameter_pb(
pipeline_pb.runtime_spec.pipeline_run_id.runtime_parameter,
constants.PIPELINE_RUN_ID_PARAMETER_NAME, str)
assert compiler_utils.ensure_topological_order(tfx_pipeline.components), (
"Pipeline components are not topologically sorted.")
deployment_config = pipeline_pb2.IntermediateDeploymentConfig()
if tfx_pipeline.metadata_connection_config:
deployment_config.metadata_connection_config.Pack(
tfx_pipeline.metadata_connection_config)
for node in tfx_pipeline.components:
      # In ASYNC mode ResolverNode is merged into the downstream node as a
      # ResolverConfig.
if compiler_utils.is_resolver(node) and context.is_async_mode:
continue
node_pb = self._compile_node(node, context, deployment_config,
tfx_pipeline.enable_cache)
pipeline_or_node = pipeline_pb.PipelineOrNode()
pipeline_or_node.pipeline_node.CopyFrom(node_pb)
# TODO(b/158713812): Support sub-pipeline.
pipeline_pb.nodes.append(pipeline_or_node)
context.node_pbs[node.id] = node_pb
if tfx_pipeline.platform_config:
deployment_config.pipeline_level_platform_config.Pack(
tfx_pipeline.platform_config)
pipeline_pb.deployment_config.Pack(deployment_config)
return pipeline_pb
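# Illustrative usage sketch (the pipeline object is hypothetical):
#
#   compiled_ir = Compiler().compile(my_tfx_pipeline)
#   # `compiled_ir` is a pipeline_pb2.Pipeline proto; resolver nodes appear
#   # as nodes in SYNC mode and are folded into downstream ResolverConfigs
#   # in ASYNC mode.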
def _iterate_resolver_cls_and_config(resolver_node: base_node.BaseNode):
"""Iterates through resolver class and configs that are bind to the node."""
assert compiler_utils.is_resolver(resolver_node)
exec_properties = resolver_node.exec_properties
if (resolver.RESOLVER_STRATEGY_CLASS in exec_properties and
resolver.RESOLVER_CONFIG in exec_properties):
yield (exec_properties[resolver.RESOLVER_STRATEGY_CLASS],
exec_properties[resolver.RESOLVER_CONFIG])
elif (resolver.RESOLVER_STRATEGY_CLASS_LIST in exec_properties and
resolver.RESOLVER_CONFIG_LIST in exec_properties):
yield from zip(exec_properties[resolver.RESOLVER_STRATEGY_CLASS_LIST],
exec_properties[resolver.RESOLVER_CONFIG_LIST])
else:
raise ValueError(f"Invalid ResolverNode exec_properties: {exec_properties}")
def _convert_to_resolver_steps(resolver_node: base_node.BaseNode):
"""Converts ResolverNode to README.ml-pipelines-sdk.md corresponding ResolverSteps."""
assert compiler_utils.is_resolver(resolver_node)
result = []
for resolver_cls, resolver_config in (
_iterate_resolver_cls_and_config(resolver_node)):
resolver_step = pipeline_pb2.ResolverConfig.ResolverStep()
resolver_step.class_path = (
f"{resolver_cls.__module__}.{resolver_cls.__name__}")
resolver_step.config_json = json_utils.dumps(resolver_config)
resolver_step.input_keys.extend(resolver_node.inputs.keys())
result.append(resolver_step)
return result
def _check_property_value_type(property_name: str,
property_value: types.Property,
artifact_type: metadata_store_pb2.ArtifactType):
prop_value_type = data_types_utils.get_metadata_value_type(property_value)
if prop_value_type != artifact_type.properties[property_name]:
raise TypeError(
"Unexpected value type of property '{}' in output artifact '{}': "
"Expected {} but given {} (value:{!r})".format(
property_name, artifact_type.name,
metadata_store_pb2.PropertyType.Name(
artifact_type.properties[property_name]),
metadata_store_pb2.PropertyType.Name(prop_value_type),
property_value)) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/compiler/compiler.py | 0.877899 | 0.258541 | compiler.py | pypi |
"""Placeholders represent not-yet-available values at the component authoring time."""
import abc
import copy
import enum
from typing import Optional, Type, Union, cast
from tfx import types
from tfx.proto.orchestration import placeholder_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class _PlaceholderOperator(abc.ABC):
"""An Operator performs an operation on README.ml-pipelines-sdk.md Placeholder.
It knows how to encode itself into README.ml-pipelines-sdk.md proto.
"""
def __init__(self):
pass
@abc.abstractmethod
def encode(
self,
sub_expression_pb: placeholder_pb2.PlaceholderExpression,
      component_spec: Optional[Type[types.ComponentSpec]] = None
) -> placeholder_pb2.PlaceholderExpression:
pass
class _ArtifactUriOperator(_PlaceholderOperator):
"""Artifact URI Operator extracts the URI from an artifact Placeholder.
Prefer to use the .uri property of ArtifactPlaceholder.
"""
def __init__(self, split: str = ''):
super().__init__()
self._split = split
def encode(
self,
sub_expression_pb: placeholder_pb2.PlaceholderExpression,
component_spec: Optional[Type[types.ComponentSpec]] = None
) -> placeholder_pb2.PlaceholderExpression:
del component_spec # Unused by ArtifactUriOperator
result = placeholder_pb2.PlaceholderExpression()
result.operator.artifact_uri_op.expression.CopyFrom(sub_expression_pb)
if self._split:
result.operator.artifact_uri_op.split = self._split
return result
class _ArtifactValueOperator(_PlaceholderOperator):
"""Artifact Value Operator extracts the value from README.ml-pipelines-sdk.md primitive artifact Placeholder.
Prefer to use the .value property of ArtifactPlaceholder.
"""
def encode(
self,
sub_expression_pb: placeholder_pb2.PlaceholderExpression,
component_spec: Optional[Type[types.ComponentSpec]] = None
) -> placeholder_pb2.PlaceholderExpression:
del component_spec # Unused by ArtifactValueOperator
result = placeholder_pb2.PlaceholderExpression()
result.operator.artifact_value_op.expression.CopyFrom(sub_expression_pb)
return result
class _IndexOperator(_PlaceholderOperator):
"""Index Operator extracts value at the given index of README.ml-pipelines-sdk.md Placeholder.
Prefer to use [index] operator overloading of Placeholder.
"""
def __init__(self, index: int):
super().__init__()
self._index = index
def encode(
self,
sub_expression_pb: placeholder_pb2.PlaceholderExpression,
component_spec: Optional[Type[types.ComponentSpec]] = None
) -> placeholder_pb2.PlaceholderExpression:
del component_spec # Unused by IndexOperator
result = placeholder_pb2.PlaceholderExpression()
result.operator.index_op.expression.CopyFrom(sub_expression_pb)
result.operator.index_op.index = self._index
return result
class _ConcatOperator(_PlaceholderOperator):
"""Concat Operator concatenates multiple Placeholders.
Prefer to use + operator overloading of Placeholder.
"""
  def __init__(self, right: Optional[Union[str, 'Placeholder']] = None,
               left: Optional[str] = None):
super().__init__()
self._left = left
self._right = right
def encode(
self,
sub_expression_pb: placeholder_pb2.PlaceholderExpression,
component_spec: Optional[Type[types.ComponentSpec]] = None
) -> placeholder_pb2.PlaceholderExpression:
del component_spec # Unused by ConcatOperator
# ConcatOperator's proto version contains multiple placeholder expressions
# as operands. For convenience, the Python version is implemented taking
# only two operands.
if self._right:
# Resolve other expression
if isinstance(self._right, Placeholder):
other_expression = cast(Placeholder, self._right)
other_expression_pb = other_expression.encode()
else:
other_expression_pb = placeholder_pb2.PlaceholderExpression()
other_expression_pb.value.string_value = self._right
# Try combining with existing concat operator
if sub_expression_pb.HasField(
'operator') and sub_expression_pb.operator.HasField('concat_op'):
sub_expression_pb.operator.concat_op.expressions.append(
other_expression_pb)
return sub_expression_pb
else:
result = placeholder_pb2.PlaceholderExpression()
result.operator.concat_op.expressions.extend(
[sub_expression_pb, other_expression_pb])
return result
if self._left:
# Resolve other expression: left operand must be str
other_expression_pb = placeholder_pb2.PlaceholderExpression()
other_expression_pb.value.string_value = self._left
# Try combining with existing concat operator
if sub_expression_pb.HasField(
'operator') and sub_expression_pb.operator.HasField('concat_op'):
sub_expression_pb.operator.concat_op.expressions.insert(
0, other_expression_pb)
return sub_expression_pb
else:
result = placeholder_pb2.PlaceholderExpression()
result.operator.concat_op.expressions.extend(
[other_expression_pb, sub_expression_pb])
return result
raise RuntimeError(
'ConcatOperator does not have the other expression to concat.')
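# Illustrative sketch: chained `+` concatenations flatten into one concat_op
# instead of nesting (the placeholder below is hypothetical).
#
#   ph = 'gs://bucket/' + input('model').uri + '/serving'
#   pb = ph.encode()
#   # pb.operator.concat_op.expressions holds three operands: the literal
#   # prefix, the artifact URI expression, and the literal suffix.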
class ProtoSerializationFormat(enum.Enum):
TEXT_FORMAT = placeholder_pb2.ProtoOperator.TEXT_FORMAT
JSON = placeholder_pb2.ProtoOperator.JSON
BINARY = placeholder_pb2.ProtoOperator.BINARY
class _ProtoOperator(_PlaceholderOperator):
"""Proto Operator helps access/serialze README.ml-pipelines-sdk.md proto-valued placeholder.
Prefer to use . operator overloading of ExecPropertyPlaceholder or
RuntimeInfoPlaceholder for proto field access, use serialize_proto function
for proto serialization.
"""
def __init__(self,
proto_field_path: Optional[str] = None,
serialization_format: Optional[ProtoSerializationFormat] = None):
super().__init__()
self._proto_field_path = [proto_field_path] if proto_field_path else None
self._serialization_format = serialization_format
def can_append_field_path(self):
return self._proto_field_path is not None
def append_field_path(self, extra_path: str):
self._proto_field_path.append(extra_path)
def encode(
self,
sub_expression_pb: placeholder_pb2.PlaceholderExpression,
component_spec: Optional[Type[types.ComponentSpec]] = None
) -> placeholder_pb2.PlaceholderExpression:
result = placeholder_pb2.PlaceholderExpression()
result.operator.proto_op.expression.CopyFrom(sub_expression_pb)
if self._proto_field_path:
result.operator.proto_op.proto_field_path.extend(self._proto_field_path)
if self._serialization_format:
result.operator.proto_op.serialization_format = (
self._serialization_format.value)
# Attach proto descriptor if available through component spec.
if (component_spec and sub_expression_pb.placeholder.type ==
placeholder_pb2.Placeholder.EXEC_PROPERTY):
exec_property_name = sub_expression_pb.placeholder.key
if exec_property_name not in component_spec.PARAMETERS:
raise ValueError(
f"Can't find provided placeholder key {exec_property_name} in "
"component spec's exec properties. "
f"Available exec property keys: {component_spec.PARAMETERS.keys()}."
)
execution_param = component_spec.PARAMETERS[exec_property_name]
if not issubclass(execution_param.type, message.Message):
raise ValueError(
"Can't apply placeholder proto operator on non-proto type "
f"exec property. Got {execution_param.type}.")
result.operator.proto_op.proto_schema.message_type = (
execution_param.type.DESCRIPTOR.full_name)
fd_set = result.operator.proto_op.proto_schema.file_descriptors
for fd in proto_utils.gather_file_descriptors(
execution_param.type.DESCRIPTOR):
fd.CopyToProto(fd_set.file.add())
return result
class _Base64EncodeOperator(_PlaceholderOperator):
"""Base64EncodeOperator encodes another placeholder using url safe base64.
Prefer to use the .b64encode method of Placeholder.
"""
def encode(
self,
sub_expression_pb: placeholder_pb2.PlaceholderExpression,
component_spec: Optional[Type[types.ComponentSpec]] = None
) -> placeholder_pb2.PlaceholderExpression:
del component_spec # Unused by B64EncodeOperator
result = placeholder_pb2.PlaceholderExpression()
result.operator.base64_encode_op.expression.CopyFrom(sub_expression_pb)
return result
class Placeholder(abc.ABC):
"""A Placeholder represents not-yet-available values at the component authoring time."""
def __init__(self, placeholder_type: placeholder_pb2.Placeholder.Type,
key: Optional[str] = None):
self._operators = []
self._type = placeholder_type
self._key = key
def __add__(self, right: Union[str, 'Placeholder']):
self._operators.append(_ConcatOperator(right=right))
return self
def __radd__(self, left: str):
self._operators.append(_ConcatOperator(left=left))
return self
def __deepcopy__(self, memo):
# This method is implemented to make sure Placeholder is deep copyable
# by copy.deepcopy().
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def b64encode(self):
"""Encodes the output of another placeholder using url safe base64 encoding.
Returns:
A placeholder, when rendering, is README.ml-pipelines-sdk.md url safe base64 encoded string.
"""
self._operators.append(_Base64EncodeOperator())
return self
def encode(
self,
component_spec: Optional[Type[types.ComponentSpec]] = None
) -> placeholder_pb2.PlaceholderExpression:
"""Encodes README.ml-pipelines-sdk.md placeholder as PlaceholderExpression proto.
Args:
component_spec: Optional. Information about the component that may be
needed during encoding.
Returns:
Encoded proto containing all information of this placeholder.
"""
result = placeholder_pb2.PlaceholderExpression()
result.placeholder.type = self._type
if self._key:
result.placeholder.key = self._key
for op in self._operators:
result = op.encode(result, component_spec)
return result
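# Illustrative sketch: encoding applies the chained operators inside-out
# (the key and split name below are hypothetical).
#
#   pb = input('examples')[0].split_uri('train').encode()
#   # Outermost: artifact_uri_op with split='train', wrapping an index_op,
#   # wrapping the INPUT_ARTIFACT placeholder with key 'examples'.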
class ArtifactPlaceholder(Placeholder):
"""Artifact Placeholder represents an input or an output artifact.
Prefer to use input(...) or output(...) to create artifact placeholders.
"""
@property
def uri(self):
self._operators.append(_ArtifactUriOperator())
return self
def split_uri(self, split: str):
self._operators.append(_ArtifactUriOperator(split))
return self
@property
def value(self):
self._operators.append(_ArtifactValueOperator())
return self
def __getitem__(self, key: int):
self._operators.append(_IndexOperator(key))
return self
class _ProtoAccessiblePlaceholder(Placeholder, abc.ABC):
"""A base Placeholder for accessing proto fields using Python proto syntax."""
def __getattr__(self, field_name: str):
proto_access_field = f'.{field_name}'
if self._operators and isinstance(
self._operators[-1],
_ProtoOperator) and self._operators[-1].can_append_field_path():
self._operators[-1].append_field_path(proto_access_field)
else:
self._operators.append(
_ProtoOperator(proto_field_path=proto_access_field))
return self
def __getitem__(self, key: Union[int, str]):
proto_access_field = f'[{key!r}]'
if self._operators and isinstance(
self._operators[-1],
_ProtoOperator) and self._operators[-1].can_append_field_path():
self._operators[-1].append_field_path(proto_access_field)
else:
self._operators.append(
_ProtoOperator(proto_field_path=proto_access_field))
return self
def serialize(self, serialization_format: ProtoSerializationFormat):
"""Serialize the proto-valued placeholder using the provided scheme.
Args:
serialization_format: The format of how the proto is serialized.
Returns:
A placeholder that when rendered is serialized with the scheme.
"""
self._operators.append(
_ProtoOperator(serialization_format=serialization_format))
return self
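# Illustrative sketch (the exec property name and proto fields below are
# hypothetical): field access and serialization compose on the same
# placeholder type.
#
#   num_steps_ph = exec_property('model_config').trainer_args.num_steps
#   config_json_ph = exec_property('model_config').serialize(
#       ProtoSerializationFormat.JSON)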
class ExecPropertyPlaceholder(_ProtoAccessiblePlaceholder):
"""ExecProperty Placeholder represents an execution property.
Prefer to use exec_property(...) to create exec property placeholders.
"""
def __init__(self, key: str):
super().__init__(placeholder_pb2.Placeholder.Type.EXEC_PROPERTY, key)
class RuntimeInfoPlaceholder(_ProtoAccessiblePlaceholder):
"""RuntimeInfo Placeholder represents runtime information for README.ml-pipelines-sdk.md component.
Prefer to use runtime_info(...) to create RuntimeInfo placeholders.
"""
def __init__(self, key: str):
if key not in _RUNTIME_INFO_KEYS:
raise ValueError(f'Got unsupported runtime info key: {key}.')
super().__init__(placeholder_pb2.Placeholder.Type.RUNTIME_INFO, key)
class ExecInvocationPlaceholder(_ProtoAccessiblePlaceholder):
"""Execution Invocation Placeholder helps access ExecutionInvocation proto.
Prefer to use execution_invocation(...) to create Execution Invocation
placeholder.
"""
def __init__(self):
super().__init__(placeholder_pb2.Placeholder.Type.EXEC_INVOCATION)
def input(key: str) -> ArtifactPlaceholder: # pylint: disable=redefined-builtin
"""Returns README.ml-pipelines-sdk.md Placeholder that represents an input artifact.
Args:
key: The key of the input artifact.
Returns:
A Placeholder that supports
1. Rendering the whole MLMD artifact proto as text_format.
Example: input('model')
    2. Accessing a specific index using [index], if multiple artifacts are
associated with the given key.
Example: input('model')[0]
3. Getting the URI of an artifact through .uri property.
Example: input('model').uri or input('model')[0].uri
    4. Getting the URI of a specific split of an artifact using
.split_uri(split_name) method.
Example: input('examples')[0].split_uri('train')
    5. Getting the value of a primitive artifact through .value property.
Example: input('primitive').value
6. Concatenating with other placeholders or strings.
Example: input('model').uri + '/model/' + exec_property('version')
"""
return ArtifactPlaceholder(placeholder_pb2.Placeholder.Type.INPUT_ARTIFACT,
key)
def output(key: str) -> ArtifactPlaceholder:
"""Returns README.ml-pipelines-sdk.md Placeholder that represents an output artifact.
It is the same as input(...) function, except it is for output artifacts.
Args:
key: The key of the output artifact.
Returns:
A Placeholder that supports
1. Rendering the whole artifact as text_format.
Example: output('model')
    2. Accessing a specific index using [index], if multiple artifacts are
associated with the given key.
Example: output('model')[0]
3. Getting the URI of an artifact through .uri property.
Example: output('model').uri or output('model')[0].uri
    4. Getting the URI of a specific split of an artifact using
.split_uri(split_name) method.
Example: output('examples')[0].split_uri('train')
    5. Getting the value of a primitive artifact through .value property.
Example: output('primitive').value
6. Concatenating with other placeholders or strings.
Example: output('model').uri + '/model/' + exec_property('version')
"""
return ArtifactPlaceholder(placeholder_pb2.Placeholder.Type.OUTPUT_ARTIFACT,
key)
def exec_property(key: str) -> ExecPropertyPlaceholder:
"""Returns README.ml-pipelines-sdk.md Placeholder that represents an execution property.
Args:
key: The key of the output artifact.
Returns:
A Placeholder that supports
    1. Rendering the value of an execution property at a given key.
Example: exec_property('version')
    2. Rendering the whole proto or a proto field of an execution property,
       if the value is a proto type.
       The (possibly nested) proto field in a placeholder can be accessed as
       if accessing a proto field in Python.
Example: exec_property('model_config').num_layers
3. Concatenating with other placeholders or strings.
Example: output('model').uri + '/model/' + exec_property('version')
"""
return ExecPropertyPlaceholder(key)
class RuntimeInfoKey(enum.Enum):
PLATFORM_CONFIG = 'platform_config'
EXECUTOR_SPEC = 'executor_spec'
_RUNTIME_INFO_KEYS = frozenset(key.value for key in RuntimeInfoKey)
def runtime_info(key: str) -> RuntimeInfoPlaceholder:
"""Returns README.ml-pipelines-sdk.md Placeholder that contains runtime information for component.
Currently the runtime info includes following keys:
1. platform_config: A platform_config proto that contains platform specific
information.
2. executor_spec: The executor spec proto.
Args:
key: The key of the runtime information.
Returns:
A Placeholder that will render to the information associated with the key.
    If the placeholder is proto-valued, accessing a proto field can be
    represented as if accessing a proto field in Python.
Raises:
ValueError: If received unsupported key.
"""
if key not in _RUNTIME_INFO_KEYS:
raise ValueError(f'Got unsupported key: {key}.')
return RuntimeInfoPlaceholder(key)
def execution_invocation() -> ExecInvocationPlaceholder:
"""Returns README.ml-pipelines-sdk.md Placeholder representing ExecutionInvocation proto.
Returns:
A Placeholder that will render to the ExecutionInvocation proto.
    Accessing a proto field is the same as if accessing a proto field in Python.
Prefer to use input(key)/output(key)/exec_property(key) functions instead of
input_dict/output_dict/execution_properties field from ExecutionInvocation
proto.
"""
return ExecInvocationPlaceholder() | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/placeholder/placeholder.py | 0.918845 | 0.356251 | placeholder.py | pypi |
"""Base class for TFX nodes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Any, Dict, Optional, Text, Type
from absl import logging
from six import with_metaclass
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec as executor_spec_module
from tfx.types import node_common
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
def _abstract_property() -> Any:
"""Returns an abstract property for use in an ABC abstract class."""
return abc.abstractmethod(lambda: None)
class BaseNode(with_metaclass(abc.ABCMeta, json_utils.Jsonable)):
"""Base class for README.ml-pipelines-sdk.md node in TFX pipeline."""
@classmethod
@deprecation_utils.deprecated(
      None, '`get_id` is deprecated as `instance_name` is deprecated.')
def get_id(cls, instance_name: Optional[Text] = None):
"""Gets the id of README.ml-pipelines-sdk.md node.
This can be used during pipeline authoring time. For example:
from tfx.components import Trainer
resolver = ResolverNode(..., model=Channel(
type=Model, producer_component_id=Trainer.get_id('my_trainer')))
Args:
      instance_name: (Optional) instance name of a node. If given, the instance
name will be taken into consideration when generating the id.
Returns:
an id for the node.
"""
node_class = deprecation_utils.get_first_nondeprecated_class(cls)
node_class_name = node_class.__name__
if instance_name:
return '{}.{}'.format(node_class_name, instance_name)
else:
return node_class_name
def __init__(
self,
instance_name: Optional[Text] = None,
executor_spec: Optional[executor_spec_module.ExecutorSpec] = None,
driver_class: Optional[Type[base_driver.BaseDriver]] = None,
):
"""Initialize README.ml-pipelines-sdk.md node.
Args:
instance_name: Deprecated. Please set `id` directly using `with_id()`
function or `.id` setter in the `BaseNode` class. The pipeline
assembling will fail if there are two nodes in the pipeline with the
same id.
      executor_spec: Optional instance of executor_spec.ExecutorSpec which
        describes how to execute this node (optional; defaults to an empty
        executor, which indicates a no-op).
      driver_class: Optional subclass of base_driver.BaseDriver as a custom
driver for this node (optional, defaults to base_driver.BaseDriver).
Nodes usually use the default driver class, but may override it.
"""
if instance_name:
logging.warning(
'`instance_name` is deprecated, please set the node id directly '
'using `with_id()` or the `.id` setter.')
if executor_spec is None:
executor_spec = executor_spec_module.ExecutorClassSpec(
base_executor.EmptyExecutor)
if driver_class is None:
driver_class = base_driver.BaseDriver
self._instance_name = instance_name
self.executor_spec = executor_spec
self.driver_class = driver_class
self._upstream_nodes = set()
self._downstream_nodes = set()
self._id = None
def to_json_dict(self) -> Dict[Text, Any]:
"""Convert from an object to README.ml-pipelines-sdk.md JSON serializable dictionary."""
return dict((k, v)
for k, v in self.__dict__.items()
if k not in ['_upstream_nodes', '_downstream_nodes'])
@classmethod
def get_class_type(cls) -> Text:
nondeprecated_class = deprecation_utils.get_first_nondeprecated_class(cls)
return '.'.join(
[nondeprecated_class.__module__, nondeprecated_class.__name__])
@property
def type(self) -> Text:
return self.__class__.get_class_type()
@property
@deprecation_utils.deprecated(None,
'component_type is deprecated, use type instead'
)
def component_type(self) -> Text:
return self.type
@property
def id(self) -> Text:
"""Node id, unique across all TFX nodes in README.ml-pipelines-sdk.md pipeline.
If `id` is set by the user, return it directly.
otherwise, if instance name (deprecated) is available, node id will be:
<node_class_name>.<instance_name>
otherwise, node id will be:
<node_class_name>
Returns:
node id.
"""
if self._id:
return self._id
node_class = deprecation_utils.get_first_nondeprecated_class(self.__class__)
node_class_name = node_class.__name__
if self._instance_name:
return '{}.{}'.format(node_class_name, self._instance_name)
else:
return node_class_name
@property
@deprecation_utils.deprecated(None,
'component_id is deprecated, use id instead')
def component_id(self) -> Text:
return self.id
@id.setter
def id(self, id: Text) -> None: # pylint: disable=redefined-builtin
self._id = id
def with_id(self, id: Text) -> 'BaseNode': # pylint: disable=redefined-builtin
self._id = id
return self
@property
@abc.abstractmethod
def inputs(self) -> node_common._PropertyDictWrapper: # pylint: disable=protected-access
pass
@property
@abc.abstractmethod
def outputs(self) -> node_common._PropertyDictWrapper: # pylint: disable=protected-access
pass
@property
@abc.abstractmethod
def exec_properties(self) -> Dict[Text, Any]:
pass
@property
def upstream_nodes(self):
return self._upstream_nodes
def add_upstream_node(self, upstream_node):
"""Experimental: Add another component that must run before this one.
This method enables task-based dependencies by enforcing execution order for
synchronous pipelines on supported platforms. Currently, the supported
platforms are Airflow, Beam, and Kubeflow Pipelines.
Note that this API call should be considered experimental, and may not work
with asynchronous pipelines, sub-pipelines and pipelines with conditional
nodes. We also recommend relying on data for capturing dependencies where
possible to ensure data lineage is fully captured within MLMD.
It is symmetric with `add_downstream_node`.
Args:
      upstream_node: a component that must run before this node.
"""
self._upstream_nodes.add(upstream_node)
if self not in upstream_node.downstream_nodes:
upstream_node.add_downstream_node(self)
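  # Illustrative sketch (components are hypothetical): registration is
  # symmetric, so a single call wires both directions.
  #
  #   chore_b.add_upstream_node(chore_a)
  #   assert chore_b in chore_a.downstream_nodes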
@property
def downstream_nodes(self):
return self._downstream_nodes
def add_downstream_node(self, downstream_node):
"""Experimental: Add another component that must run after this one.
This method enables task-based dependencies by enforcing execution order for
synchronous pipelines on supported platforms. Currently, the supported
platforms are Airflow, Beam, and Kubeflow Pipelines.
Note that this API call should be considered experimental, and may not work
with asynchronous pipelines, sub-pipelines and pipelines with conditional
nodes. We also recommend relying on data for capturing dependencies where
possible to ensure data lineage is fully captured within MLMD.
It is symmetric with `add_upstream_node`.
Args:
      downstream_node: a component that must run after this node.
"""
self._downstream_nodes.add(downstream_node)
if self not in downstream_node.upstream_nodes:
downstream_node.add_upstream_node(self) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/components/base/base_node.py | 0.951639 | 0.173131 | base_node.py | pypi |
"""TFX Resolver definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Any, Dict, List, Optional, Text, Type
from six import with_metaclass
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import node_common
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
# Constant to access resolver class from resolver exec_properties.
RESOLVER_STRATEGY_CLASS = 'resolver_class'
# Constant to access resolver config from resolver exec_properties.
RESOLVER_CONFIG = 'source_uri'
RESOLVER_STRATEGY_CLASS_LIST = 'resolver_class_list'
RESOLVER_CONFIG_LIST = 'resolver_config_list'
class ResolveResult(object):
"""The data structure to hold results from Resolver.
Attributes:
    per_key_resolve_result: A key -> List[Artifact] dict containing the resolved
      artifacts for each source channel with the key as tag.
    per_key_resolve_state: A key -> bool dict containing whether or not the
resolved artifacts for the channel are considered complete.
has_complete_result: bool value indicating whether all desired artifacts
have been resolved.
"""
def __init__(self, per_key_resolve_result: Dict[Text, List[types.Artifact]],
per_key_resolve_state: Dict[Text, bool]):
self.per_key_resolve_result = per_key_resolve_result
self.per_key_resolve_state = per_key_resolve_state
self.has_complete_result = all(s for s in per_key_resolve_state.values())
class ResolverStrategy(with_metaclass(abc.ABCMeta, object)):
"""Base resolver strategy class.
  A resolver strategy defines a type behavior used for input selection. A
  resolver strategy subclass must override the resolve_artifacts() function,
  which takes a dict of <Text, List<types.Artifact>> as parameters and returns
  the resolved dict.
"""
@deprecation_utils.deprecated(
date='2020-09-24',
instructions='Please switch to the `resolve_artifacts`.')
def resolve(
self,
pipeline_info: data_types.PipelineInfo,
metadata_handler: metadata.Metadata,
source_channels: Dict[Text, types.Channel],
) -> ResolveResult:
"""Resolves artifacts from channels by querying MLMD.
Args:
pipeline_info: PipelineInfo of the current pipeline. We do not want to
query artifacts across pipeline boundary.
      metadata_handler: A read-only handler to query MLMD.
      source_channels: A key -> channel dict which contains the info of the
source channels.
Returns:
      A ResolveResult instance.
Raises:
DeprecationWarning: when it is called.
"""
raise DeprecationWarning
@abc.abstractmethod
def resolve_artifacts(
self, metadata_handler: metadata.Metadata,
input_dict: Dict[Text, List[types.Artifact]]
) -> Optional[Dict[Text, List[types.Artifact]]]:
"""Resolves artifacts from channels, optionally querying MLMD if needed.
    In asynchronous execution mode, resolver classes may be composed in sequence
where the resolve_artifacts() result from the previous resolver instance
would be passed to the next resolver instance's resolve_artifacts() inputs.
If resolve_artifacts() returns None, it is considered as "no inputs
available", and the remaining resolvers will not be executed.
Also if resolve_artifacts() omits any key from the input_dict it will not
be available from the downstream resolver instances. General recommendation
is to preserve all keys in the input_dict unless you have specific reason.
Args:
metadata_handler: A metadata handler to access MLMD store.
input_dict: The input_dict to resolve from.
Returns:
      If all entries have enough data after the resolving, returns the resolved
      input_dict. Otherwise, returns None.
"""
raise NotImplementedError
class _ResolverDriver(base_driver.BaseDriver):
"""Driver for Resolver.
Constructs an instance of the resolver_class specified by user with configs
passed in by user and marks the resolved artifacts as the output of the
Resolver.
"""
  # TODO(ruoyu): We need a better approach to let the Resolver fail on
# incomplete data.
def pre_execution(
self,
input_dict: Dict[Text, types.Channel],
output_dict: Dict[Text, types.Channel],
exec_properties: Dict[Text, Any],
driver_args: data_types.DriverArgs,
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> data_types.ExecutionDecision:
# Registers contexts and execution
contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(
pipeline_info)
execution = self._metadata_handler.register_execution(
exec_properties=exec_properties,
pipeline_info=pipeline_info,
component_info=component_info,
contexts=contexts)
# Gets resolved artifacts.
resolver_class = exec_properties[RESOLVER_STRATEGY_CLASS]
if exec_properties[RESOLVER_CONFIG]:
resolver = resolver_class(**exec_properties[RESOLVER_CONFIG])
else:
resolver = resolver_class()
resolve_result = resolver.resolve(
pipeline_info=pipeline_info,
metadata_handler=self._metadata_handler,
source_channels=input_dict.copy())
    # TODO(b/148828122): This is a temporary workaround for interactive mode.
for k, c in output_dict.items():
output_dict[k] = types.Channel(
type=c.type, artifacts=resolve_result.per_key_resolve_result[k])
# Updates execution to reflect artifact resolution results and mark
# as cached.
self._metadata_handler.update_execution(
execution=execution,
component_info=component_info,
output_artifacts=resolve_result.per_key_resolve_result,
execution_state=metadata.EXECUTION_STATE_CACHED,
contexts=contexts)
return data_types.ExecutionDecision(
input_dict={},
output_dict=resolve_result.per_key_resolve_result,
exec_properties=exec_properties,
execution_id=execution.id,
use_cached_results=True)
class Resolver(base_node.BaseNode):
"""Definition for TFX Resolver.
  Resolver is a special TFX node which handles special artifact resolution
  logic whose results will be used as inputs for downstream nodes.
  To use Resolver, pass the following to the Resolver constructor:
  a. name of the Resolver instance
  b. a subclass of ResolverStrategy
  c. the configs that will be used to construct an instance of (b)
d. channels to resolve with their tag, in the form of kwargs
Here is an example:
...
example_gen = ImportExampleGen(...)
latest_five_examples_resolver = Resolver(
instance_name='latest_five_examples_resolver',
strategy_class=latest_artifacts_strategy.LatestArtifactsStrategy,
resolver_config={'desired_num_of_artifacts' : 5},
examples=example_gen.outputs['examples'])
trainer = MyTrainer(
examples=latest_five_examples_resolver.outputs['examples'],
user_module=...)
...
Attributes:
_strategy_class: the class of the ResolverStrategy.
    _config: the configs that will be used to construct an instance of
_strategy_class.
"""
def __init__(self,
instance_name: Text,
strategy_class: Type[ResolverStrategy],
               config: Optional[Dict[Text, json_utils.JsonableType]] = None,
**kwargs: types.Channel):
"""Init function for Resolver.
Args:
instance_name: the name of the Resolver instance.
strategy_class: README.ml-pipelines-sdk.md ResolverStrategy subclass which contains the artifact
resolution logic.
      config: A dict of key to Jsonable type representing configuration that
        will be used to construct the resolver strategy.
      **kwargs: A key -> Channel dict, describing the Channels to be
        resolved. This is set by the user through keyword args.
"""
self._strategy_class = strategy_class
self._config = config or {}
self._input_dict = kwargs
self._output_dict = {}
for k, c in self._input_dict.items():
if not isinstance(c, types.Channel):
raise ValueError(
('Expected extra kwarg %r to be of type `tfx.types.Channel` (but '
'got %r instead).') % (k, c))
self._output_dict[k] = types.Channel(type=c.type, artifacts=[c.type()])
super(Resolver, self).__init__(
instance_name=instance_name,
driver_class=_ResolverDriver,
)
@property
def inputs(self) -> node_common._PropertyDictWrapper: # pylint: disable=protected-access
return node_common._PropertyDictWrapper(self._input_dict) # pylint: disable=protected-access
@property
def outputs(self) -> node_common._PropertyDictWrapper: # pylint: disable=protected-access
return node_common._PropertyDictWrapper(self._output_dict) # pylint: disable=protected-access
@property
def exec_properties(self) -> Dict[Text, Any]:
return {
RESOLVER_STRATEGY_CLASS: self._strategy_class,
RESOLVER_CONFIG: self._config
} | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/components/common/resolver.py | 0.860545 | 0.213224 | resolver.py | pypi |
"""Pluggable file I/O interface for use in TFX system and components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Iterable, List, Text, Tuple, Type
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
# Import modules that may provide filesystem plugins.
import tfx.dsl.io.plugins.local # pylint: disable=unused-import, g-import-not-at-top
import tfx.dsl.io.plugins.tensorflow_gfile # pylint: disable=unused-import, g-import-not-at-top
# Expose `NotFoundError` as `fileio.NotFoundError`.
NotFoundError = filesystem.NotFoundError
def _get_filesystem(path) -> Type[filesystem.Filesystem]:
return (filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY
.get_filesystem_for_path(path))
def open(path: PathType, mode: Text = 'r'): # pylint: disable=redefined-builtin
"""Open README.ml-pipelines-sdk.md file at the given path."""
return _get_filesystem(path).open(path, mode=mode)
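# Illustrative usage sketch (the path is hypothetical; any scheme with a
# registered filesystem plugin behaves the same way):
#
#   with open('/tmp/example.txt', 'w') as f:
#     f.write('hello')
#   assert exists('/tmp/example.txt')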
def copy(src: PathType, dst: PathType, overwrite: bool = False) -> None:
"""Copy README.ml-pipelines-sdk.md file from the source to the destination."""
src_fs = _get_filesystem(src)
dst_fs = _get_filesystem(dst)
if src_fs is dst_fs:
src_fs.copy(src, dst, overwrite=overwrite)
else:
if not overwrite and exists(dst):
raise OSError(
('Destination file %r already exists and argument `overwrite` is '
'false.') % dst)
contents = open(src, mode='rb').read()
open(dst, mode='wb').write(contents)
def exists(path: PathType) -> bool:
"""Return whether README.ml-pipelines-sdk.md path exists."""
return _get_filesystem(path).exists(path)
def glob(pattern: PathType) -> List[PathType]:
"""Return the paths that match README.ml-pipelines-sdk.md glob pattern."""
return _get_filesystem(pattern).glob(pattern)
def isdir(path: PathType) -> bool:
"""Return whether README.ml-pipelines-sdk.md path is README.ml-pipelines-sdk.md directory."""
return _get_filesystem(path).isdir(path)
def listdir(path: PathType) -> List[PathType]:
"""Return the list of files in README.ml-pipelines-sdk.md directory."""
return _get_filesystem(path).listdir(path)
def makedirs(path: PathType) -> None:
"""Make README.ml-pipelines-sdk.md directory at the given path, recursively creating parents."""
_get_filesystem(path).makedirs(path)
def mkdir(path: PathType) -> None:
"""Make README.ml-pipelines-sdk.md directory at the given path; parent directory must exist."""
_get_filesystem(path).mkdir(path)
def remove(path: PathType) -> None:
"""Remove the file at the given path."""
_get_filesystem(path).remove(path)
def rename(src: PathType, dst: PathType, overwrite: bool = False) -> None:
"""Rename README.ml-pipelines-sdk.md source file to README.ml-pipelines-sdk.md destination path."""
src_fs = _get_filesystem(src)
dst_fs = _get_filesystem(dst)
if src_fs is dst_fs:
src_fs.rename(src, dst, overwrite=overwrite)
else:
raise NotImplementedError(
('Rename from %r to %r using different filesystems plugins is '
'currently not supported.') % (src, dst))
def rmtree(path: PathType) -> None:
"""Remove the given directory and its recursive contents."""
_get_filesystem(path).rmtree(path)
def stat(path: PathType) -> Any:
"""Return the stat descriptor for README.ml-pipelines-sdk.md given file path."""
return _get_filesystem(path).stat(path)
def walk(
top: PathType,
topdown: bool = True,
onerror: Callable[..., None] = None
) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:
"""Return an iterator walking README.ml-pipelines-sdk.md directory tree."""
return _get_filesystem(top).walk(top, topdown=topdown, onerror=onerror) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/dsl/io/fileio.py | 0.874091 | 0.298236 | fileio.py | pypi |