# NOTE: dataset-export metadata header (not part of the original source file):
# code | path | quality_prob | learning_prob | filename | kind
"""Avro based TFX example gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.types import standard_component_specs
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _AvroToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Reads Avro files and transforms them into TF examples.

  Each input split is processed by a separate invocation of this PTransform.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties.
      - input_base: input dir that contains Avro data.
    split_pattern: Split.pattern in Input config, glob relative file pattern
      that maps to input files with root directory given by input_base.

  Returns:
    PCollection of TF examples.
  """
  avro_pattern = os.path.join(
      exec_properties[standard_component_specs.INPUT_BASE_KEY], split_pattern)
  logging.info('Processing input avro data %s to TFExample.', avro_pattern)
  read_avro = beam.io.ReadFromAvro(avro_pattern)
  to_example = beam.Map(utils.dict_to_example)
  return pipeline | 'ReadFromAvro' >> read_avro | 'ToTFExample' >> to_example
class Executor(BaseExampleGenExecutor):
  """TFX example gen executor for processing avro format.

  Data type conversion:
    integer types will be converted to tf.train.Feature with tf.train.Int64List.
    float types will be converted to tf.train.Feature with tf.train.FloatList.
    string types will be converted to tf.train.Feature with tf.train.BytesList
      and utf-8 encoding.

  Note that,
    Single value will be converted to a list of that single value.
    Missing value will be converted to empty tf.train.Feature().

  For details, check the dict_to_example function in example_gen.utils.

  Example usage:

    from tfx.components.base import executor_spec
    from tfx.components.example_gen.component import FileBasedExampleGen
    from tfx.components.example_gen.custom_executors import avro_executor
    from tfx.utils.dsl_utils import external_input

    example_gen = FileBasedExampleGen(
        input=external_input(avro_dir_path),
        custom_executor_spec=executor_spec.ExecutorClassSpec(
            avro_executor.Executor))
  """

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for avro to TF examples."""
    return _AvroToExample
"""TFX ImportExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.example_gen import component
from tfx.components.example_gen.import_example_gen import executor
from tfx.dsl.components.base import executor_spec
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
class ImportExampleGen(component.FileBasedExampleGen):  # pylint: disable=protected-access
  """Official TFX ImportExampleGen component.

  The ImportExampleGen component takes TFRecord files with TF Example data
  format, and generates train and eval examples for downstream components.
  This component provides consistent and configurable partition, and it also
  shuffles the dataset for ML best practice.
  """

  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      # TODO(b/159467778): deprecate this, use input_base instead.
      input: Optional[types.Channel] = None,  # pylint: disable=redefined-builtin
      input_base: Optional[Text] = None,
      input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
                                                               Any]]] = None,
      output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
                                                                 Any]]] = None,
      range_config: Optional[Union[range_config_pb2.RangeConfig,
                                   Dict[Text, Any]]] = None,
      payload_format: Optional[int] = example_gen_pb2.FORMAT_TF_EXAMPLE,
      example_artifacts: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct an ImportExampleGen component.

    Args:
      input: A Channel of type `standard_artifacts.ExternalArtifact`, which
        includes one artifact whose uri is an external directory containing the
        TFRecord files. (Deprecated by input_base)
      input_base: an external directory containing the TFRecord files.
      input_config: An example_gen_pb2.Input instance, providing input
        configuration. If unset, the files under input_base will be treated as
        a single split. If any field is provided as a RuntimeParameter,
        input_config should be constructed as a dict with the same field names
        as Input proto message.
      output_config: An example_gen_pb2.Output instance, providing output
        configuration. If unset, default splits will be 'train' and 'eval' with
        size 2:1. If any field is provided as a RuntimeParameter,
        output_config should be constructed as a dict with the same field names
        as Output proto message.
      range_config: An optional range_config_pb2.RangeConfig instance,
        specifying the range of span values to consider. If unset, driver will
        default to searching for latest span with no restrictions.
      payload_format: Payload format of input data. Should be one of
        example_gen_pb2.PayloadFormat enum. Note that payload format of output
        data is the same as input.
      example_artifacts: Optional channel of 'ExamplesPath' for output train and
        eval examples.
      instance_name: Optional unique instance name. Necessary if multiple
        ImportExampleGen components are declared in the same pipeline.
    """
    if input:
      logging.warning(
          'The "input" argument to the ImportExampleGen component has been '
          'deprecated by "input_base". Please update your usage as support for '
          'this argument will be removed soon.')
      input_base = artifact_utils.get_single_uri(list(input.get()))
    super(ImportExampleGen, self).__init__(
        input_base=input_base,
        input_config=input_config,
        output_config=output_config,
        range_config=range_config,
        example_artifacts=example_artifacts,
        output_data_format=payload_format,
        instance_name=instance_name)
"""Generic TFX ImportExampleGen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text, Union
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.proto import example_gen_pb2
from tfx.types import standard_component_specs
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(bytes)
def _ImportSerializedRecord(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Reads TFRecord files into a PCollection of serialized records.

  Each input split is processed by a separate invocation of this PTransform.

  Args:
    pipeline: Beam pipeline.
    exec_properties: A dict of execution properties.
      - input_base: input dir that contains input data.
    split_pattern: Split.pattern in Input config, glob relative file pattern
      that maps to input files with root directory given by input_base.

  Returns:
    PCollection of records (tf.Example, tf.SequenceExample, or bytes).
  """
  file_pattern = os.path.join(
      exec_properties[standard_component_specs.INPUT_BASE_KEY], split_pattern)
  logging.info('Reading input TFRecord data %s.', file_pattern)
  # TODO(jyzhao): profile input examples.
  # TODO(jyzhao): support multiple input container format.
  read_tfrecord = beam.io.ReadFromTFRecord(file_pattern=file_pattern)
  return pipeline | 'ReadFromTFRecord' >> read_tfrecord
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
  """Generic TFX import example gen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for importing records."""

    @beam.ptransform_fn
    @beam.typehints.with_input_types(beam.Pipeline)
    @beam.typehints.with_output_types(Union[tf.train.Example,
                                            tf.train.SequenceExample, bytes])
    def ImportRecord(pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
                     split_pattern: Text) -> beam.pvalue.PCollection:
      """PTransform to import records.

      The records are tf.train.Example, tf.train.SequenceExample,
      or serialized proto.

      Args:
        pipeline: Beam pipeline.
        exec_properties: A dict of execution properties.
          - input_base: input dir that contains input data.
        split_pattern: Split.pattern in Input config, glob relative file pattern
          that maps to input files with root directory given by input_base.

      Returns:
        PCollection of records (tf.Example, tf.SequenceExample, or bytes).

      Raises:
        ValueError: if output_data_format is not a supported payload format.
      """
      output_payload_format = exec_properties.get(
          standard_component_specs.OUTPUT_DATA_FORMAT_KEY)
      serialized_records = (
          pipeline
          # pylint: disable=no-value-for-parameter
          | _ImportSerializedRecord(exec_properties, split_pattern))
      if output_payload_format == example_gen_pb2.PayloadFormat.FORMAT_PROTO:
        # Serialized protos are passed through unparsed.
        return serialized_records
      elif (output_payload_format ==
            example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE):
        return (serialized_records
                | 'ToTFExample' >> beam.Map(tf.train.Example.FromString))
      elif (output_payload_format ==
            example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE):
        return (serialized_records
                | 'ToTFSequenceExample' >> beam.Map(
                    tf.train.SequenceExample.FromString))
      raise ValueError('output_payload_format must be one of FORMAT_TF_EXAMPLE,'
                       ' FORMAT_TF_SEQUENCE_EXAMPLE or FORMAT_PROTO')

    return ImportRecord
"""TFX CsvExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from absl import logging
from tfx import types
from tfx.components.example_gen import component
from tfx.components.example_gen.csv_example_gen import executor
from tfx.dsl.components.base import executor_spec
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
class CsvExampleGen(component.FileBasedExampleGen):  # pylint: disable=protected-access
  """Official TFX CsvExampleGen component.

  The csv examplegen component takes csv data, and generates train
  and eval examples for downstream components.

  The csv examplegen encodes column values to tf.Example int/float/byte feature.
  For the case when there's missing cells, the csv examplegen uses:
  -- tf.train.Feature(`type`_list=tf.train.`type`List(value=[])), when the
     `type` can be inferred.
  -- tf.train.Feature() when it cannot infer the `type` from the column.

  Note that the type inferring will be per input split. If input isn't a single
  split, users need to ensure the column types align in each pre-splits.

  For example, given the following csv rows of a split:

    header:A,B,C,D
    row1:  1,,x,0.1
    row2:  2,,y,0.2
    row3:  3,,,0.3
    row4:

  The output example will be
    example1: 1(int), empty feature(no type), x(string), 0.1(float)
    example2: 2(int), empty feature(no type), y(string), 0.2(float)
    example3: 3(int), empty feature(no type), empty list(string), 0.3(float)

  Note that the empty feature is `tf.train.Feature()` while empty list string
  feature is `tf.train.Feature(bytes_list=tf.train.BytesList(value=[]))`.
  """

  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(
      self,
      # TODO(b/159467778): deprecate this, use input_base instead.
      input: Optional[types.Channel] = None,  # pylint: disable=redefined-builtin
      input_base: Optional[Text] = None,
      input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
                                                               Any]]] = None,
      output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
                                                                 Any]]] = None,
      range_config: Optional[Union[range_config_pb2.RangeConfig,
                                   Dict[Text, Any]]] = None,
      example_artifacts: Optional[types.Channel] = None,
      instance_name: Optional[Text] = None):
    """Construct a CsvExampleGen component.

    Args:
      input: A Channel of type `standard_artifacts.ExternalArtifact`, which
        includes one artifact whose uri is an external directory containing the
        CSV files. (Deprecated by input_base)
      input_base: an external directory containing the CSV files.
      input_config: An example_gen_pb2.Input instance, providing input
        configuration. If unset, the files under input_base will be treated as
        a single split. If any field is provided as a RuntimeParameter,
        input_config should be constructed as a dict with the same field names
        as Input proto message.
      output_config: An example_gen_pb2.Output instance, providing output
        configuration. If unset, default splits will be 'train' and 'eval' with
        size 2:1. If any field is provided as a RuntimeParameter,
        output_config should be constructed as a dict with the same field names
        as Output proto message.
      range_config: An optional range_config_pb2.RangeConfig instance,
        specifying the range of span values to consider. If unset, driver will
        default to searching for latest span with no restrictions.
      example_artifacts: Optional channel of 'ExamplesPath' for output train and
        eval examples.
      instance_name: Optional unique instance name. Necessary if multiple
        CsvExampleGen components are declared in the same pipeline.
    """
    if input:
      logging.warning(
          'The "input" argument to the CsvExampleGen component has been '
          'deprecated by "input_base". Please update your usage as support for '
          'this argument will be removed soon.')
      input_base = artifact_utils.get_single_uri(list(input.get()))
    super(CsvExampleGen, self).__init__(
        input_base=input_base,
        input_config=input_config,
        output_config=output_config,
        range_config=range_config,
        example_artifacts=example_artifacts,
        instance_name=instance_name)
"""Generic TFX CSV example gen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Iterable, List, Text
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen.base_example_gen_executor import BaseExampleGenExecutor
from tfx.dsl.io import fileio
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx_bsl.coders import csv_decoder
def _int_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
  """Converts a CSV cell to an int64 Feature; empty cell yields empty list."""
  values = [int(cell)] if cell else []
  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
  """Converts a CSV cell to a float Feature; empty cell yields empty list."""
  values = [float(cell)] if cell else []
  return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_handler(cell: csv_decoder.CSVCell) -> tf.train.Feature:
  """Converts a CSV cell to a bytes Feature; empty cell yields empty list."""
  values = [cell] if cell else []
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
@beam.typehints.with_input_types(List[csv_decoder.CSVCell],
                                 List[csv_decoder.ColumnInfo])
@beam.typehints.with_output_types(tf.train.Example)
class _ParsedCsvToTfExample(beam.DoFn):
  """A beam.DoFn to convert a parsed CSV line to a tf.Example."""

  def __init__(self):
    # Built lazily on the first element, once inferred column types arrive
    # as the side input.
    self._column_handlers = None

  def _make_column_handlers(self, column_infos: List[csv_decoder.ColumnInfo]):
    """Returns (column_name, handler_fn) pairs for the given columns.

    handler_fn is None for columns whose type could not be inferred; such
    columns are emitted as an empty tf.train.Feature().
    """
    type_to_handler = {
        csv_decoder.ColumnType.INT: _int_handler,
        csv_decoder.ColumnType.FLOAT: _float_handler,
        csv_decoder.ColumnType.STRING: _bytes_handler,
    }
    return [(column_info.name, type_to_handler.get(column_info.type))
            for column_info in column_infos]

  def process(
      self, csv_cells: List[csv_decoder.CSVCell],
      column_infos: List[csv_decoder.ColumnInfo]) -> Iterable[tf.train.Example]:
    """Converts one parsed CSV row into a tf.train.Example.

    Args:
      csv_cells: Cells of one CSV row.
      column_infos: Side input with name and inferred type per column.

    Yields:
      A tf.train.Example with one feature per column.

    Raises:
      ValueError: if the row's cell count does not match the column count.
    """
    if not self._column_handlers:
      self._column_handlers = self._make_column_handlers(column_infos)
    # skip blank lines.
    if not csv_cells:
      return
    if len(csv_cells) != len(self._column_handlers):
      raise ValueError('Invalid CSV line: {}'.format(csv_cells))
    feature = {}
    for csv_cell, (column_name, handler_fn) in zip(csv_cells,
                                                   self._column_handlers):
      feature[column_name] = (
          handler_fn(csv_cell) if handler_fn else tf.train.Feature())
    yield tf.train.Example(features=tf.train.Features(feature=feature))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(tf.train.Example)
def _CsvToExample(  # pylint: disable=invalid-name
    pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
    split_pattern: Text) -> beam.pvalue.PCollection:
  """Reads CSV files and transforms them to TF examples.

  Each input split is processed by a separate invocation of this PTransform.

  Args:
    pipeline: beam pipeline.
    exec_properties: A dict of execution properties.
      - input_base: input dir that contains CSV data. CSV must have header line.
    split_pattern: Split.pattern in Input config, glob relative file pattern
      that maps to input files with root directory given by input_base.

  Returns:
    PCollection of TF examples.

  Raises:
    RuntimeError: if split is empty or csv headers are not equal.
  """
  csv_pattern = os.path.join(
      exec_properties[standard_component_specs.INPUT_BASE_KEY], split_pattern)
  logging.info('Processing input csv data %s to TFExample.', csv_pattern)
  matched_files = fileio.glob(csv_pattern)
  if not matched_files:
    raise RuntimeError(
        'Split pattern {} does not match any files.'.format(csv_pattern))
  # All files within one split must share an identical header line.
  expected_header = io_utils.load_csv_column_names(matched_files[0])
  if any(
      io_utils.load_csv_column_names(f) != expected_header
      for f in matched_files[1:]):
    raise RuntimeError(
        'Files in same split {} have different header.'.format(csv_pattern))
  parsed_csv_lines = (
      pipeline
      | 'ReadFromText' >> beam.io.ReadFromText(
          file_pattern=csv_pattern, skip_header_lines=1)
      | 'ParseCSVLine' >> beam.ParDo(csv_decoder.ParseCSVLine(delimiter=','))
      | 'ExtractParsedCSVLines' >> beam.Keys())
  # Column types are inferred over the whole split and fed in as a side input.
  column_infos = beam.pvalue.AsSingleton(
      parsed_csv_lines
      | 'InferColumnTypes' >> beam.CombineGlobally(
          csv_decoder.ColumnTypeInferrer(expected_header,
                                         skip_blank_lines=True)))
  return (parsed_csv_lines
          | 'ToTFExample' >> beam.ParDo(_ParsedCsvToTfExample(), column_infos))
class Executor(BaseExampleGenExecutor):
  """Generic TFX CSV example gen executor."""

  def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
    """Returns PTransform for CSV to TF examples."""
    return _CsvToExample
"""TFX Tuner component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, NamedTuple, Optional, Text
from kerastuner.engine import base_tuner
from tfx import types
from tfx.components.tuner import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import trainer_pb2
from tfx.proto import tuner_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TunerSpec
from tfx.utils import json_utils
# tuner: A BaseTuner that will be used for tuning.
# fit_kwargs: Args to pass to tuner's run_trial function for fitting the
#             model , e.g., the training and validation dataset. Required
#             args depend on the tuner's implementation.
TunerFnResult = NamedTuple('TunerFnResult', [('tuner', base_tuner.BaseTuner),
                                             ('fit_kwargs', Dict[Text, Any])])
TunerFnResult.__doc__ = """
tuner_fn returns a TunerFnResult that contains:
  - tuner: A BaseTuner that will be used for tuning.
  - fit_kwargs: Args to pass to tuner's run_trial function for fitting the
                model , e.g., the training and validation dataset. Required
                args depend on the tuner's implementation.
"""
class Tuner(base_component.BaseComponent):
  """A TFX component for model hyperparameter tuning."""

  SPEC_CLASS = TunerSpec
  EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)

  def __init__(self,
               examples: types.Channel = None,
               schema: Optional[types.Channel] = None,
               transform_graph: Optional[types.Channel] = None,
               module_file: Optional[Text] = None,
               tuner_fn: Optional[Text] = None,
               train_args: trainer_pb2.TrainArgs = None,
               eval_args: trainer_pb2.EvalArgs = None,
               tune_args: Optional[tuner_pb2.TuneArgs] = None,
               custom_config: Optional[Dict[Text, Any]] = None,
               best_hyperparameters: Optional[types.Channel] = None,
               instance_name: Optional[Text] = None):
    """Construct a Tuner component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, serving as the
        source of examples that are used in tuning (required).
      schema: An optional Channel of type `standard_artifacts.Schema`, serving
        as the schema of training and eval data. This is used when raw examples
        are provided.
      transform_graph: An optional Channel of type
        `standard_artifacts.TransformGraph`, serving as the input transform
        graph if present. This is used when transformed examples are provided.
      module_file: A path to python module file containing UDF tuner definition.
        The module_file must implement a function named `tuner_fn` at its top
        level. The function must have the following signature.
          def tuner_fn(fn_args: FnArgs) -> TunerFnResult:
        Exactly one of 'module_file' or 'tuner_fn' must be supplied.
      tuner_fn: A python path to UDF model definition function. See
        'module_file' for the required signature of the UDF. Exactly one of
        'module_file' or 'tuner_fn' must be supplied.
      train_args: A trainer_pb2.TrainArgs instance, containing args used for
        training. Currently only splits and num_steps are available. Default
        behavior (when splits is empty) is train on `train` split.
      eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
        Currently only splits and num_steps are available. Default behavior
        (when splits is empty) is evaluate on `eval` split.
      tune_args: A tuner_pb2.TuneArgs instance, containing args used for tuning.
        Currently only num_parallel_trials is available.
      custom_config: A dict which contains additional training job parameters
        that will be passed into user module.
      best_hyperparameters: Optional Channel of type
        `standard_artifacts.HyperParameters` for result of the best hparams.
      instance_name: Optional unique instance name. Necessary if multiple Tuner
        components are declared in the same pipeline.

    Raises:
      ValueError: if neither or both of 'module_file' and 'tuner_fn' are
        supplied.
    """
    if bool(module_file) == bool(tuner_fn):
      raise ValueError(
          "Exactly one of 'module_file' or 'tuner_fn' must be supplied")
    best_hyperparameters = best_hyperparameters or types.Channel(
        type=standard_artifacts.HyperParameters)
    spec = TunerSpec(
        examples=examples,
        schema=schema,
        transform_graph=transform_graph,
        module_file=module_file,
        tuner_fn=tuner_fn,
        train_args=train_args,
        eval_args=eval_args,
        tune_args=tune_args,
        best_hyperparameters=best_hyperparameters,
        custom_config=json_utils.dumps(custom_config),
    )
    super(Tuner, self).__init__(spec=spec, instance_name=instance_name)
"""Generic TFX tuner executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from typing import Any, Callable, Dict, List, Optional, Text
from absl import logging
from kerastuner.engine import base_tuner
from tfx import types
from tfx.components.trainer import fn_args_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import tuner_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import io_utils
from tfx.utils import proto_utils
# Default file name for generated best hyperparameters file.
_DEFAULT_FILE_NAME = 'best_hyperparameters.txt'
# TODO(b/160253334): Establish a separation of practice between this 'default'
# module and the ones in 'extensions'.
def _get_tuner_fn(exec_properties: Dict[Text, Any]) -> Callable[..., Any]:
  """Returns the user-supplied `tuner_fn` resolved from execution properties."""
  return udf_utils.get_fn(exec_properties, 'tuner_fn')
def get_tune_args(
    exec_properties: Dict[Text, Any]) -> Optional[tuner_pb2.TuneArgs]:
  """Returns TuneArgs proto parsed from execution properties, if present."""
  tune_args_json = exec_properties.get(standard_component_specs.TUNE_ARGS_KEY)
  if not tune_args_json:
    return None
  tune_args = tuner_pb2.TuneArgs()
  proto_utils.json_to_proto(tune_args_json, tune_args)
  return tune_args
def write_best_hyperparameters(
    tuner: base_tuner.BaseTuner,
    output_dict: Dict[Text, List[types.Artifact]]) -> None:
  """Writes out the best hyperparameters known to the given Tuner instance."""
  best_hparams = tuner.get_best_hyperparameters()[0].get_config()
  logging.info('Best HyperParameters: %s', best_hparams)
  output_uri = artifact_utils.get_single_uri(
      output_dict[standard_component_specs.BEST_HYPERPARAMETERS_KEY])
  output_path = os.path.join(output_uri, _DEFAULT_FILE_NAME)
  io_utils.write_string_file(output_path, json.dumps(best_hparams))
  logging.info('Best Hyperparameters are written to %s.', output_path)
def search(input_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any],
           working_dir: Text) -> base_tuner.BaseTuner:
  """Conducts a single hyperparameter search loop and returns the Tuner."""
  fn_args = fn_args_utils.get_common_fn_args(input_dict, exec_properties,
                                             working_dir)
  tuner_fn = _get_tuner_fn(exec_properties)
  tuner_fn_result = tuner_fn(fn_args)
  tuner = tuner_fn_result.tuner
  # TODO(b/156966497): set logger for printing.
  tuner.search_space_summary()
  logging.info('Start tuning... Tuner ID: %s', tuner.tuner_id)
  tuner.search(**tuner_fn_result.fit_kwargs)
  logging.info('Finished tuning... Tuner ID: %s', tuner.tuner_id)
  tuner.results_summary()
  return tuner
class Executor(base_executor.BaseExecutor):
  """TFX Tuner component executor."""

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Runs a hyperparameter search and writes out the best hyperparameters.

    Raises:
      ValueError: if TuneArgs is present, which this executor does not support.
    """
    if get_tune_args(exec_properties):
      raise ValueError("TuneArgs is not supported by this Tuner's Executor.")
    tuner = search(input_dict, exec_properties, self._get_tmp_dir())
    write_best_hyperparameters(tuner, output_dict)
"""Common functionalities used in transform executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import sys
from typing import Any, Callable, Text, Sequence, Mapping
def GetValues(inputs: Mapping[Text, Sequence[Any]],
              label: Text) -> Sequence[Any]:
  """Retrieves the value of the given labeled input.

  Args:
    inputs: Dict from label to a value list.
    label: Label of the value to retrieve.

  Returns:
    A list of values; an empty list if the label is absent. A single non-list
    value is wrapped into a one-element list.
  """
  if label not in inputs:
    return []
  values = inputs.get(label)
  if not isinstance(values, list):
    return [values]
  return values
def GetSoleValue(inputs: Mapping[Text, Sequence[Any]], label: Text,
                 strict=True) -> Any:
  """Helper method for retrieving a sole labeled input.

  Args:
    inputs: Dict from label to a value list.
    label: Label of the value to retrieve.
    strict: If true, exactly one value should exist for label.

  Returns:
    A sole labeled value, or None when `strict` is False and no value exists.

  Raises:
    ValueError: If there are multiple values for the label, or if `strict` is
      True and there is not exactly one value.
  """
  values = GetValues(inputs, label)
  if len(values) > 1:
    raise ValueError(
        'There should not be more than one value for label {}'.format(label))
  if strict:
    if len(values) != 1:
      raise ValueError(
          'There should be one and only one value for label {}'.format(label))
  elif not values:
    return None
  return values[0]
def FunctionHasArg(fn: Callable, arg_name: Text) -> bool:  # pylint: disable=g-bare-generic
  """Tests at runtime if a function's signature contains a certain argument.

  Args:
    fn: function to be tested.
    arg_name: Name of the argument to be tested.

  Returns:
    True if the function signature contains that argument.
  """
  if sys.version_info.major == 2:
    # inspect.signature does not exist on Python 2.
    return arg_name in inspect.getargspec(fn).args  # pylint: disable=deprecated-method
  else:
    return arg_name in inspect.signature(fn).parameters
"""Utility functions related to Examples artifact shared by components."""
# TODO(b/149535307): Remove __future__ imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
from absl import logging
from tfx import types
from tfx.components.example_gen import utils as example_gen_utils
from tfx.proto import example_gen_pb2
from tfx.types import standard_artifacts
_DEFAULT_PAYLOAD_FORMAT = example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE
def get_payload_format(examples: types.Artifact) -> int:
  """Returns the payload format of an Examples artifact.

  Artifacts produced before TFX supported multiple payload formats carry no
  "payload_format" custom property; those are treated as tf.Example format.

  Args:
    examples: A standard_artifacts.Examples artifact.

  Returns:
    payload_format: One of the enums in example_gen_pb2.PayloadFormat.
  """
  assert examples.type_name == standard_artifacts.Examples.TYPE_NAME, (
      'examples must be of type standard_artifacts.Examples')
  if not examples.has_custom_property(
      example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME):
    logging.warning('Examples artifact does not have %s custom property. '
                    'Falling back to %s',
                    example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME,
                    example_gen_pb2.PayloadFormat.Name(_DEFAULT_PAYLOAD_FORMAT))
    return _DEFAULT_PAYLOAD_FORMAT
  return example_gen_pb2.PayloadFormat.Value(
      examples.get_string_custom_property(
          example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME))
def get_payload_format_string(examples: types.Artifact) -> Text:
  """Returns the payload format of an Examples artifact as a string.

  Args:
    examples: A standard_artifacts.Examples artifact.

  Returns:
    The name of the example_gen_pb2.PayloadFormat enum value.
  """
  return example_gen_pb2.PayloadFormat.Name(get_payload_format(examples))
def set_payload_format(examples: types.Artifact, payload_format: int):
  """Sets the payload format custom property for `examples`.

  Args:
    examples: A standard_artifacts.Examples artifact.
    payload_format: One of the enums in example_gen_pb2.PayloadFormat.
  """
  assert examples.type_name == standard_artifacts.Examples.TYPE_NAME, (
      'examples must be of type standard_artifacts.Examples')
  examples.set_string_custom_property(
      example_gen_utils.PAYLOAD_FORMAT_PROPERTY_NAME,
      example_gen_pb2.PayloadFormat.Name(payload_format))
"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.yulong_pusher import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
from tfx.utils import json_utils
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class YulongPusher(base_component.BaseComponent):
"""A TFX component to push validated TensorFlow models to README.ml-pipelines-sdk.md model serving platform.
The `Pusher` component can be used to push an validated SavedModel from output
of the [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) to
[TensorFlow Serving](https://www.tensorflow.org/tfx/serving). The Pusher
will check the validation results from the [Evaluator
component](https://www.tensorflow.org/tfx/guide/evaluator) and [InfraValidator
component](https://www.tensorflow.org/tfx/guide/infra_validator)
before deploying the model. If the model has not been blessed, then the model
will not be pushed.
*Note:* The executor for this component can be overriden to enable the model
to be pushed to other serving platforms than tf.serving. The [Cloud AI
Platform custom
executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/pusher)
provides an example how to implement this.
## Example
```
# Checks whether the model passed the validation steps and pushes the model
# to README.ml-pipelines-sdk.md file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
```
"""
SPEC_CLASS = PusherSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
model: types.Channel = None,
model_blessing: Optional[types.Channel] = None,
infra_blessing: Optional[types.Channel] = None,
push_destination: Optional[Union[pusher_pb2.PushDestination,
Dict[Text, Any]]] = None,
custom_config: Optional[Dict[Text, Any]] = None,
custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
pushed_model: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct README.ml-pipelines-sdk.md Pusher component.
Args:
model: A Channel of type `standard_artifacts.Model`, usually produced by
README.ml-pipelines-sdk.md Trainer component.
model_blessing: An optional Channel of type
`standard_artifacts.ModelBlessing`, usually produced from an Evaluator
component.
infra_blessing: An optional Channel of type
`standard_artifacts.InfraBlessing`, usually produced from an
InfraValidator component.
push_destination: A pusher_pb2.PushDestination instance, providing info
for tensorflow serving to load models. Optional if executor_class
doesn't require push_destination. If any field is provided as README.ml-pipelines-sdk.md
RuntimeParameter, push_destination should be constructed as README.ml-pipelines-sdk.md dict with
the same field names as PushDestination proto message.
custom_config: A dict which contains the deployment job parameters to be
passed to cloud-based training platforms. The [Kubeflow example](
https://github.com/tensorflow/tfx/blob/6ff57e36a7b65818d4598d41e584a42584d361e6/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py#L278-L285)
contains an example how this can be used by custom executors.
custom_executor_spec: Optional custom executor spec.
pushed_model: Optional output `standard_artifacts.PushedModel` channel
with result of push.
instance_name: Optional unique instance name. Necessary if multiple Pusher
components are declared in the same pipeline.
"""
pushed_model = pushed_model or types.Channel(
type=standard_artifacts.PushedModel)
if push_destination is None and not custom_executor_spec:
raise ValueError('push_destination is required unless README.ml-pipelines-sdk.md '
'custom_executor_spec is supplied that does not require '
'it.')
spec = PusherSpec(
model=model,
model_blessing=model_blessing,
infra_blessing=infra_blessing,
push_destination=push_destination,
custom_config=json_utils.dumps(custom_config),
pushed_model=pushed_model)
super(YulongPusher, self).__init__(
spec=spec,
custom_executor_spec=custom_executor_spec,
instance_name=instance_name) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/components/yulong_pusher/component.py | 0.861626 | 0.711067 | component.py | pypi |
"""TFX Evaluator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, Union
from absl import logging
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.evaluator import executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.proto import evaluator_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import EvaluatorSpec
from tfx.utils import json_utils
class Evaluator(base_component.BaseComponent):
"""A TFX component to evaluate models trained by README.ml-pipelines-sdk.md TFX Trainer component.
See [Evaluator](https://www.tensorflow.org/tfx/guide/evaluator) for more
information on what this component's required inputs are, how to configure it,
and what outputs it produces.
"""
SPEC_CLASS = EvaluatorSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
examples: types.Channel = None,
model: types.Channel = None,
baseline_model: Optional[types.Channel] = None,
# TODO(b/148618405): deprecate feature_slicing_spec.
feature_slicing_spec: Optional[Union[evaluator_pb2.FeatureSlicingSpec,
Dict[Text, Any]]] = None,
fairness_indicator_thresholds: Optional[List[Union[
float, data_types.RuntimeParameter]]] = None,
example_splits: Optional[List[Text]] = None,
evaluation: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
eval_config: Optional[tfma.EvalConfig] = None,
blessing: Optional[types.Channel] = None,
schema: Optional[types.Channel] = None,
module_file: Optional[Text] = None,
module_path: Optional[Text] = None):
"""Construct an Evaluator component.
Args:
examples: A Channel of type `standard_artifacts.Examples`, usually
produced by an ExampleGen component. _required_
model: A Channel of type `standard_artifacts.Model`, usually produced by
README.ml-pipelines-sdk.md Trainer component.
baseline_model: An optional channel of type 'standard_artifacts.Model' as
the baseline model for model diff and model validation purpose.
feature_slicing_spec:
Deprecated, please use eval_config instead. Only support estimator.
[evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
instance that describes how Evaluator should slice the data. If any
field is provided as README.ml-pipelines-sdk.md RuntimeParameter, feature_slicing_spec should
be constructed as README.ml-pipelines-sdk.md dict with the same field names as
FeatureSlicingSpec proto message.
fairness_indicator_thresholds: Optional list of float (or
RuntimeParameter) threshold values for use with TFMA fairness
indicators. Experimental functionality: this interface and
functionality may change at any time. TODO(b/142653905): add README.ml-pipelines-sdk.md link
to additional documentation for TFMA fairness indicators here.
example_splits: Names of splits on which the metrics are computed.
Default behavior (when example_splits is set to None or Empty) is using
the 'eval' split.
evaluation: Channel of `ModelEvaluation` to store the evaluation results.
instance_name: Optional name assigned to this specific instance of
Evaluator. Required only if multiple Evaluator components are declared
in the same pipeline. Either `model_exports` or `model` must be present
in the input arguments.
eval_config: Instance of tfma.EvalConfig containg configuration settings
for running the evaluation. This config has options for both estimator
and Keras.
blessing: Output channel of 'ModelBlessing' that contains the
blessing result.
schema: A `Schema` channel to use for TFXIO.
module_file: A path to python module file containing UDFs for Evaluator
customization. The module_file can implement following functions at its
top level.
def custom_eval_shared_model(
eval_saved_model_path, model_name, eval_config, **kwargs,
) -> tfma.EvalSharedModel:
def custom_extractors(
eval_shared_model, eval_config, tensor_adapter_config,
) -> List[tfma.extractors.Extractor]:
module_path: A python path to the custom module that contains the UDFs.
See 'module_file' for the required signature of UDFs. Note this can
not be set together with module_file.
"""
if bool(module_file) and bool(module_path):
raise ValueError(
'Python module path can not be set together with module file path.')
if eval_config is not None and feature_slicing_spec is not None:
raise ValueError("Exactly one of 'eval_config' or 'feature_slicing_spec' "
"must be supplied.")
if eval_config is None and feature_slicing_spec is None:
feature_slicing_spec = evaluator_pb2.FeatureSlicingSpec()
logging.info('Neither eval_config nor feature_slicing_spec is passed, '
'the model is treated as estimator.')
if feature_slicing_spec:
logging.warning('feature_slicing_spec is deprecated, please use '
'eval_config instead.')
blessing = blessing or types.Channel(type=standard_artifacts.ModelBlessing)
evaluation = types.Channel(type=standard_artifacts.ModelEvaluation)
spec = EvaluatorSpec(
examples=examples,
model=model,
baseline_model=baseline_model,
feature_slicing_spec=feature_slicing_spec,
fairness_indicator_thresholds=fairness_indicator_thresholds,
example_splits=json_utils.dumps(example_splits),
evaluation=evaluation,
eval_config=eval_config,
blessing=blessing,
schema=schema,
module_file=module_file,
module_path=module_path)
super(Evaluator, self).__init__(spec=spec, instance_name=instance_name) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/components/evaluator/component.py | 0.807537 | 0.350421 | component.py | pypi |
"""Generic TFX model evaluator executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Text
from absl import logging
import apache_beam as beam
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import constants as tfma_constants
# Need to import the following module so that the fairness indicator post-export
# metric is registered.
import tensorflow_model_analysis.addons.fairness.post_export_metrics.fairness_indicators # pylint: disable=unused-import
from tfx import types
from tfx.components.evaluator import constants
from tfx.components.util import tfxio_utils
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_executor
from tfx.proto import evaluator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BASELINE_MODEL_KEY
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EVAL_CONFIG_KEY
from tfx.types.standard_component_specs import EVALUATION_KEY
from tfx.types.standard_component_specs import EXAMPLE_SPLITS_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import FEATURE_SLICING_SPEC_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import SCHEMA_KEY
from tfx.utils import io_utils
from tfx.utils import json_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx_bsl.tfxio import tensor_adapter
_TELEMETRY_DESCRIPTORS = ['Evaluator']
class Executor(base_executor.BaseExecutor):
  """Executor for [Evaluator](https://www.tensorflow.org/tfx/guide/evaluator)."""

  def _get_slice_spec_from_feature_slicing_spec(
      self, spec: evaluator_pb2.FeatureSlicingSpec
  ) -> List[tfma.slicer.SingleSliceSpec]:
    """Given a feature slicing spec, returns a list of SingleSliceSpecs.

    Args:
      spec: slice specification.

    Returns:
      List of corresponding SingleSliceSpecs. Always includes the overall
      slice, even if it was not specified in the given spec.
    """
    result = []
    for single_spec in spec.specs:
      columns = single_spec.column_for_slicing
      result.append(tfma.slicer.SingleSliceSpec(columns=columns))
    # Always include the overall slice.
    if tfma.slicer.SingleSliceSpec() not in result:
      result.append(tfma.slicer.SingleSliceSpec())
    return result

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Runs a batch job to evaluate the eval_model against the given input.

    Args:
      input_dict: Input dict from input key to a list of Artifacts.
        - model: exported model.
        - examples: examples for evaluating the model.
      output_dict: Output dict from output key to a list of Artifacts.
        - evaluation: model evaluation results.
      exec_properties: A dict of execution properties.
        - eval_config: JSON string of tfma.EvalConfig.
        - feature_slicing_spec: JSON string of evaluator_pb2.FeatureSlicingSpec
          instance, providing the way to slice the data. Deprecated, use
          eval_config.slicing_specs instead.
        - example_splits: JSON-serialized list of names of splits on which the
          metrics are computed. Default behavior (when example_splits is set
          to None) is using the 'eval' split.

    Returns:
      None

    Raises:
      ValueError: If required keys are missing, or more than one candidate /
        baseline model (or more than two model specs) is supplied.
    """
    # Validate the input/output artifact structure before doing any work.
    if EXAMPLES_KEY not in input_dict:
      raise ValueError('EXAMPLES_KEY is missing from input dict.')
    if EVALUATION_KEY not in output_dict:
      raise ValueError('EVALUATION_KEY is missing from output dict.')
    if MODEL_KEY in input_dict and len(input_dict[MODEL_KEY]) > 1:
      raise ValueError('There can be only one candidate model, there are %d.' %
                       (len(input_dict[MODEL_KEY])))
    if BASELINE_MODEL_KEY in input_dict and len(
        input_dict[BASELINE_MODEL_KEY]) > 1:
      raise ValueError('There can be only one baseline model, there are %d.' %
                       (len(input_dict[BASELINE_MODEL_KEY])))

    self._log_startup(input_dict, output_dict, exec_properties)

    # Add fairness indicator metric callback if necessary.
    fairness_indicator_thresholds = exec_properties.get(
        'fairness_indicator_thresholds', None)
    add_metrics_callbacks = None
    if fairness_indicator_thresholds:
      add_metrics_callbacks = [
          tfma.post_export_metrics.fairness_indicators(  # pytype: disable=module-attr
              thresholds=fairness_indicator_thresholds),
      ]

    output_uri = artifact_utils.get_single_uri(
        output_dict[constants.EVALUATION_KEY])

    # A user-supplied model loader UDF takes precedence over TFMA's default.
    eval_shared_model_fn = udf_utils.try_get_fn(
        exec_properties=exec_properties,
        fn_name='custom_eval_shared_model') or tfma.default_eval_shared_model

    run_validation = False
    models = []
    if EVAL_CONFIG_KEY in exec_properties and exec_properties[EVAL_CONFIG_KEY]:
      # Modern path: configuration comes from a tfma.EvalConfig proto.
      slice_spec = None
      has_baseline = bool(input_dict.get(BASELINE_MODEL_KEY))
      eval_config = tfma.EvalConfig()
      proto_utils.json_to_proto(exec_properties[EVAL_CONFIG_KEY], eval_config)
      eval_config = tfma.update_eval_config_with_defaults(
          eval_config, has_baseline=has_baseline)
      tfma.verify_eval_config(eval_config)
      # Do not validate model when there is no thresholds configured. This is
      # to avoid accidentally blessing models when users forget to set
      # thresholds.
      run_validation = bool(
          tfma.metrics.metric_thresholds_from_metrics_specs(
              eval_config.metrics_specs))
      if len(eval_config.model_specs) > 2:
        raise ValueError(
            """Cannot support more than two models. There are %d models in this
eval_config.""" % (len(eval_config.model_specs)))
      # Extract model artifacts.
      for model_spec in eval_config.model_specs:
        if MODEL_KEY not in input_dict:
          # Model-less evaluation: predictions must already be in the data.
          if not model_spec.prediction_key:
            raise ValueError(
                'model_spec.prediction_key required if model not provided')
          continue
        if model_spec.is_baseline:
          model_uri = artifact_utils.get_single_uri(
              input_dict[BASELINE_MODEL_KEY])
        else:
          model_uri = artifact_utils.get_single_uri(input_dict[MODEL_KEY])
        # Estimators are loaded from the eval saved model; everything else
        # from the serving model.
        if tfma.get_model_type(model_spec) == tfma.TF_ESTIMATOR:
          model_path = path_utils.eval_model_path(model_uri)
        else:
          model_path = path_utils.serving_model_path(model_uri)
        logging.info('Using %s as %s model.', model_path, model_spec.name)
        models.append(
            eval_shared_model_fn(
                eval_saved_model_path=model_path,
                model_name=model_spec.name,
                eval_config=eval_config,
                add_metrics_callbacks=add_metrics_callbacks))
    else:
      # Legacy path: deprecated feature_slicing_spec (estimator only).
      eval_config = None
      assert (FEATURE_SLICING_SPEC_KEY in exec_properties and
              exec_properties[FEATURE_SLICING_SPEC_KEY]
             ), 'both eval_config and feature_slicing_spec are unset.'
      feature_slicing_spec = evaluator_pb2.FeatureSlicingSpec()
      proto_utils.json_to_proto(exec_properties[FEATURE_SLICING_SPEC_KEY],
                                feature_slicing_spec)
      slice_spec = self._get_slice_spec_from_feature_slicing_spec(
          feature_slicing_spec)
      model_uri = artifact_utils.get_single_uri(input_dict[MODEL_KEY])
      model_path = path_utils.eval_model_path(model_uri)
      logging.info('Using %s for model eval.', model_path)
      models.append(
          eval_shared_model_fn(
              eval_saved_model_path=model_path,
              model_name='',
              eval_config=None,
              add_metrics_callbacks=add_metrics_callbacks))

    eval_shared_model = models[0] if len(models) == 1 else models
    schema = None
    if SCHEMA_KEY in input_dict:
      schema = io_utils.SchemaReader().read(
          io_utils.get_only_uri_in_dir(
              artifact_utils.get_single_uri(input_dict[SCHEMA_KEY])))

    # Load and deserialize example splits from execution properties.
    example_splits = json_utils.loads(
        exec_properties.get(EXAMPLE_SPLITS_KEY, 'null'))
    if not example_splits:
      example_splits = ['eval']
      logging.info("The 'example_splits' parameter is not set, using 'eval' "
                   'split.')

    logging.info('Evaluating model.')
    with self._make_beam_pipeline() as pipeline:
      examples_list = []
      tensor_adapter_config = None
      # pylint: disable=expression-not-assigned
      if tfma.is_batched_input(eval_shared_model, eval_config):
        # Batched (Arrow-based) input: read each split through TFXIO.
        tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
            examples=[
                artifact_utils.get_single_instance(input_dict[EXAMPLES_KEY])
            ],
            telemetry_descriptors=_TELEMETRY_DESCRIPTORS,
            schema=schema,
            raw_record_column_name=tfma_constants.ARROW_INPUT_COLUMN)
        # TODO(b/161935932): refactor after TFXIO supports multiple patterns.
        for split in example_splits:
          file_pattern = io_utils.all_files_pattern(
              artifact_utils.get_split_uri(input_dict[EXAMPLES_KEY], split))
          tfxio = tfxio_factory(file_pattern)
          data = (
              pipeline
              | 'ReadFromTFRecordToArrow[%s]' % split >> tfxio.BeamSource())
          examples_list.append(data)
        if schema is not None:
          # Use last tfxio as TensorRepresentations and ArrowSchema are fixed.
          tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
              arrow_schema=tfxio.ArrowSchema(),
              tensor_representations=tfxio.TensorRepresentations())
      else:
        # Legacy path: raw serialized tf.Examples from TFRecord files.
        for split in example_splits:
          file_pattern = io_utils.all_files_pattern(
              artifact_utils.get_split_uri(input_dict[EXAMPLES_KEY], split))
          data = (
              pipeline
              | 'ReadFromTFRecord[%s]' % split >>
              beam.io.ReadFromTFRecord(file_pattern=file_pattern))
          examples_list.append(data)

      # Optional user-supplied extractors UDF.
      custom_extractors = udf_utils.try_get_fn(
          exec_properties=exec_properties, fn_name='custom_extractors')
      extractors = None
      if custom_extractors:
        extractors = custom_extractors(
            eval_shared_model=eval_shared_model,
            eval_config=eval_config,
            tensor_adapter_config=tensor_adapter_config)

      (examples_list | 'FlattenExamples' >> beam.Flatten()
       |
       'ExtractEvaluateAndWriteResults' >> tfma.ExtractEvaluateAndWriteResults(
           eval_shared_model=models[0] if len(models) == 1 else models,
           eval_config=eval_config,
           extractors=extractors,
           output_path=output_uri,
           slice_spec=slice_spec,
           tensor_adapter_config=tensor_adapter_config))
    logging.info('Evaluation complete. Results written to %s.', output_uri)

    if not run_validation:
      # TODO(jinhuang): delete the BLESSING_KEY from output_dict when
      # supported.
      logging.info('No threshold configured, will not validate model.')
      return
    # Set up blessing artifact with provenance of the evaluated models.
    blessing = artifact_utils.get_single_instance(output_dict[BLESSING_KEY])
    blessing.set_string_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_URI_KEY,
        artifact_utils.get_single_uri(input_dict[MODEL_KEY]))
    blessing.set_int_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY,
        input_dict[MODEL_KEY][0].id)
    if input_dict.get(BASELINE_MODEL_KEY):
      baseline_model = input_dict[BASELINE_MODEL_KEY][0]
      blessing.set_string_custom_property(
          constants.ARTIFACT_PROPERTY_BASELINE_MODEL_URI_KEY,
          baseline_model.uri)
      blessing.set_int_custom_property(
          constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY, baseline_model.id)
    if 'current_component_id' in exec_properties:
      blessing.set_string_custom_property(
          'component_id', exec_properties['current_component_id'])
    # Check validation result and write BLESSED file accordingly.
    logging.info('Checking validation results.')
    validation_result = tfma.load_validation_result(output_uri)
    if validation_result.validation_ok:
      io_utils.write_string_file(
          os.path.join(blessing.uri, constants.BLESSED_FILE_NAME), '')
      blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
                                       constants.BLESSED_VALUE)
    else:
      io_utils.write_string_file(
          os.path.join(blessing.uri, constants.NOT_BLESSED_FILE_NAME), '')
      blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
                                       constants.NOT_BLESSED_VALUE)
    logging.info('Blessing result %s written to %s.',
                 validation_result.validation_ok, blessing.uri) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/components/evaluator/executor.py | 0.924043 | 0.203401 | executor.py | pypi
# TODO(b/149535307): Remove __future__ imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from typing import List, Text
from tfx.dsl.components.base import base_executor
from tfx.types import channel_utils
from tfx.utils import import_utils
from tfx.utils import proto_utils
from google.protobuf import message
def run_component(
    full_component_class_name: Text,
    temp_directory_path: Text = None,
    beam_pipeline_args: List[Text] = None,
    **arguments
):
  r"""Loads a component, instantiates it with arguments and runs its executor.

  The component class is instantiated, so the component code is executed,
  not just the executor code.

  To pass an artifact URI, use the <input_name>_uri argument name.
  To pass an artifact property, use the <input_name>_<property> argument name.
  Protobuf property values can be passed as JSON-serialized protobufs.

  # pylint: disable=line-too-long

  Example::

    # When run as a script:
    python3 scripts/run_component.py \
      --full-component-class-name tfx.components.StatisticsGen \
      --examples-uri gs://my_bucket/chicago_taxi_simple/CsvExamplesGen/examples/1/ \
      --examples-split-names '["train", "eval"]' \
      --output-uri gs://my_bucket/chicago_taxi_simple/StatisticsGen/output/1/

    # When run as a function:
    run_component(
        full_component_class_name='tfx.components.StatisticsGen',
        examples_uri='gs://my_bucket/chicago_taxi_simple/CsvExamplesGen/examples/1/',
        examples_split_names='["train", "eval"]',
        output_uri='gs://my_bucket/chicago_taxi_simple/StatisticsGen/output/1/',
    )

  Args:
    full_component_class_name: The component class name including module name.
    temp_directory_path: Optional. Temporary directory path for the executor.
    beam_pipeline_args: Optional. Arguments to pass to the Beam pipeline.
    **arguments: Key-value pairs with component arguments.
  """
  component_class = import_utils.import_class_by_path(full_component_class_name)

  component_arguments = {}

  # Resolve execution-parameter values; protobuf-typed parameters are parsed
  # from their JSON serialization.
  for name, execution_param in component_class.SPEC_CLASS.PARAMETERS.items():
    argument_value = arguments.get(name, None)
    if argument_value is None:
      continue
    param_type = execution_param.type
    if (isinstance(param_type, type) and
        issubclass(param_type, message.Message)):
      argument_value_obj = param_type()
      proto_utils.json_to_proto(argument_value, argument_value_obj)
    else:
      argument_value_obj = argument_value
    component_arguments[name] = argument_value_obj

  # Build input channels from <input_name>_uri / <input_name>_path arguments.
  for input_name, channel_param in component_class.SPEC_CLASS.INPUTS.items():
    uri = (arguments.get(input_name + '_uri') or
           arguments.get(input_name + '_path'))
    if uri:
      artifact = channel_param.type()
      artifact.uri = uri
      # Setting the artifact properties
      for property_name in channel_param.type.PROPERTIES:
        property_arg_name = input_name + '_' + property_name
        if property_arg_name in arguments:
          setattr(artifact, property_name, arguments[property_arg_name])
      component_arguments[input_name] = channel_utils.as_channel([artifact])

  component_instance = component_class(**component_arguments)

  input_dict = channel_utils.unwrap_channel_dict(
      component_instance.inputs.get_all())
  output_dict = channel_utils.unwrap_channel_dict(
      component_instance.outputs.get_all())
  exec_properties = component_instance.exec_properties

  # Generating paths for output artifacts
  for output_name, channel_param in component_class.SPEC_CLASS.OUTPUTS.items():
    uri = (arguments.get('output_' + output_name + '_uri') or
           arguments.get(output_name + '_uri') or
           arguments.get(output_name + '_path'))
    if uri:
      artifacts = output_dict[output_name]
      if not artifacts:
        artifacts.append(channel_param.type())
      for artifact in artifacts:
        artifact.uri = uri

  executor_context = base_executor.BaseExecutor.Context(
      beam_pipeline_args=beam_pipeline_args,
      tmp_dir=temp_directory_path,
      unique_id='',
  )
  executor = component_instance.executor_spec.executor_class(executor_context)
  executor.Do(
      input_dict=input_dict,
      output_dict=output_dict,
      exec_properties=exec_properties,
  )

  # Writing out the output artifact properties
  for output_name, channel_param in component_class.SPEC_CLASS.OUTPUTS.items():
    for property_name in channel_param.type.PROPERTIES:
      property_path_arg_name = output_name + '_' + property_name + '_path'
      property_path = arguments.get(property_path_arg_name)
      if property_path:
        artifacts = output_dict[output_name]
        for artifact in artifacts:
          property_value = getattr(artifact, property_name)
          os.makedirs(os.path.dirname(property_path), exist_ok=True)
          with open(property_path, 'w') as f:
            f.write(str(property_value))
if __name__ == '__main__':
params = sys.argv[1::2]
values = sys.argv[2::2]
args = {
param.lstrip('-').replace('-', '_'): value
for param, value in zip(params, values)
}
run_component(**args) | /rflow_tfx-1.1.18-py3-none-any.whl/tfx/scripts/run_component.py | 0.441914 | 0.192198 | run_component.py | pypi |
"""Common script to invoke TFX executors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import base64
import json
import absl
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.utils import import_utils
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
def _run_executor(args, pipeline_args) -> None:
  r"""Selects a particular executor and runs it based on name.

  # pylint: disable=line-too-long
  _run_executor() is used to invoke a class subclassing
  tfx.dsl.components.base.base_executor.BaseExecutor. This function can be
  used for both invoking the executor on remote environments as well as for
  unit testing of executors.

  How to invoke an executor as standalone:
  # TODO(b/132958430): Create utility script to generate arguments for run_executor.py
  First, the input data needs to be prepared. An easy way to generate the test
  data is to fully run the pipeline once. This will generate the data to be
  used for testing as well as log the artifacts to be used as input
  parameters. In each executed component, three log entries will be generated
  similar to the below:

  ```
  [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,116] {base_executor.py:72} INFO - Starting Executor execution.

  [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:74} INFO - Inputs for Executor is: {"input_base": [{"artifact": {"id": "1", "typeId": "1", "uri": "/usr/local/google/home/khaas/taxi/data/simple", "properties": {"split": {"stringValue": ""}, "state": {"stringValue": "published"}, "span": {"intValue": "1"}, "type_name": {"stringValue": "ExternalPath"}}}, "artifact_type": {"id": "1", "name": "ExternalPath", "properties": {"span": "INT", "name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING"}}}]}

  [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:76} INFO - Outputs for Executor is: {"examples": [{"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/train/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "train"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}, {"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/eval/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "eval"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}]}

  [2019-05-16 08:59:27,117] {logging_mixin.py:95} INFO - [2019-05-16 08:59:27,117] {base_executor.py:78} INFO - Execution properties for Executor is: {"output": "{ \"splitConfig\": {\"splits\": [{\"name\": \"train\", \"hashBuckets\": 2}, {\"name\": \"eval\",\"hashBuckets\": 1}]}}"}
  ```

  Each of these map directly to the input parameters expected by
  run_executor():

  ```
  python scripts/run_executor.py \
      --executor_class_path=tfx.components.example_gen.csv_example_gen.executor.Executor \
      --inputs={"input_base": [{"artifact": {"id": "1", "typeId": "1", "uri": "/usr/local/google/home/khaas/taxi/data/simple", "properties": {"split": {"stringValue": ""}, "state": {"stringValue": "published"}, "span": {"intValue": "1"}, "type_name": {"stringValue": "ExternalPath"}}}, "artifact_type": {"id": "1", "name": "ExternalPath", "properties": {"span": "INT", "name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING"}}}]} \
      --outputs={"examples": [{"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/train/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "train"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}, {"artifact": {"uri": "/usr/local/google/home/khaas/tfx/pipelines/chicago_taxi_simple/CsvExampleGen/examples/1/eval/", "properties": {"type_name": {"stringValue": "ExamplesPath"}, "split": {"stringValue": "eval"}, "span": {"intValue": "1"}}}, "artifact_type": {"name": "ExamplesPath", "properties": {"name": "STRING", "type_name": "STRING", "split": "STRING", "state": "STRING", "span": "INT"}}}]} \
      --exec-properties={"output": "{ \"splitConfig\": {\"splits\": [{\"name\": \"train\", \"hashBuckets\": 2}, {\"name\": \"eval\",\"hashBuckets\": 1}]}}"}
  ```
  # pylint: disable=line-too-long

  Args:
    args:
      - inputs: The input artifacts for this execution, serialized as JSON.
      - outputs: The output artifacts to be generated by this execution,
        serialized as JSON.
      - exec_properties: The execution properties to be used by this
        execution, serialized as JSON.
    pipeline_args: Optional parameter that maps to the optional_pipeline_args
      parameter in the pipeline, which provides additional configuration
      options for apache-beam and tensorflow.logging.

  Returns:
    None

  Raises:
    None
  """
  absl.logging.set_verbosity(absl.logging.INFO)

  # Each of inputs/outputs/exec_properties may arrive either plain or
  # base64-encoded; exactly one of each pair is set (enforced by argparse
  # mutually-exclusive groups in main()).
  (inputs_str, outputs_str,
   exec_properties_str) = (args.inputs or base64.b64decode(args.inputs_base64),
                           args.outputs or
                           base64.b64decode(args.outputs_base64),
                           args.exec_properties or
                           base64.b64decode(args.exec_properties_base64))

  inputs = artifact_utils.parse_artifact_dict(inputs_str)
  outputs = artifact_utils.parse_artifact_dict(outputs_str)
  exec_properties = json.loads(exec_properties_str)
  absl.logging.info(
      'Executor {} do: inputs: {}, outputs: {}, exec_properties: {}'.format(
          args.executor_class_path, inputs, outputs, exec_properties))
  executor_cls = import_utils.import_class_by_path(args.executor_class_path)
  executor_context = base_executor.BaseExecutor.Context(
      beam_pipeline_args=pipeline_args,
      tmp_dir=args.temp_directory_path,
      unique_id='')
  executor = executor_cls(executor_context)
  absl.logging.info('Starting executor')
  executor.Do(inputs, outputs, exec_properties)

  # The last line of stdout will be pushed to xcom by Airflow.
  if args.write_outputs_stdout:
    print(artifact_utils.jsonify_artifact_dict(outputs))
def main(argv):
  """Parses the arguments for _run_executor() then invokes it.

  # pylint: disable=line-too-long
  Args:
    argv: Unparsed arguments for run_executor.py
      --executor_class_path: Python class of executor in format of <module>.<class>.
      --temp_directory_path: Common temp directory path for executors.
      --inputs: JSON serialized dict of input artifacts. If the input needs to be base64-encoded, use --inputs-base64 instead.
      --inputs-base64: base64-encoded JSON serialized dict of input artifacts. If the input is not base64-encoded, use --inputs instead.
      --outputs: JSON serialized dict of output artifacts. If the output needs to be base64-encoded, use --outputs-base64 instead.
      --outputs-base64: base64-encoded JSON serialized dict of output artifacts. If the output is not base64-encoded, use --outputs instead.
      --exec-properties: JSON serialized dict of (non artifact) execution properties. If the execution properties need to be base64-encoded, use --exec-properties-base64 instead.
      --exec-properties-base64: base64-encoded JSON serialized dict of (non artifact) execution properties. If the execution properties are not base64-encoded, use --exec-properties instead.
      --write-outputs-stdout: Write outputs to last line of stdout, which will be pushed to xcom in Airflow. Please ignore by other users or orchestrators.
  # pylint: disable=line-too-long

  Returns:
    None

  Raises:
    None
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--executor_class_path',
      type=str,
      required=True,
      help='Python class of executor in format of <module>.<class>.')
  parser.add_argument(
      '--temp_directory_path',
      type=str,
      help='common temp directory path for executors')
  # Plain and base64-encoded variants of each payload are mutually exclusive;
  # argparse maps the dashed flags to underscored attributes (inputs_base64,
  # exec_properties, ...) which _run_executor reads.
  inputs_group = parser.add_mutually_exclusive_group(required=True)
  inputs_group.add_argument(
      '--inputs',
      type=str,
      help='json serialized dict of input artifacts.')
  inputs_group.add_argument(
      '--inputs-base64',
      type=str,
      help='base64 encoded json serialized dict of input artifacts.')
  outputs_group = parser.add_mutually_exclusive_group(required=True)
  outputs_group.add_argument(
      '--outputs',
      type=str,
      help='json serialized dict of output artifacts.')
  outputs_group.add_argument(
      '--outputs-base64',
      type=str,
      help='base64 encoded json serialized dict of output artifacts.')
  execution_group = parser.add_mutually_exclusive_group(required=True)
  execution_group.add_argument(
      '--exec-properties',
      type=str,
      help='json serialized dict of (non artifact) execution properties.')
  execution_group.add_argument(
      '--exec-properties-base64',
      type=str,
      # Fixed: this help previously omitted "base64 encoded", making it
      # indistinguishable from --exec-properties.
      help='base64 encoded json serialized dict of (non artifact) execution '
      'properties.')
  parser.add_argument(
      '--write-outputs-stdout',
      dest='write_outputs_stdout',
      action='store_true',
      help='Write outputs to last line of stdout, which will '
      'be pushed to xcom in Airflow. Please ignore by other users or '
      'orchestrators.')
  # Unrecognized arguments are forwarded to the executor as Beam pipeline args.
  args, beam_pipeline_args = parser.parse_known_args(argv)
  _run_executor(args, beam_pipeline_args)
# Script entry point: delegate startup to absl so flags and logging are
# initialized before main() receives the remaining command-line arguments.
if __name__ == '__main__':
  app.run(main=main)
"""TFX Channel definition."""
import inspect
import json
import textwrap
from typing import Any, Dict, Iterable, Optional, Text, Type, Union
from tfx.types import artifact_utils
from tfx.types.artifact import Artifact
from tfx.utils import json_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
# Property type for artifacts, executions and contexts. Restricted to the
# primitive value types that can be stored as (custom) properties.
Property = Union[int, float, str]
class Channel(json_utils.Jsonable):
  """Tfx Channel.

  TFX Channel is an abstract concept that connects data producers and data
  consumers. It contains restriction of the artifact type that should be fed
  into or read from it.

  Attributes:
    type: The artifact type class that the Channel takes.
  """

  # TODO(b/125348988): Add support for real Channel in addition to static ones.

  def __init__(
      self,
      type: Type[Artifact],  # pylint: disable=redefined-builtin
      additional_properties: Optional[Dict[str, Property]] = None,
      additional_custom_properties: Optional[Dict[str, Property]] = None,
      # TODO(b/161490287): deprecate static artifact.
      artifacts: Optional[Iterable[Artifact]] = None,
      matching_channel_name: Optional[str] = None,
      producer_component_id: Optional[str] = None,
      output_key: Optional[Text] = None):
    """Initialization of Channel.

    Args:
      type: Subclass of Artifact that represents the type of this Channel.
      additional_properties: (Optional) A mapping of properties which will be
        added to artifacts when this channel is used as an output of components.
      additional_custom_properties: (Optional) A mapping of custom_properties
        which will be added to artifacts when this channel is used as an output
        of components.
      artifacts: (Optional) A collection of artifacts as the values that can be
        read from the Channel. This is used to construct a static Channel.
      matching_channel_name: This targets to the key of an input Channel dict
        in a Component. The artifacts count of this channel will be decided at
        runtime in Driver, based on the artifacts count of the target channel.
        Only one of `artifacts` and `matching_channel_name` should be set.
      producer_component_id: (Optional) Producer component id of the Channel.
      output_key: (Optional) The output key when producer component produces
        the artifacts in this Channel.

    Raises:
      ValueError: If `type` is not a subclass of Artifact, or if both
        `artifacts` and `matching_channel_name` are set.
    """
    if not (inspect.isclass(type) and issubclass(type, Artifact)):  # pytype: disable=wrong-arg-types
      raise ValueError(
          'Argument "type" of Channel constructor must be a subclass of '
          'tfx.Artifact (got %r).' % (type,))
    self.type = type
    self._artifacts = artifacts or []
    self.matching_channel_name = matching_channel_name
    if self.matching_channel_name and self._artifacts:
      raise ValueError(
          'Only one of `artifacts` and `matching_channel_name` should be set.')
    self._validate_type()
    self.additional_properties = additional_properties or {}
    self.additional_custom_properties = additional_custom_properties or {}
    # The following fields will be populated during compilation time.
    self.producer_component_id = producer_component_id
    self.output_key = output_key

  @property
  def type_name(self):
    """Name of the artifact type carried by this Channel."""
    return self.type.TYPE_NAME

  def __repr__(self):
    artifacts_str = '\n    '.join(repr(a) for a in self._artifacts)
    return textwrap.dedent("""\
        Channel(
            type_name: {}
            artifacts: [{}]
            additional_properties: {}
            additional_custom_properties: {}
        )""").format(self.type_name, artifacts_str, self.additional_properties,
                     self.additional_custom_properties)

  def _validate_type(self) -> None:
    # Every statically provided artifact must carry this Channel's type.
    for artifact in self._artifacts:
      if artifact.type_name != self.type_name:
        raise ValueError(
            "Artifacts provided do not match Channel's artifact type {}".format(
                self.type_name))

  def get(self) -> Iterable[Artifact]:
    """Returns all artifacts that can be get from this Channel.

    Returns:
      An artifact collection.
    """
    # TODO(b/125037186): We should support dynamic query against a Channel
    # instead of a static Artifact collection.
    return self._artifacts

  def to_json_dict(self) -> Dict[Text, Any]:
    """Serializes this Channel (type, artifacts and properties) to a dict."""
    return {
        'type':
            json.loads(
                json_format.MessageToJson(
                    message=self.type._get_artifact_type(),  # pylint: disable=protected-access
                    preserving_proto_field_name=True)),
        'artifacts':
            list(a.to_json_dict() for a in self._artifacts),
        'additional_properties': self.additional_properties,
        'additional_custom_properties': self.additional_custom_properties,
        'producer_component_id':
            (self.producer_component_id if self.producer_component_id else None
            ),
        'output_key': (self.output_key if self.output_key else None),
    }

  @classmethod
  def from_json_dict(cls, dict_data: Dict[Text, Any]) -> Any:
    """Rebuilds a Channel from the dict produced by `to_json_dict`."""
    artifact_type = metadata_store_pb2.ArtifactType()
    json_format.Parse(json.dumps(dict_data['type']), artifact_type)
    type_cls = artifact_utils.get_artifact_type_class(artifact_type)
    artifacts = list(Artifact.from_json_dict(a) for a in dict_data['artifacts'])
    additional_properties = dict_data['additional_properties']
    additional_custom_properties = dict_data['additional_custom_properties']
    producer_component_id = dict_data.get('producer_component_id', None)
    output_key = dict_data.get('output_key', None)
    return Channel(
        type=type_cls,
        artifacts=artifacts,
        additional_properties=additional_properties,
        additional_custom_properties=additional_custom_properties,
        producer_component_id=producer_component_id,
        output_key=output_key)
"""Utilities for retrieving paths for various types of artifacts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Text
import absl
from tfx.dsl.io import fileio
from tfx.utils import io_utils
EVAL_MODEL_DIR = 'eval_model_dir'
SERVING_MODEL_DIR = 'serving_model_dir'

"""Directory structure of exported model for estimator based trainer:

  |-- <ModelExportPath>
      |-- EVAL_MODEL_DIR  <- eval_model_dir, eval_model_path
          |-- saved_model.pb
          |-- ...
      |-- SERVING_MODEL_DIR  <- serving_model_dir, serving_model_path
          |-- saved_model.pb
          |-- ...

For generic trainer with Keras, there won't be eval model:
  |-- <ModelExportPath>
      |-- SERVING_MODEL_DIR  <- serving_model_dir, serving_model_path
          |-- saved_model.pb
          |-- ...

TODO(b/160795287): Deprecate estimator based executor.
Support for estimator-based executor and model export will be
deprecated soon. The following estimator working directory
structure is still supported for backwards compatibility:

Directory structure of exported model for estimator based trainer:
  |-- <ModelExportPath>
      |-- EVAL_MODEL_DIR  <- eval_model_dir
          |-- <timestamped model>  <- eval_model_path
              |-- saved_model.pb
              |-- ...
      |-- SERVING_MODEL_DIR  <- serving_model_dir
          |-- export
              |-- <exporter name>
                  |-- <timestamped model>  <- serving_model_path
                      |-- saved_model.pb
                      |-- ...
          |-- ...
"""


def eval_model_dir(output_uri: Text) -> Text:
  """Returns the directory holding the model exported for evaluation."""
  eval_dir = os.path.join(output_uri, EVAL_MODEL_DIR)
  return eval_dir
def eval_model_path(output_uri: Text) -> Text:
  """Returns the final path to the model exported for evaluation."""
  model_dir = eval_model_dir(output_uri)
  saved_model_file = os.path.join(model_dir, 'saved_model.pb')
  if fileio.exists(saved_model_file):
    # New layout: saved_model.pb sits directly under eval_model_dir.
    return model_dir
  if fileio.exists(model_dir):
    # Legacy estimator layout: a single timestamped subdirectory.
    # TODO(b/160795287): Deprecate estimator based executor.
    absl.logging.warning('Support for estimator-based executor and model'
                         ' export will be deprecated soon. Please use'
                         ' export structure '
                         '<ModelExportPath>/eval_model_dir/saved_model.pb"')
    return io_utils.get_only_uri_in_dir(model_dir)
  # If eval model doesn't exist, use serving model for eval.
  return serving_model_path(output_uri)
def serving_model_dir(output_uri: Text) -> Text:
  """Returns the directory holding the model exported for serving."""
  serving_dir = os.path.join(output_uri, SERVING_MODEL_DIR)
  return serving_dir
def serving_model_path(output_uri: Text) -> Text:
  """Returns the path of the exported serving model."""
  model_dir = serving_model_dir(output_uri)
  export_dir = os.path.join(model_dir, 'export')
  if not fileio.exists(export_dir):
    # If dir doesn't match estimator structure, use serving model root directly.
    return model_dir
  # Legacy estimator layout: export/<exporter name>/<timestamped model>.
  # TODO(b/160795287): Deprecate estimator based executor.
  absl.logging.warning(
      'Support for estimator-based executor and model export'
      ' will be deprecated soon. Please use export structure '
      '<ModelExportPath>/serving_model_dir/saved_model.pb"')
  exporter_dir = io_utils.get_only_uri_in_dir(export_dir)
  return io_utils.get_only_uri_in_dir(exporter_dir)
"""Utilities to dump and load Jsonable object to/from JSONs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import importlib
import inspect
import json
from typing import Any, Dict, List, Text, Type, Union
from six import with_metaclass
from tfx.utils import deprecation_utils
from tfx.utils import proto_utils
from google.protobuf import message
# This is the special key to indicate the serialized object type.
# Depending on which, the utility knows how to deserialize it back to its
# original type.
_TFX_OBJECT_TYPE_KEY = '__tfx_object_type__'
# Keys holding the module and class name of the serialized object.
_MODULE_KEY = '__module__'
_CLASS_KEY = '__class__'
# Key holding the JSON-serialized payload of a proto-typed object.
_PROTO_VALUE_KEY = '__proto_value__'
# Regex locating an in-place serialized RuntimeParameter inside a larger JSON
# string; the \\* allows for quotes escaped at arbitrary nesting depth.
RUNTIME_PARAMETER_PATTERN = (r'({\\*"__class__\\*": \\*"RuntimeParameter\\*", '
                             r'.*?})')
class _ObjectType(object):
  """Internal class to hold supported types."""

  # Indicates that the JSON dictionary is an instance of Jsonable type.
  # The dictionary has the states of the object and the object type info is
  # stored as __module__ and __class__ fields.
  JSONABLE = 'jsonable'
  # Indicates that the JSON dictionary is a python class.
  # The class info is stored as __module__ and __class__ fields in the
  # dictionary.
  CLASS = 'class'
  # Indicates that the JSON dictionary is an instance of a proto.Message
  # subclass. The class info of the proto python class is stored as __module__
  # and __class__ fields in the dictionary. The serialized value of the proto is
  # stored in the dictionary with key of _PROTO_VALUE_KEY.
  PROTO = 'proto'
class Jsonable(abc.ABC):
  """Base class for serializing and deserializing objects to/from JSON.

  The default implementation assumes that the subclass can be restored by
  updating `self.__dict__` without invoking `self.__init__` function. If the
  subclass cannot hold the assumption, it should
  override `to_json_dict` and `from_json_dict` to customize the implementation.
  """

  def to_json_dict(self) -> Dict[Text, Any]:
    """Convert from an object to a JSON serializable dictionary."""
    return self.__dict__

  @classmethod
  def from_json_dict(cls, dict_data: Dict[Text, Any]) -> Any:
    """Convert from dictionary data to an object."""
    # Bypass __init__ so restoring state never requires constructor arguments;
    # the instance state is rebuilt directly from the dict.
    instance = cls.__new__(cls)
    instance.__dict__ = dict_data
    return instance
# A single value the JSON utilities know how to encode/decode.
JsonableValue = Union[bool, bytes, float, int, Jsonable, message.Message, Text,
                      Type]
# Containers of serializable values; dict keys may be bytes or text.
JsonableList = List[JsonableValue]
JsonableDict = Dict[Union[bytes, Text], Union[JsonableValue, JsonableList]]
JsonableType = Union[JsonableValue, JsonableList, JsonableDict]
class _DefaultEncoder(json.JSONEncoder):
  """Default JSON Encoder which encodes Jsonable object to JSON."""

  def encode(self, obj: Any) -> Text:
    """Override encode to prevent redundant dumping."""
    # A str-typed RuntimeParameter serializes to a plain string placeholder, so
    # route it directly through default() instead of the normal encode path.
    if obj.__class__.__name__ == 'RuntimeParameter' and obj.ptype == Text:
      return self.default(obj)

    return super(_DefaultEncoder, self).encode(obj)

  def default(self, obj: Any) -> Any:
    # If obj is a str-typed RuntimeParameter, serialize it in place.
    if obj.__class__.__name__ == 'RuntimeParameter' and obj.ptype == Text:
      dict_data = {
          _TFX_OBJECT_TYPE_KEY: _ObjectType.JSONABLE,
          _MODULE_KEY: obj.__class__.__module__,
          _CLASS_KEY: obj.__class__.__name__,
      }
      dict_data.update(obj.to_json_dict())
      return dumps(dict_data)

    if isinstance(obj, Jsonable):
      dict_data = {
          _TFX_OBJECT_TYPE_KEY: _ObjectType.JSONABLE,
          _MODULE_KEY: obj.__class__.__module__,
          _CLASS_KEY: obj.__class__.__name__,
      }
      # Need to first check the existence of str-typed runtime parameter.
      data_patch = obj.to_json_dict()
      for k, v in data_patch.items():
        if v.__class__.__name__ == 'RuntimeParameter' and v.ptype == Text:
          data_patch[k] = dumps(v)
      dict_data.update(data_patch)
      return dict_data

    if inspect.isclass(obj):
      # When serializing, skip over deprecated class aliases in the class
      # hierarchy.
      obj = deprecation_utils.get_first_nondeprecated_class(obj)
      return {
          _TFX_OBJECT_TYPE_KEY: _ObjectType.CLASS,
          _MODULE_KEY: obj.__module__,
          _CLASS_KEY: obj.__name__,
      }

    if isinstance(obj, message.Message):
      # Protos are stored as their JSON rendering under _PROTO_VALUE_KEY.
      return {
          _TFX_OBJECT_TYPE_KEY: _ObjectType.PROTO,
          _MODULE_KEY: obj.__class__.__module__,
          _CLASS_KEY: obj.__class__.__name__,
          _PROTO_VALUE_KEY: proto_utils.proto_to_json(obj)
      }

    return super(_DefaultEncoder, self).default(obj)
class _DefaultDecoder(json.JSONDecoder):
  """Default JSON Decoder which decodes JSON to Jsonable object."""

  def __init__(self, *args, **kwargs):
    super(_DefaultDecoder, self).__init__(
        object_hook=self._dict_to_object, *args, **kwargs)

  def _dict_to_object(self, dict_data: Dict[Text, Any]) -> Any:
    """Converts a dictionary to an object."""
    # Plain dictionaries (no type marker) pass through untouched.
    if _TFX_OBJECT_TYPE_KEY not in dict_data:
      return dict_data

    object_type = dict_data.pop(_TFX_OBJECT_TYPE_KEY)

    def _extract_class(d):
      # Resolve the class recorded by _DefaultEncoder via its module path.
      module_name = d.pop(_MODULE_KEY)
      class_name = d.pop(_CLASS_KEY)
      return getattr(importlib.import_module(module_name), class_name)

    if object_type == _ObjectType.JSONABLE:
      jsonable_class_type = _extract_class(dict_data)
      if not issubclass(jsonable_class_type, Jsonable):
        # Error message previously garbled ("README.ml-pipelines-sdk.md");
        # restored to the intended wording.
        raise ValueError('Class %s must be a subclass of Jsonable' %
                         jsonable_class_type)
      return jsonable_class_type.from_json_dict(dict_data)

    if object_type == _ObjectType.CLASS:
      return _extract_class(dict_data)

    if object_type == _ObjectType.PROTO:
      proto_class_type = _extract_class(dict_data)
      if not issubclass(proto_class_type, message.Message):
        raise ValueError('Class %s must be a subclass of proto.Message' %
                         proto_class_type)
      if _PROTO_VALUE_KEY not in dict_data:
        raise ValueError('Missing proto value in json dict')
      return proto_utils.json_to_proto(dict_data[_PROTO_VALUE_KEY],
                                       proto_class_type())
    # Unknown object types fall through and yield None (original behavior).
def dumps(obj: Any) -> Text:
  """Serializes an object to a JSON string using the Jsonable-aware encoder."""
  return json.dumps(obj, sort_keys=True, cls=_DefaultEncoder)
def loads(s: Text) -> Any:
  """Loads a JSON string into an object with Jsonable decoding."""
  return json.loads(s, cls=_DefaultDecoder)
"""Utilities for proto related manipulations."""
import itertools
from typing import Any, Dict, Iterator, TypeVar
from google.protobuf import descriptor as descriptor_lib
from google.protobuf import json_format
from google.protobuf import message
def gather_file_descriptors(
    descriptor: descriptor_lib.Descriptor,
    enable_extensions: bool = False) -> Iterator[descriptor_lib.FileDescriptor]:
  """Yield all dependent file descriptors of a given proto descriptor.

  Args:
    descriptor: The proto descriptor to start the dependency search from.
    enable_extensions: Optional. True if proto extensions are enabled. Default
      to False.

  Yields:
    All file descriptors in the transitive dependencies of descriptor.
    Each file descriptor is returned only once.
  """
  visited_files = set()
  visited_messages = set()
  messages = [descriptor]
  # Walk in depth through all the fields and extensions of the given descriptor
  # and all the referenced messages.
  while messages:
    descriptor = messages.pop()
    visited_files.add(descriptor.file)
    if enable_extensions:
      extensions = descriptor.file.pool.FindAllExtensions(descriptor)
    else:
      extensions = []
    for field in itertools.chain(descriptor.fields, extensions):
      if field.message_type and field.message_type not in visited_messages:
        visited_messages.add(field.message_type)
        messages.append(field.message_type)
    for extension in extensions:
      # Note: extension.file may differ from descriptor.file.
      visited_files.add(extension.file)
  # Go through the collected files and add their explicit dependencies.
  files = list(visited_files)
  while files:
    file_descriptor = files.pop()
    yield file_descriptor
    for dependency in file_descriptor.dependencies:
      if dependency not in visited_files:
        visited_files.add(dependency)
        files.append(dependency)
def proto_to_json(proto: message.Message) -> str:
  """Renders a proto message as JSON with stable, consistent formatting."""
  return json_format.MessageToJson(
      message=proto, preserving_proto_field_name=True, sort_keys=True)
def proto_to_dict(proto: message.Message) -> Dict[str, Any]:
  """Converts a proto message to a dict, keeping original proto field names."""
  return json_format.MessageToDict(
      preserving_proto_field_name=True, message=proto)
# Type for a subclass of message.Message which will be used as a return type.
ProtoMessage = TypeVar('ProtoMessage', bound=message.Message)
def json_to_proto(json_str: str, proto: ProtoMessage) -> ProtoMessage:
  """Parses a JSON string into the given proto, ignoring unknown fields."""
  parsed = json_format.Parse(json_str, proto, ignore_unknown_fields=True)
  return parsed
def dict_to_proto(json_dict: Dict[Any, Any],
                  proto: ProtoMessage) -> ProtoMessage:
  """Parses a dict into the given proto, ignoring unknown fields."""
  parsed = json_format.ParseDict(json_dict, proto, ignore_unknown_fields=True)
  return parsed
"""Utilities for topological sort."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Callable, List, Sequence, Text, TypeVar
NodeT = TypeVar('NodeT')


class InvalidDAGError(Exception):
  """Error to indicate invalid DAG."""


def topsorted_layers(
    nodes: Sequence[NodeT], get_node_id_fn: Callable[[NodeT], Text],
    get_parent_nodes: Callable[[NodeT], List[NodeT]],
    get_child_nodes: Callable[[NodeT], List[NodeT]]) -> List[List[NodeT]]:
  """Sorts the DAG of nodes in topological order.

  Args:
    nodes: A sequence of nodes.
    get_node_id_fn: Callable that returns a unique text identifier for a node.
    get_parent_nodes: Callable that returns a list of parent nodes for a node.
    get_child_nodes: Callable that returns a list of child nodes for a node.

  Returns:
    A list of topologically ordered node layers. Each layer of nodes is sorted
    by its node id given by `get_node_id_fn`.

  Raises:
    InvalidDAGError: If the input nodes don't form a DAG.
    ValueError: If the nodes are not unique.
  """
  # Make sure the nodes are unique.
  if len(set(get_node_id_fn(n) for n in nodes)) != len(nodes):
    raise ValueError('Nodes must have unique ids.')

  # The first layer contains nodes with no incoming edges.
  layer = [node for node in nodes if not get_parent_nodes(node)]

  visited = set()
  layers = []
  while layer:
    layer = sorted(layer, key=get_node_id_fn)
    layers.append(layer)

    next_layer = []
    for node in layer:
      visited.add(get_node_id_fn(node))
      for child_node in get_child_nodes(node):
        # Include the child node if all its parents are visited. If the child
        # node is part of a cycle, it will never be included since it will have
        # at least one unvisited parent node which is also part of the cycle.
        parent_node_ids = set(
            get_node_id_fn(p) for p in get_parent_nodes(child_node))
        if parent_node_ids.issubset(visited):
          next_layer.append(child_node)
    layer = next_layer

  # Nodes in cycles are not included in layers; raise an error if this happens.
  if sum(len(l) for l in layers) < len(nodes):
    raise InvalidDAGError('Cycle detected.')
  return layers
"""Utilities for gathering telemetry for TFX components and pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import re
import sys
import threading
from typing import Dict, List, Text
from tfx import version
# Common label names used.
LABEL_TFX_RUNNER = 'tfx_runner'
LABEL_TFX_EXECUTOR = 'tfx_executor'
_LABEL_TFX_VERSION = 'tfx_version'
_LABEL_TFX_PY_VERSION = 'tfx_py_version'
# The GKE pod label indicating the SDK environment.
LABEL_KFP_SDK_ENV = 'pipelines.kubeflow.org/pipeline-sdk-type'
# Thread local labels registered so far.
# NOTE(review): assigning `.dictionary` here only initializes it for the thread
# that imports this module; other threads rely on the getattr(...) guards in
# scoped_labels()/get_labels_dict() to initialize lazily.
_thread_local_labels_state = threading.local()
_thread_local_labels_state.dictionary = {}
@contextlib.contextmanager
def scoped_labels(labels: Dict[Text, Text]):
  """Register thread local labels used in current context."""
  state = _thread_local_labels_state
  # Lazily initialize for threads other than the importing one.
  if getattr(state, 'dictionary', None) is None:
    state.dictionary = {}
  for key, value in labels.items():
    state.dictionary[key] = _normalize_label(value)
  try:
    yield
  finally:
    # Unregister exactly the keys this scope added.
    for key in labels:
      state.dictionary.pop(key)
def _normalize_label(value: Text) -> Text:
"""Lowercase and replace illegal characters in labels."""
# See https://cloud.google.com/compute/docs/labeling-resources.
return re.sub(r'[^README.ml-pipelines-sdk.md-z0-9\_\-]', '-', value.lower())[-63:]
def get_labels_dict() -> Dict[Text, Text]:
  """Get all registered and system generated labels as a dict.

  Returns:
    All registered and system generated labels as a dict.
  """
  # System labels (TFX and Python versions) merged with any thread-local
  # labels registered via scoped_labels(); thread-local values win on clash.
  result = dict(
      {
          _LABEL_TFX_VERSION:
              version.__version__,
          _LABEL_TFX_PY_VERSION:
              '%d.%d' % (sys.version_info.major, sys.version_info.minor),
      }, **getattr(_thread_local_labels_state, 'dictionary', {}))
  # Normalize every value so the result is directly usable as labels.
  for k, v in result.items():
    result[k] = _normalize_label(v)
  return result
def make_beam_labels_args() -> List[Text]:
  """Make Beam arguments for common labels used in TFX pipelines.

  Returns:
    New Beam pipeline args with labels.
  """
  labels = get_labels_dict()
  # See following file for reference to the '--labels ' flag.
  # https://github.com/apache/beam/blob/master/sdks/python/apache_beam/options/pipeline_options.py
  return [
      arg for key in sorted(labels)
      for arg in ('--labels', '%s=%s' % (key, labels[key]))
  ]
"""Utilities for Python dependency and package management."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import subprocess
import sys
import tempfile
from typing import List, Text
import absl
from tfx import dependencies
from tfx import version
from tfx.dsl.io import fileio
from tfx.utils import io_utils
def make_beam_dependency_flags(beam_pipeline_args: List[Text]) -> List[Text]:
  """Make beam arguments for TFX python dependencies, if latter was not set.

  When TFX executors are used with non-local beam runners (Dataflow, Flink, etc)
  the remote runner needs to have access to TFX executors.
  This function acts as a helper to provide TFX source package to Beam if user
  does not provide that through Beam pipeline args.

  Args:
    beam_pipeline_args: original Beam pipeline args.

  Returns:
    updated Beam pipeline args with TFX dependencies added.

  Raises:
    Exception: If apache-beam is not installed.
  """
  # TODO(b/176857256): Change guidance message once "ml-pipelines-sdk" extra
  # package specifiers are available.
  try:
    import apache_beam as beam  # pylint: disable=g-import-not-at-top
  except ModuleNotFoundError as e:
    raise Exception(
        'Apache Beam must be installed to use this functionality.') from e

  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      flags=beam_pipeline_args)
  all_options = pipeline_options.get_all_options()
  # If any dependency-carrying flag is already set by the user, do nothing.
  for flag_name in [
      'extra_packages', 'setup_file', 'requirements_file',
      'worker_harness_container_image'
  ]:
    if all_options.get(flag_name):
      absl.logging.info('Nonempty beam arg %s already includes dependency',
                        flag_name)
      return beam_pipeline_args
  absl.logging.info('Attempting to infer TFX Python dependency for beam')
  dependency_flags = []
  # Build an ephemeral sdist of the installed TFX and ship it to workers.
  sdist_file = build_ephemeral_package()
  absl.logging.info('Added --extra_package=%s to beam args', sdist_file)
  dependency_flags.append('--extra_package=%s' % sdist_file)
  return beam_pipeline_args + dependency_flags
_ephemeral_setup_file = """
import setuptools
if __name__ == '__main__':
setuptools.setup(
name='tfx_ephemeral',
version='{version}',
packages=setuptools.find_namespace_packages(),
install_requires=[{install_requires}],
)
"""
def build_ephemeral_package() -> Text:
  """Repackage current installation of TFX into a tfx_ephemeral sdist.

  Returns:
    Path to ephemeral sdist package.

  Raises:
    RuntimeError: if dist directory has zero or multiple files, or if no
      directory named 'tfx' can be located in this file's path.
  """
  tmp_dir = os.path.join(tempfile.mkdtemp(), 'build', 'tfx')
  # Find the last directory named 'tfx' in this file's path and package it.
  path_split = __file__.split(os.path.sep)
  last_index = -1
  for i in range(len(path_split)):
    if path_split[i] == 'tfx':
      last_index = i
  if last_index < 0:
    raise RuntimeError('Cannot locate directory \'tfx\' in the path %s' %
                       __file__)
  tfx_root_dir = os.path.sep.join(path_split[0:last_index + 1])
  absl.logging.info('Copying all content from install dir %s to temp dir %s',
                    tfx_root_dir, tmp_dir)
  shutil.copytree(tfx_root_dir, os.path.join(tmp_dir, 'tfx'))
  # Source directory default permission is 0555 but we need to be able to create
  # new setup.py file.
  os.chmod(tmp_dir, 0o720)
  setup_file = os.path.join(tmp_dir, 'setup.py')
  absl.logging.info('Generating a temp setup file at %s', setup_file)
  install_requires = dependencies.make_required_install_packages()
  io_utils.write_string_file(
      setup_file,
      _ephemeral_setup_file.format(
          version=version.__version__, install_requires=install_requires))

  # Create the package; restore the working directory even if the build fails.
  curdir = os.getcwd()
  os.chdir(tmp_dir)
  try:
    temp_log = os.path.join(tmp_dir, 'setup.log')
    with open(temp_log, 'w') as f:
      absl.logging.info('Creating temporary sdist package, logs available at %s',
                        temp_log)
      cmd = [sys.executable, setup_file, 'sdist']
      subprocess.call(cmd, stdout=f, stderr=f)
  finally:
    os.chdir(curdir)

  # Return the package dir+filename
  dist_dir = os.path.join(tmp_dir, 'dist')
  files = fileio.listdir(dist_dir)
  if not files:
    raise RuntimeError('Found no package files in %s' % dist_dir)
  elif len(files) > 1:
    raise RuntimeError('Found multiple package files in %s' % dist_dir)
  return os.path.join(dist_dir, files[0])
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Optional, Text, Tuple
def make_model_path(model_base_path: Text, model_name: Text,
                    version: int) -> Text:
  """Make a TFS-flavored model path.

  Args:
    model_base_path: A base path containing the directory of model_name.
    model_name: A name of the model.
    version: An integer version of the model.

  Returns:
    `{model_base_path}/{model_name}/{version}`.
  """
  return os.path.join(model_base_path, model_name, str(version))
def parse_model_path(
    model_path: Text,
    expected_model_name: Optional[Text] = None) -> Tuple[Text, Text, int]:
  """Parse model_path into parts of TFS flavor.

  Args:
    model_path: A TFS-flavored model path.
    expected_model_name: Expected model_name as defined from the module
      docstring. If model_name does not match, parsing will be failed.

  Raises:
    ValueError: If model path is invalid (not TFS-flavored).

  Returns:
    Tuple of (model_base_path, model_name, version)
  """
  remainder, version_segment = os.path.split(model_path)
  if not remainder:
    raise ValueError('model_path is too short ({})'.format(model_path))
  if not version_segment.isdigit():
    raise ValueError('No version segment ({})'.format(model_path))
  model_base_path, model_name = os.path.split(remainder)
  if expected_model_name is not None and model_name != expected_model_name:
    raise ValueError('model_name does not match (expected={}, actual={})'
                     .format(expected_model_name, model_path))
  return model_base_path, model_name, int(version_segment)
def parse_model_base_path(model_path: Text) -> Text:
  """Parse model_base_path from the TFS-flavored model path.

  Args:
    model_path: A TFS-flavored model path.

  Raises:
    ValueError: If model path is invalid (not TFS-flavored).

  Returns:
    model_base_path as defined from the module docstring.
  """
  base_path, _, _ = parse_model_path(model_path)
  return base_path
"""Helper functions to choose engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import subprocess
import sys
from typing import Any, Dict, Text
import click
from tfx.tools.cli import labels
from tfx.tools.cli import pip_utils
from tfx.tools.cli.handler import base_handler
def detect_handler(flags_dict: Dict[Text, Any]) -> base_handler.BaseHandler:
  """Detect handler from the environment.

  Details:
    When the engine flag is set to 'auto', this method first finds all the
    packages in the local environment. The environment is first checked
    for multiple orchestrators and if true the user must rerun the command with
    required engine. If only one orchestrator is present, the engine is set to
    that.

  Args:
    flags_dict: A dictionary containing the flags of a command.

  Returns:
    Corresponding Handler object.
  """
  packages_list = pip_utils.get_package_names()
  if (labels.AIRFLOW_PACKAGE_NAME in packages_list) and (
      labels.KUBEFLOW_PACKAGE_NAME in packages_list):
    sys.exit('Multiple orchestrators found. Choose one using --engine flag.')
  if labels.AIRFLOW_PACKAGE_NAME in packages_list:
    click.echo('Detected Airflow.')
    click.echo(
        'Use --engine flag if you intend to use a different orchestrator.')
    flags_dict[labels.ENGINE_FLAG] = 'airflow'
    # Imported lazily so the Airflow dependency is only required when used.
    from tfx.tools.cli.handler import airflow_handler  # pylint: disable=g-import-not-at-top
    return airflow_handler.AirflowHandler(flags_dict)
  elif labels.KUBEFLOW_PACKAGE_NAME in packages_list:
    click.echo('Detected Kubeflow.')
    click.echo(
        'Use --engine flag if you intend to use a different orchestrator.')
    flags_dict[labels.ENGINE_FLAG] = 'kubeflow'
    from tfx.tools.cli.handler import kubeflow_handler  # pylint: disable=g-import-not-at-top
    return kubeflow_handler.KubeflowHandler(flags_dict)
  else:
    # No known orchestrator package installed; fall back to Beam.
    click.echo('Detected Beam.')
    click.echo(
        '[WARNING] Default engine will be changed to "local" in the near future.'
    )
    click.echo(
        'Use --engine flag if you intend to use a different orchestrator.')
    flags_dict[labels.ENGINE_FLAG] = 'beam'
    from tfx.tools.cli.handler import beam_handler  # pylint: disable=g-import-not-at-top
    return beam_handler.BeamHandler(flags_dict)
def create_handler(flags_dict: Dict[Text, Any]) -> base_handler.BaseHandler:
  """Retrieve handler from the environment using the --engine flag.

  Args:
    flags_dict: A dictionary containing the flags of a command.

  Raises:
    RuntimeError: When engine is not supported by TFX.

  Returns:
    Corresponding Handler object.
  """
  engine = flags_dict[labels.ENGINE_FLAG]
  # NOTE(review): this is a substring check against the raw `pip freeze`
  # output, unlike detect_handler which uses pip_utils.get_package_names();
  # consider unifying the two code paths.
  packages_list = str(subprocess.check_output(['pip', 'freeze', '--local']))
  if engine == 'airflow':
    if labels.AIRFLOW_PACKAGE_NAME not in packages_list:
      sys.exit('Airflow not found.')
    # Handlers are imported lazily so only the selected engine's dependencies
    # are required.
    from tfx.tools.cli.handler import airflow_handler  # pylint: disable=g-import-not-at-top
    return airflow_handler.AirflowHandler(flags_dict)
  elif engine == 'kubeflow':
    if labels.KUBEFLOW_PACKAGE_NAME not in packages_list:
      sys.exit('Kubeflow not found.')
    from tfx.tools.cli.handler import kubeflow_handler  # pylint: disable=g-import-not-at-top
    return kubeflow_handler.KubeflowHandler(flags_dict)
  elif engine == 'beam':
    from tfx.tools.cli.handler import beam_handler  # pylint: disable=g-import-not-at-top
    return beam_handler.BeamHandler(flags_dict)
  elif engine == 'local':
    from tfx.tools.cli.handler import local_handler  # pylint: disable=g-import-not-at-top
    return local_handler.LocalHandler(flags_dict)
  elif engine == 'auto':
    return detect_handler(flags_dict)
  else:
    raise RuntimeError('Engine {} is not supported.'.format(engine))
"""BuildSpec helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Optional, Text
import click
from tfx.tools.cli.container_builder import labels
import yaml
class BuildSpec(object):
  """Build specification.

  BuildSpec generates a default build spec if it does not exist.

  Attributes:
    filename: build spec filename.
    build_context: build working directory.
    target_image: target image with no tag.
    target_image_tag: tag of the target image.
    _buildspec: in-memory representation of the build spec.
  """

  def __init__(self,
               filename: Text = labels.BUILD_SPEC_FILENAME):
    self._filename = filename
    if not os.path.exists(self._filename):
      raise ValueError('BuildSpec:: build spec file %s does not exist.' %
                       filename)
    self._read_existing_build_spec()

  @staticmethod
  def load_default(filename: Text = labels.BUILD_SPEC_FILENAME,
                   target_image: Optional[Text] = None,
                   build_context: Optional[Text] = None,
                   dockerfile_name: Optional[Text] = None):
    """Generate a default build spec yaml.

    Args:
      filename: build spec filename.
      target_image: target image path. If it contains the tag, the build spec
        will also include it; If it does not, the build spec will tag it as
        'latest'.
      build_context: local build context path.
      dockerfile_name: dockerfile filename in the build_context.

    Returns:
      BuildSpec instance.

    Raises:
      ValueError: If the build spec file already exists, target_image is
        missing, or target_image is malformed.
    """
    if os.path.exists(filename):
      raise ValueError('BuildSpec: build spec file %s already exists.' %
                       filename)
    if target_image is None:
      raise ValueError('BuildSpec: target_image is not given.')
    target_image_fields = target_image.split(':')
    if len(target_image_fields) > 2:
      raise ValueError('BuildSpec: target_image is in illegal form.')
    target_image_with_no_tag = target_image_fields[0]
    # Default to 'latest' when the image path carries no explicit tag.
    target_image_tag = 'latest' if len(
        target_image_fields) <= 1 else target_image_fields[1]
    build_context = build_context or labels.BUILD_CONTEXT
    dockerfile_name = dockerfile_name or labels.DOCKERFILE_NAME
    build_spec = {
        'apiVersion': labels.SKAFFOLD_API_VERSION,
        'kind': 'Config',
        'build': {
            'tagPolicy': {
                'envTemplate': {
                    'template': target_image_tag
                }
            },
            'artifacts': [{
                'image': target_image_with_no_tag,
                'context': build_context,
                'docker': {
                    'dockerfile': dockerfile_name
                }
            }],
            'local': {
                'push': True,
                'useDockerCLI': True
            }
        }
    }
    with open(filename, 'w') as f:
      yaml.dump(build_spec, f)
    return BuildSpec(filename)

  def _read_existing_build_spec(self):
    """Read existing build spec yaml."""
    with open(self.filename, 'r') as f:
      click.echo('Reading build spec from %s' % self.filename)
      self._buildspec = yaml.safe_load(f)
      if len(self._buildspec['build']['artifacts']) != 1:
        # Fixed: the original message was missing a space between 'however'
        # and 'only'.
        raise RuntimeError('The build spec contains multiple artifacts '
                           'however only one is supported.')
      self._build_context = self._buildspec['build']['artifacts'][0]['context']
      self._target_image = self._buildspec['build']['artifacts'][0]['image']
      self._target_image_tag = self._buildspec['build']['tagPolicy'][
          'envTemplate']['template']
      # For compatibility with old build files which have `{{.IMAGE_NAME}}:tag`
      # format.
      if self._target_image_tag.startswith('{{.IMAGE_NAME}}:'):
        self._target_image_tag = self._target_image_tag.split(':', 2)[-1]

  @property
  def filename(self):
    return self._filename

  @property
  def build_context(self):
    return self._build_context

  @property
  def target_image(self):
    return self._target_image

  @property
  def target_image_tag(self):
    return self._target_image_tag
"""Base class for classes representing README.ml-pipelines-sdk.md dataset for the benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
class BenchmarkDataset(object):
  """Base class for classes representing a dataset for the benchmark."""

  def __init__(self, base_dir=None):
    """Construct a dataset instance.

    Args:
      base_dir: The directory in which datasets artifacts are located. This will
        be used for reading during benchmark execution, as well as writing
        during benchmark regeneration. By default, the directory in which this
        file is located at runtime will be used to infer the location of
        `tfx/benchmarks/datasets`.
    """
    self._base_dir = (
        base_dir if base_dir else os.path.join(
            os.path.dirname(__file__), "datasets"))

  def datasets_dir(self, subdir=""):
    """Returns the path to the datasets directory.

    Args:
      subdir: Subdirectory to join at the end of the datasets directory.

    Returns:
      The path to the datasets directory, with the subdir joined at the end.
    """
    return os.path.join(self._base_dir, subdir)

  def dataset_path(self):
    """Returns the path to the dataset file."""
    raise NotImplementedError()

  def tf_metadata_schema_path(self):
    """Returns the path to the tf.Metadata schema file."""
    raise NotImplementedError()

  def trained_saved_model_path(self):
    """Returns the path to the inference format SavedModel."""
    raise NotImplementedError()

  def tft_saved_model_path(self):
    """Returns the path to the tf.Transform SavedModel."""
    raise NotImplementedError()

  def tfma_saved_model_path(self):
    """Returns the path to the tf.ModelAnalysis SavedModel."""
    raise NotImplementedError()

  def num_examples(self, limit=None):
    """Returns the number of examples in the dataset.

    Args:
      limit: If set, returns min(limit, number of examples in dataset).

    Returns:
      The number of examples in the dataset.
    """
    raise NotImplementedError()

  def read_raw_dataset(self, deserialize=True, limit=None):
    """Read the raw dataset of tf.train.Examples.

    Args:
      deserialize: If False, return the raw serialized bytes. If True, return
        the tf.train.Example parsed from the serialized bytes.
      limit: If set, yields no more than the given number of examples (might be
        less if the dataset has less examples than the limit).

    Yields:
      Serialized/unserialized (depending on deserialize) tf.train.Examples.
    """
    # The dataset file is expected to be a GZIP-compressed TFRecord file
    # (see tf.compat.v1.io.TFRecordOptions below).
    for count, example_bytes in enumerate(
        tf.compat.v1.io.tf_record_iterator(
            self.dataset_path(),
            tf.compat.v1.io.TFRecordOptions(
                tf.compat.v1.io.TFRecordCompressionType.GZIP))):
      if limit and count >= limit:
        break
      if not deserialize:
        yield example_bytes
      else:
        yield tf.train.Example().FromString(example_bytes)

  def generate_raw_dataset(self, args):
    """Generate the raw dataset.

    Args:
      args: String of extra arguments to use when generating the raw dataset.
    """
    raise NotImplementedError()

  def generate_models(self, args):
    """Generate the inference and tf.ModelAnalysis format SavedModels.

    This is usually done by running a Trainer on the raw dataset and exporting
    the inference and tf.ModelAnalysis format SavedModels.

    Args:
      args: String of extra arguments to use when generating the models.
    """
    raise NotImplementedError()
"""TFT benchmark base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import shutil
import tempfile
import time
# Standard Imports
from absl import logging
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform import graph_tools
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.beam import impl as tft_beam_impl
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.saved import saved_transform_io_v2
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils
import tfx
from tfx.benchmarks import benchmark_utils
from tfx.benchmarks import benchmark_base
from tfx_bsl.beam import shared
from tfx_bsl.coders import example_coder
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import tf_example_record
class _CopySavedModel(beam.PTransform):
  """Copies the TFT SavedModel to another directory."""

  def __init__(self, dest_path):
    # Destination directory; any existing contents there are replaced when the
    # transform runs.
    self._dest_path = dest_path

  def expand(self, transform_fn):

    def copy_saved_model(unused_element, source_path, dest_path):
      # Remove any stale copy first so copytree does not fail on an existing
      # destination.
      shutil.rmtree(dest_path, ignore_errors=True)
      shutil.copytree(source_path, dest_path)
      logging.info("Copied SavedModel from %s to %s", source_path, dest_path)

    # A single-element PCollection drives exactly one copy; the SavedModel
    # path produced upstream is passed in as a singleton side input.
    return (transform_fn.pipeline
            | "CreateSole" >> beam.Create([None])
            | "CopySavedModel" >> beam.Map(
                copy_saved_model,
                source_path=beam.pvalue.AsSingleton(transform_fn),
                dest_path=self._dest_path))
class _AnalyzeAndTransformDataset(beam.PTransform):
  """PTransform to run AnalyzeAndTransformDataset."""

  def __init__(self,
               dataset,
               tfxio,
               preprocessing_fn,
               transform_input_dataset_metadata,
               force_tf_compat_v1=True,
               max_num_examples=None,
               generate_dataset=False):
    """Constructor.

    Args:
      dataset: BenchmarkDataset object.
      tfxio: A `tfx_bsl.TFXIO` instance.
      preprocessing_fn: preprocessing_fn.
      transform_input_dataset_metadata: dataset_metadata.DatasetMetadata.
      force_tf_compat_v1: If False then Transform will use its native TF2
        version, if True then Transform will use its TF1 version.
      max_num_examples: Max number of examples to read from the dataset.
      generate_dataset: If True, generates the raw dataset and appropriate
        intermediate outputs (just the TFT SavedModel for now) necessary for
        other benchmarks.
    """
    self._dataset = dataset
    self._tfxio = tfxio
    self._preprocessing_fn = preprocessing_fn
    self._transform_input_dataset_metadata = transform_input_dataset_metadata
    self._force_tf_compat_v1 = force_tf_compat_v1
    self._max_num_examples = max_num_examples
    self._generate_dataset = generate_dataset

  def expand(self, pipeline):
    # TODO(b/147620802): Consider making this (and other parameters)
    # configurable to test more variants (e.g. with and without deep-copy
    # optimisation, with and without cache, etc).
    with tft_beam.Context(
        temp_dir=tempfile.mkdtemp(),
        force_tf_compat_v1=self._force_tf_compat_v1):
      # Read serialized examples and decode them via the TFXIO source.
      raw_data = (
          pipeline
          | "ReadDataset" >> beam.Create(
              self._dataset.read_raw_dataset(
                  deserialize=False, limit=self._max_num_examples))
          | "Decode" >> self._tfxio.BeamSource())
      transform_fn, output_metadata = (
          (raw_data, self._tfxio.TensorAdapterConfig())
          | "AnalyzeDataset" >> tft_beam.AnalyzeDataset(self._preprocessing_fn))

      if self._generate_dataset:
        # Persist the TFT SavedModel so other benchmarks can reuse it.
        _ = transform_fn | "CopySavedModel" >> _CopySavedModel(
            dest_path=self._dataset.tft_saved_model_path(
                self._force_tf_compat_v1))

      (transformed_dataset, transformed_metadata) = (
          ((raw_data, self._tfxio.TensorAdapterConfig()),
           (transform_fn, output_metadata))
          | "TransformDataset" >> tft_beam.TransformDataset())
      return transformed_dataset, transformed_metadata
# Tuple for variables common to all benchmarks.
CommonVariablesTuple = collections.namedtuple("CommonVariablesTuple", [
    "tf_metadata_schema",  # tf.Metadata Schema read from the dataset.
    "preprocessing_fn",  # The dataset's TFT preprocessing function.
    "transform_input_dataset_metadata",  # DatasetMetadata of transform inputs.
    "tfxio",  # TFXIO used to decode serialized tf.Examples.
])
def _get_common_variables(dataset):
  """Returns metadata schema, preprocessing fn, input dataset metadata.

  Args:
    dataset: BenchmarkDataset object.

  Returns:
    A CommonVariablesTuple holding the dataset's tf.Metadata schema, its TFT
    preprocessing_fn, dataset metadata restricted to the columns that the
    preprocessing_fn reads, and a TFXIO for decoding serialized tf.Examples.
  """
  tf_metadata_schema = benchmark_utils.read_schema(
      dataset.tf_metadata_schema_path())

  preprocessing_fn = dataset.tft_preprocessing_fn()

  feature_spec = schema_utils.schema_as_feature_spec(
      tf_metadata_schema).feature_spec
  # Keep only the features the preprocessing_fn actually consumes.
  transform_input_columns = (
      tft.get_transform_input_columns(preprocessing_fn, feature_spec))
  transform_input_dataset_metadata = dataset_metadata.DatasetMetadata(
      schema_utils.schema_from_feature_spec({
          feature: feature_spec[feature] for feature in transform_input_columns
      }))

  tfxio = tf_example_record.TFExampleBeamRecord(
      physical_format="tfexamples",
      schema=transform_input_dataset_metadata.schema,
      telemetry_descriptors=["TFTransformBenchmark"])

  return CommonVariablesTuple(
      tf_metadata_schema=tf_metadata_schema,
      preprocessing_fn=preprocessing_fn,
      transform_input_dataset_metadata=transform_input_dataset_metadata,
      tfxio=tfxio)
def regenerate_intermediates_for_dataset(dataset,
                                         force_tf_compat_v1=True,
                                         max_num_examples=None):
  """Regenerate intermediate outputs required for the benchmark.

  Runs _AnalyzeAndTransformDataset with generate_dataset=True so the TFT
  SavedModel is written to the dataset's expected location.

  Args:
    dataset: BenchmarkDataset object.
    force_tf_compat_v1: If False then Transform will use its native TF2
      version, if True then Transform will use its TF1 version.
    max_num_examples: Max number of examples to read from the dataset.
  """
  common_variables = _get_common_variables(dataset)
  logging.info("Regenerating intermediate outputs required for benchmark.")
  with beam.Pipeline() as p:
    _ = p | _AnalyzeAndTransformDataset(
        dataset,
        common_variables.tfxio,
        common_variables.preprocessing_fn,
        common_variables.transform_input_dataset_metadata,
        force_tf_compat_v1=force_tf_compat_v1,
        max_num_examples=max_num_examples,
        generate_dataset=True)
  logging.info("Intermediate outputs regenerated.")
def _get_batched_records(dataset, max_num_examples=None):
  """Returns a (batch_size, batched records) tuple for the dataset.

  Args:
    dataset: BenchmarkDataset object.
    max_num_examples: Maximum number of examples to read from the dataset.

  Returns:
    Tuple of (batch_size, list of batched records), where each element of the
    list is a batch of up to batch_size serialized tf.train.Examples decoded
    by a tfx_bsl ExamplesToRecordBatchDecoder. Note the second element is a
    fully materialized list, not an iterator.
  """
  batch_size = 1000
  common_variables = _get_common_variables(dataset)
  converter = example_coder.ExamplesToRecordBatchDecoder(
      common_variables.transform_input_dataset_metadata.schema
      .SerializeToString())
  serialized_records = benchmark_utils.batched_iterator(
      dataset.read_raw_dataset(deserialize=False, limit=max_num_examples),
      batch_size)
  # Decode eagerly so benchmark timing loops measure only the work under test.
  records = [converter.DecodeBatch(x) for x in serialized_records]
  return batch_size, records
class TFTBenchmarkBase(benchmark_base.BenchmarkBase):
  """TFT benchmark base class."""

  def __init__(self, dataset, **kwargs):
    # Benchmark runners may pass extraneous arguments we don't care about.
    del kwargs
    super(TFTBenchmarkBase, self).__init__()
    self._dataset = dataset

  def report_benchmark(self, **kwargs):
    """Reports benchmark results, annotated with TFX/TFT version info."""
    if "extras" not in kwargs:
      kwargs["extras"] = {}
    # Note that the GIT_COMMIT_ID is not included in the packages themselves:
    # it must be injected by an external script.
    kwargs["extras"]["commit_tfx"] = (getattr(tfx, "GIT_COMMIT_ID", None) or
                                      getattr(tfx, "__version__", None))
    kwargs["extras"]["commit_tft"] = (getattr(tft, "GIT_COMMIT_ID", None) or
                                      getattr(tft, "__version__", None))
    super(TFTBenchmarkBase, self).report_benchmark(**kwargs)

  def _benchmarkAnalyzeAndTransformDatasetCommon(self, force_tf_compat_v1):
    """Common implementation to benchmark AnalyzeAndTransformDataset."""
    common_variables = _get_common_variables(self._dataset)
    pipeline = self._create_beam_pipeline()
    _ = pipeline | _AnalyzeAndTransformDataset(
        self._dataset,
        common_variables.tfxio,
        common_variables.preprocessing_fn,
        common_variables.transform_input_dataset_metadata,
        force_tf_compat_v1=force_tf_compat_v1,
        max_num_examples=self._max_num_examples())
    start = time.time()
    result = pipeline.run()
    result.wait_until_finish()
    end = time.time()
    delta = end - start

    self.report_benchmark(
        iters=1,
        wall_time=delta,
        extras={
            "num_examples":
                self._dataset.num_examples(limit=self._max_num_examples())
        })

  def benchmarkAnalyzeAndTransformDataset(self):
    """Benchmark AnalyzeAndTransformDataset for TFT's TF1 implementation.

    Runs AnalyzeAndTransformDataset in a Beam pipeline. Records the wall time
    taken for the whole pipeline.
    """
    self._benchmarkAnalyzeAndTransformDatasetCommon(force_tf_compat_v1=True)

  def benchmarkTF2AnalyzeAndTransformDataset(self):
    """Benchmark AnalyzeAndTransformDataset for TFT's TF2 implementation.

    Runs AnalyzeAndTransformDataset in a Beam pipeline. Records the wall time
    taken for the whole pipeline.
    """
    self._benchmarkAnalyzeAndTransformDatasetCommon(force_tf_compat_v1=False)

  def _benchmarkRunMetaGraphDoFnManualActuationCommon(self, force_tf_compat_v1):
    """Common implementation to benchmark RunMetaGraphDoFn "manually"."""
    common_variables = _get_common_variables(self._dataset)
    batch_size, batched_records = _get_batched_records(self._dataset,
                                                       self._max_num_examples())
    fn = tft_beam_impl._RunMetaGraphDoFn(  # pylint: disable=protected-access
        tf_config=None,
        shared_graph_state_handle=shared.Shared(),
        passthrough_keys=set(),
        exclude_outputs=None,
        use_tf_compat_v1=force_tf_compat_v1,
        input_tensor_adapter_config=common_variables.tfxio.TensorAdapterConfig(
        ))
    fn.setup()

    start = time.time()
    for batch in batched_records:
      _ = list(
          fn.process(
              batch,
              saved_model_dir=self._dataset.tft_saved_model_path(
                  force_tf_compat_v1)))
    end = time.time()
    delta = end - start

    self.report_benchmark(
        iters=1,
        wall_time=delta,
        extras={
            "batch_size":
                batch_size,
            "num_examples":
                self._dataset.num_examples(limit=self._max_num_examples())
        })

  def benchmarkRunMetaGraphDoFnManualActuation(self):
    """Benchmark RunMetaGraphDoFn "manually" for TFT's TF1 implementation.

    Runs RunMetaGraphDoFn "manually" outside of a Beam pipeline. Records the
    wall time taken.
    """
    self._benchmarkRunMetaGraphDoFnManualActuationCommon(
        force_tf_compat_v1=True)

  def benchmarkTF2RunMetaGraphDoFnManualActuation(self):
    """Benchmark RunMetaGraphDoFn "manually" for TFT's TF2 implementation.

    Runs RunMetaGraphDoFn "manually" outside of a Beam pipeline. Records the
    wall time taken.
    """
    self._benchmarkRunMetaGraphDoFnManualActuationCommon(
        force_tf_compat_v1=False)

  def benchmarkRunMetagraphDoFnAtTFLevel(self):
    """Benchmark RunMetaGraphDoFn at the TF level for TFT's TF1 implementation.

    Benchmarks the parts of RunMetaGraphDoFn that involve feeding and
    fetching from the TFT SavedModel. Records the wall time taken.

    Note that this benchmark necessarily duplicates code directly from TFT
    since it's benchmarking the low-level internals of TFT, which are not
    exposed for use in this way.
    """
    common_variables = _get_common_variables(self._dataset)
    tf_config = tft_beam_impl._FIXED_PARALLELISM_TF_CONFIG  # pylint: disable=protected-access

    # This block copied from _GraphStateCompatV1.__init__
    with tf.compat.v1.Graph().as_default() as graph:
      session = tf.compat.v1.Session(graph=graph, config=tf_config)
      with session.as_default():
        inputs, outputs = (
            saved_transform_io.partially_apply_saved_transform_internal(
                self._dataset.tft_saved_model_path(force_tf_compat_v1=True),
                {}))
        session.run(tf.compat.v1.global_variables_initializer())
        session.run(tf.compat.v1.tables_initializer())
        graph.finalize()
      # We ignore the schema, and assume there are no excluded outputs.
      outputs_tensor_keys = sorted(set(outputs.keys()))
      fetches = [outputs[key] for key in outputs_tensor_keys]
      tensor_inputs = graph_tools.get_dependent_inputs(graph, inputs, fetches)
      input_tensor_keys = sorted(tensor_inputs.keys())
      feed_list = [inputs[key] for key in input_tensor_keys]
      callable_get_outputs = session.make_callable(fetches, feed_list=feed_list)

    batch_size, batched_records = _get_batched_records(self._dataset,
                                                       self._max_num_examples())

    input_tensor_adapter = tensor_adapter.TensorAdapter(
        common_variables.tfxio.TensorAdapterConfig())

    # This block copied from _RunMetaGraphDoFn._handle_batch
    start = time.time()
    for batch in batched_records:
      feed_by_name = input_tensor_adapter.ToBatchTensors(
          batch, produce_eager_tensors=False)
      feed_list = [feed_by_name[name] for name in input_tensor_keys]
      outputs_list = callable_get_outputs(*feed_list)
      _ = {key: value for key, value in zip(outputs_tensor_keys, outputs_list)}
    end = time.time()
    delta = end - start

    self.report_benchmark(
        iters=1,
        wall_time=delta,
        extras={
            "batch_size":
                batch_size,
            "num_examples":
                self._dataset.num_examples(limit=self._max_num_examples())
        })

  def benchmarkTF2RunMetagraphDoFnAtTFLevel(self):
    """Benchmark RunMetaGraphDoFn at the TF level for TFT's TF2 implementation.

    Benchmarks the parts of RunMetaGraphDoFn that involve feeding and
    fetching from the TFT SavedModel. Records the wall time taken.

    Note that this benchmark necessarily duplicates code directly from TFT
    since it's benchmarking the low-level internals of TFT, which are not
    exposed for use in this way.
    """
    common_variables = _get_common_variables(self._dataset)
    tensor_adapter_config = common_variables.tfxio.TensorAdapterConfig()

    # This block copied from _GraphStateV2.__init__
    saved_model_loader = saved_transform_io_v2.SavedModelLoader(
        self._dataset.tft_saved_model_path(force_tf_compat_v1=False))
    callable_get_outputs = saved_model_loader.apply_transform_model
    # We ignore the schema, and assume there are no excluded outputs.
    outputs_tensor_keys = set(saved_model_loader.structured_outputs.keys())
    saved_model_loader.finalize(
        tensor_adapter_config.tensor_representations.keys(),
        outputs_tensor_keys)

    batch_size, batched_records = _get_batched_records(self._dataset,
                                                       self._max_num_examples())

    input_tensor_adapter = tensor_adapter.TensorAdapter(tensor_adapter_config)

    # This block copied from _RunMetaGraphDoFn._handle_batch
    start = time.time()
    for batch in batched_records:
      feed_dict = input_tensor_adapter.ToBatchTensors(
          batch, produce_eager_tensors=True)
      _ = callable_get_outputs(feed_dict)
    end = time.time()
    delta = end - start

    self.report_benchmark(
        iters=1,
        wall_time=delta,
        extras={
            "batch_size":
                batch_size,
            "num_examples":
                self._dataset.num_examples(limit=self._max_num_examples())
        })
"""Chicago taxi dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import os
import shutil
import tempfile
from typing import Optional, Text
from absl import logging
import apache_beam as beam
import tensorflow_transform as tft
from tfx import components
from tfx.benchmarks import benchmark_dataset
from tfx.components.example_gen.csv_example_gen import executor as csv_exgen
from tfx.examples.chicago_taxi_pipeline import taxi_utils
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
from tfx_bsl.coders import csv_decoder
class ChicagoTaxiDataset(benchmark_dataset.BenchmarkDataset):
  """Chicago taxi dataset."""

  def dataset_path(self):
    return self.datasets_dir("chicago_taxi/data/taxi_1M.tfrecords.gz")

  def tf_metadata_schema_path(self):
    return self.datasets_dir(
        "../../examples/chicago_taxi_pipeline/data/user_provided_schema/"
        "schema.pbtxt")

  def trained_saved_model_path(self):
    return self.datasets_dir("chicago_taxi/model/trained_saved_model")

  def tft_saved_model_path(self, force_tf_compat_v1):
    # The TF1 and TF2 Transform implementations produce separate SavedModels.
    if force_tf_compat_v1:
      return self.datasets_dir("chicago_taxi/model/tft_saved_model")
    else:
      return self.datasets_dir("chicago_taxi/model/tft_tf2_saved_model")

  def tfma_saved_model_path(self):
    return self.datasets_dir("chicago_taxi/model/tfma_saved_model")

  def tft_preprocessing_fn(self):
    return taxi_utils.preprocessing_fn

  def num_examples(self, limit=None):
    result = 1000000
    if limit:
      result = min(result, limit)
    return result

  def convert_csv_to_tf_examples(self, csv_path, tfrecords_output_path):
    """Runs a Beam pipeline to convert the CSV file into a TFRecords file.

    This is needed because the conversion is orders of magnitude more
    time-consuming than the functions we want to benchmark, so instead of
    doing the conversion each time, we do it once to generate a converted
    dataset and use that for the benchmark instead.

    Args:
      csv_path: Path to CSV file containing examples.
      tfrecords_output_path: Path to output TFRecords file containing parsed
        examples.
    """
    # Copied from CSV example gen.
    # Read the header row to get the column names; the file handle is closed
    # before the Beam pipeline re-reads the file.
    with open(csv_path, "r") as fp:
      column_names = next(fp).strip().split(",")

    with beam.Pipeline() as p:
      parsed_csv_lines = (
          p
          | "ReadFromText" >> beam.io.ReadFromText(
              file_pattern=csv_path, skip_header_lines=1)
          |
          "ParseCSVLine" >> beam.ParDo(csv_decoder.ParseCSVLine(delimiter=","))
          | "ExtractParsedCSVLines" >> beam.Keys())
      column_infos = beam.pvalue.AsSingleton(
          parsed_csv_lines
          | "InferColumnTypes" >> beam.CombineGlobally(
              csv_decoder.ColumnTypeInferrer(
                  column_names, skip_blank_lines=True)))
      _ = (
          parsed_csv_lines
          | "ToTFExample" >> beam.ParDo(
              csv_exgen._ParsedCsvToTfExample(),  # pylint: disable=protected-access
              column_infos)
          | "Serialize" >> beam.Map(lambda x: x.SerializeToString())
          | "WriteToTFRecord" >> beam.io.tfrecordio.WriteToTFRecord(
              file_path_prefix=tfrecords_output_path,
              shard_name_template="",
              compression_type=beam.io.filesystem.CompressionTypes.GZIP))

  def generate_raw_dataset(self, args):
    # logging.warn is a deprecated alias; use logging.warning.
    logging.warning(
        "Not actually regenerating the raw dataset.\n"
        "To regenerate the raw CSV dataset, see the TFX Chicago Taxi example "
        "for details as to how to do so. "
        "tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py "
        "has the BigQuery query used to generate the dataset.\n"
        "After regenerating the raw CSV dataset, you should also regenerate "
        "the derived TFRecords dataset. You can do so by passing "
        "--generate_dataset_args=/path/to/csv_dataset.csv to "
        "regenerate_datasets.py.")
    if args:
      logging.info("Converting CSV at %s to TFRecords", args)
      self.convert_csv_to_tf_examples(args, self.dataset_path())
      logging.info("TFRecords written to %s", self.dataset_path())

  def generate_models(self, args, force_tf_compat_v1=True):
    """Runs a modified Chicago Taxi pipeline and copies out the SavedModels.

    Args:
      args: Unused extra arguments (kept for interface compatibility).
      force_tf_compat_v1: If True, Transform uses its TF1 implementation;
        determines which TFT SavedModel path is overwritten.
    """
    # Modified version of Chicago Taxi Example pipeline
    # tfx/examples/chicago_taxi_pipeline/taxi_pipeline_beam.py
    root = tempfile.mkdtemp()
    pipeline_root = os.path.join(root, "pipeline")
    metadata_path = os.path.join(root, "metadata/metadata.db")
    module_file = os.path.join(
        os.path.dirname(__file__),
        "../../../examples/chicago_taxi_pipeline/taxi_utils.py")

    examples = external_input(os.path.dirname(self.dataset_path()))
    example_gen = components.ImportExampleGen(input=examples)

    statistics_gen = components.StatisticsGen(
        examples=example_gen.outputs["examples"])

    schema_gen = components.SchemaGen(
        statistics=statistics_gen.outputs["statistics"],
        infer_feature_shape=False)

    transform = components.Transform(
        examples=example_gen.outputs["examples"],
        schema=schema_gen.outputs["schema"],
        module_file=module_file,
        force_tf_compat_v1=force_tf_compat_v1)

    trainer = components.Trainer(
        module_file=module_file,
        transformed_examples=transform.outputs["transformed_examples"],
        schema=schema_gen.outputs["schema"],
        transform_graph=transform.outputs["transform_graph"],
        train_args=trainer_pb2.TrainArgs(num_steps=100),
        eval_args=trainer_pb2.EvalArgs(num_steps=50))

    p = pipeline.Pipeline(
        pipeline_name="chicago_taxi_beam",
        pipeline_root=pipeline_root,
        components=[
            example_gen, statistics_gen, schema_gen, transform, trainer
        ],
        enable_cache=True,
        metadata_connection_config=metadata.sqlite_metadata_connection_config(
            metadata_path))
    BeamDagRunner().run(p)

    def join_unique_subdir(path):
      # Pipeline outputs land in a single run-specific subdirectory whose name
      # we don't know in advance; fail loudly if that assumption breaks.
      dirs = os.listdir(path)
      if len(dirs) != 1:
        raise ValueError(
            "expecting there to be only one subdirectory in %s, but "
            "subdirectories were: %s" % (path, dirs))
      return os.path.join(path, dirs[0])

    trainer_output_dir = join_unique_subdir(
        os.path.join(pipeline_root, "Trainer/model"))
    eval_model_dir = join_unique_subdir(
        os.path.join(trainer_output_dir, "eval_model_dir"))
    serving_model_dir = join_unique_subdir(
        os.path.join(trainer_output_dir,
                     "serving_model_dir/export/chicago-taxi"))
    transform_output_dir = join_unique_subdir(
        os.path.join(pipeline_root, "Transform/transform_graph"))
    transform_model_dir = os.path.join(transform_output_dir, "transform_fn")
    tft_saved_model_path = self.tft_saved_model_path(force_tf_compat_v1)

    # Replace any previously generated models with the fresh ones.
    shutil.rmtree(self.trained_saved_model_path(), ignore_errors=True)
    shutil.rmtree(self.tfma_saved_model_path(), ignore_errors=True)
    shutil.rmtree(tft_saved_model_path, ignore_errors=True)
    shutil.copytree(serving_model_dir, self.trained_saved_model_path())
    shutil.copytree(eval_model_dir, self.tfma_saved_model_path())
    shutil.copytree(transform_model_dir, tft_saved_model_path)
class WideChicagoTaxiDataset(ChicagoTaxiDataset):
  """Chicago taxi dataset with a TFT preprocessing_fn containing specified number of analyzers.

  Note that the analyzers are called within the corresponding mappers. Half of
  the mappers will be `tft.compute_and_apply_vocabulary`. Another half is split
  between `tft.bucketize` and `tft.scale_to_z_score`.
  """

  # Percentage of mappers in the preprocessing function of the given type. The
  # remaining mappers will be `tft.scale_to_z_score`.
  _VOCABS_SHARE = 0.5
  _BUCKETIZE_SHARE = 0.25

  def __init__(self, base_dir: Optional[Text] = None, num_analyzers: int = 10):
    """Constructor.

    Args:
      base_dir: Directory containing dataset artifacts (see BenchmarkDataset).
      num_analyzers: Total number of analyzers the preprocessing_fn will call.
    """
    super(WideChicagoTaxiDataset, self).__init__(base_dir)
    # ceil() so that small num_analyzers still yields at least one vocab and
    # one bucketize analyzer; the remainder goes to scale_to_z_score.
    self._num_vocabs = math.ceil(num_analyzers * self._VOCABS_SHARE)
    self._num_bucketize = math.ceil(num_analyzers * self._BUCKETIZE_SHARE)
    self._num_scale = num_analyzers - self._num_vocabs - self._num_bucketize

  def tft_preprocessing_fn(self):

    def wide_preprocessing_fn(inputs):
      """TFT preprocessing function.

      Args:
        inputs: Map from feature keys to raw not-yet-transformed features.

      Returns:
        Map from string feature key to transformed feature operations.
      """
      outputs = {}
      # Cycle over the available feature keys so we can create more analyzers
      # than there are distinct features.
      # pylint: disable=protected-access
      for idx, key in enumerate(
          itertools.islice(
              itertools.cycle(taxi_utils._BUCKET_FEATURE_KEYS),
              self._num_bucketize)):
        outputs["bucketized" + str(idx)] = tft.bucketize(
            taxi_utils._fill_in_missing(inputs[key]),
            taxi_utils._FEATURE_BUCKET_COUNT)
      for idx, key in enumerate(
          itertools.islice(
              itertools.cycle(taxi_utils._DENSE_FLOAT_FEATURE_KEYS),
              self._num_scale)):
        # Preserve this feature as a dense float, setting nan's to the mean.
        outputs["scaled" + str(idx)] = tft.scale_to_z_score(
            taxi_utils._fill_in_missing(inputs[key]))
      for idx, key in enumerate(
          itertools.islice(
              itertools.cycle(taxi_utils._VOCAB_FEATURE_KEYS),
              self._num_vocabs)):
        outputs["vocab" + str(idx)] = tft.compute_and_apply_vocabulary(
            taxi_utils._fill_in_missing(inputs[key]),
            top_k=taxi_utils._VOCAB_SIZE,
            num_oov_buckets=taxi_utils._OOV_SIZE)
      # Pass-through features.
      for key in taxi_utils._CATEGORICAL_FEATURE_KEYS + [taxi_utils._LABEL_KEY]:
        outputs[key] = inputs[key]
      return outputs

    return wide_preprocessing_fn
def get_dataset(base_dir=None):
  """Returns the standard Chicago taxi benchmark dataset."""
  return ChicagoTaxiDataset(base_dir)
def get_wide_dataset(base_dir=None, num_analyzers=10):
  """Returns the wide (many-analyzer) variant of the Chicago taxi dataset."""
  return WideChicagoTaxiDataset(base_dir, num_analyzers)
"""Data types util shared for orchestration."""
from typing import Dict, Iterable, List, Mapping, Optional, Union
from tfx import types
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from ml_metadata.proto import metadata_store_pb2
from ml_metadata.proto import metadata_store_service_pb2
def build_artifact_dict(
    proto_dict: Mapping[str, metadata_store_service_pb2.ArtifactStructList]
) -> Dict[str, List[types.Artifact]]:
  """Converts a proto artifact-struct map into a dict of artifact lists."""

  def _deserialize(struct):
    # Only the `artifact` member of the ArtifactStruct oneof is supported.
    if not struct.HasField('artifact'):
      raise RuntimeError('Only support artifact oneof field')
    pair = struct.artifact
    return artifact_utils.deserialize_artifact(pair.type, pair.artifact)

  return {
      key: [_deserialize(element) for element in struct_list.elements]
      for key, struct_list in proto_dict.items()
  }
def build_artifact_struct_dict(
    artifact_dict: Mapping[str, Iterable[types.Artifact]]
) -> Dict[str, metadata_store_service_pb2.ArtifactStructList]:
  """Converts a dict of artifacts into a proto artifact-struct map."""
  result = {}
  if not artifact_dict:
    return result
  for key, artifacts in artifact_dict.items():
    struct_list = metadata_store_service_pb2.ArtifactStructList()
    # Each artifact is wrapped together with its type so the receiver can
    # deserialize it without a separate type lookup.
    struct_list.elements.extend(
        metadata_store_service_pb2.ArtifactStruct(
            artifact=metadata_store_service_pb2.ArtifactAndType(
                artifact=artifact.mlmd_artifact, type=artifact.artifact_type))
        for artifact in artifacts)
    result[key] = struct_list
  return result
def build_value_dict(
    metadata_value_dict: Mapping[str, metadata_store_pb2.Value]
) -> Dict[str, types.Property]:
  """Converts MLMD value dict into plain value dict."""
  # `WhichOneof` names the populated field; `getattr` then reads that field.
  return {
      key: getattr(value, value.WhichOneof('value'))
      for key, value in metadata_value_dict.items()
  }
def build_metadata_value_dict(
    value_dict: Mapping[str, types.Property]
) -> Dict[str, metadata_store_pb2.Value]:
  """Converts plain value dict into MLMD value dict.

  Args:
    value_dict: Mapping from key to a plain str/int/float property value.

  Returns:
    Mapping from key to the corresponding metadata_store_pb2.Value.

  Raises:
    RuntimeError: If a value has an unsupported type (including bool).
  """
  result = {}
  if not value_dict:
    return result
  for k, v in value_dict.items():
    value = metadata_store_pb2.Value()
    if isinstance(v, str):
      value.string_value = v
    # bool is a subclass of int; reject it explicitly so behavior matches
    # set_metadata_value() in this module instead of silently serializing
    # True/False as int_value 1/0.
    elif isinstance(v, int) and not isinstance(v, bool):
      value.int_value = v
    elif isinstance(v, float):
      value.double_value = v
    else:
      raise RuntimeError('Unsupported type {} for key {}'.format(type(v), k))
    result[k] = value
  return result
def get_metadata_value_type(
    value: Union[pipeline_pb2.Value, types.Property]
) -> metadata_store_pb2.PropertyType:
  """Gets the metadata property type of a property value from a value.

  Args:
    value: The property value represented by pipeline_pb2.Value or a primitive
      property value type.

  Returns:
    A metadata_store_pb2.PropertyType.

  Raises:
    RuntimeError: If property value is still in RuntimeParameter form.
    ValueError: The value type is not supported.
  """
  if isinstance(value, int):
    return metadata_store_pb2.INT
  if isinstance(value, float):
    return metadata_store_pb2.DOUBLE
  if isinstance(value, str):
    return metadata_store_pb2.STRING
  if isinstance(value, pipeline_pb2.Value):
    if value.WhichOneof('value') != 'field_value':
      raise RuntimeError('Expecting field_value but got %s.' % value)
    # Map the populated oneof field of the inner MLMD value to its type.
    oneof_to_type = {
        'int_value': metadata_store_pb2.INT,
        'double_value': metadata_store_pb2.DOUBLE,
        'string_value': metadata_store_pb2.STRING,
    }
    value_type = value.field_value.WhichOneof('value')
    if value_type not in oneof_to_type:
      raise ValueError('Unexpected value type %s' % value_type)
    return oneof_to_type[value_type]
  raise ValueError('Unexpected value type %s' % type(value))
def get_value(tfx_value: pipeline_pb2.Value) -> types.Property:
  """Gets the primitive type value of a pipeline_pb2.Value instance.

  Args:
    tfx_value: A pipeline_pb2.Value message.

  Returns:
    The primitive type value of the tfx value.

  Raises:
    RuntimeError: when the value is still in RuntimeParameter form.
  """
  if tfx_value.WhichOneof('value') != 'field_value':
    raise RuntimeError('Expecting field_value but got %s.' % tfx_value)
  field_value = tfx_value.field_value
  return getattr(field_value, field_value.WhichOneof('value'))
def get_metadata_value(
    value: metadata_store_pb2.Value) -> Optional[types.Property]:
  """Gets the primitive type value of a metadata_store_pb2.Value instance.

  Args:
    value: A metadata_store_pb2.Value message.

  Returns:
    The primitive type value of metadata_store_pb2.Value instance if set,
    `None` otherwise.
  """
  set_field = value.WhichOneof('value')
  if set_field is None:
    return None
  return getattr(value, set_field)
def set_metadata_value(
    metadata_value: metadata_store_pb2.Value,
    value: Union[pipeline_pb2.Value,
                 types.Property]) -> metadata_store_pb2.Value:
  """Sets metadata property based on tfx value.

  Args:
    metadata_value: A metadata_store_pb2.Value message to be set.
    value: The value of the property in pipeline_pb2.Value form.

  Returns:
    A Value proto filled with the provided value.

  Raises:
    ValueError: If value type is not supported or is still RuntimeParameter.
  """
  if isinstance(value, pipeline_pb2.Value):
    if value.WhichOneof('value') != 'field_value':
      raise ValueError('Expecting field_value but got %s.' % value)
    metadata_value.CopyFrom(value.field_value)
    return metadata_value
  # bool is a subclass of int but has no MLMD counterpart, so it must fall
  # through to the unsupported-type error below.
  if isinstance(value, int) and not isinstance(value, bool):
    metadata_value.int_value = value
  elif isinstance(value, float):
    metadata_value.double_value = value
  elif isinstance(value, str):
    metadata_value.string_value = value
  else:
    raise ValueError('Unexpected type %s' % type(value))
  return metadata_value
"""Common data types for orchestration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, Type, Union
from absl import logging
from tfx import types
from tfx.utils import json_utils
# Regex pattern matching a json-serialized RuntimeParameter object.
# `\\*` (zero or more backslashes before each quote) deals with escaping in
# the json-serialized version of objects: the serialized parameter may itself
# be embedded, and thus re-escaped, inside another JSON document.
RUNTIME_PARAMETER_PATTERN = (r'({\\*"__class__\\*": \\*"RuntimeParameter\\*", '
                             r'.*?})')
# Matches just the (possibly escaped) quoted "RuntimeParameter" class literal.
PARAMETER_NAME_LITERAL = r'(\\*"RuntimeParameter\\*")'
class ExecutionDecision(object):
  """Records how the executor should perform the next execution.

  Attributes:
    input_dict: Updated key -> types.Artifact for inputs that will be used by
      actual execution.
    output_dict: Updated key -> types.Artifact for outputs that will be used
      by actual execution.
    exec_properties: Updated dict of other execution properties that will be
      used by actual execution.
    execution_id: Registered execution_id for the upcoming execution.
    use_cached_results: Whether or not to use a cached result.
  """

  def __init__(self,
               input_dict: Dict[Text, List[types.Artifact]],
               output_dict: Dict[Text, List[types.Artifact]],
               exec_properties: Dict[Text, Any],
               execution_id: int = None,
               use_cached_results: Optional[bool] = False):
    self.execution_id = execution_id
    self.use_cached_results = use_cached_results
    self.input_dict = input_dict
    self.output_dict = output_dict
    self.exec_properties = exec_properties
class ExecutionInfo(object):
  """Holds information populated during the execution phase.

  Attributes:
    input_dict: Updated key -> List of types.Artifact for inputs that was used
      during the actual execution.
    output_dict: Updated key -> List of types.Artifact for outputs that was
      generated during the actual execution.
    exec_properties: execution properties used in this execution.
    execution_id: Registered execution_id for the execution.
  """

  def __init__(self, input_dict: Dict[Text, List[types.Artifact]],
               output_dict: Dict[Text, List[types.Artifact]],
               exec_properties: Dict[Text, Any], execution_id: int):
    self.execution_id = execution_id
    self.exec_properties = exec_properties
    self.input_dict = input_dict
    self.output_dict = output_dict
class DriverArgs(object):
  """Arguments passed to the driver by the orchestration system.

  Attributes:
    enable_cache: whether cache is enabled in current execution.
    interactive_resolution: whether to skip MLMD channel artifact resolution,
      if artifacts are already resolved for a channel when running in
      interactive mode.
  """

  def __init__(self,
               enable_cache: bool = True,
               interactive_resolution: bool = False):
    self.interactive_resolution = interactive_resolution
    self.enable_cache = enable_cache
class PipelineInfo(object):
  """Pipeline info from the orchestration system.

  Attributes:
    pipeline_name: name of the pipeline. We expect this to be unique for
      different pipelines.
    pipeline_root: root directory of the pipeline. We expect this to be unique
      for different pipelines.
    run_id: optional uuid for a single run of the pipeline.
  """

  def __init__(self,
               pipeline_name: Text,
               pipeline_root: Text,
               run_id: Optional[Text] = None):
    self.run_id = run_id
    self.pipeline_name = pipeline_name
    self.pipeline_root = pipeline_root

  def __repr__(self):
    return (f'PipelineInfo('
            f'pipeline_name: {self.pipeline_name}, '
            f'pipeline_root: {self.pipeline_root}, '
            f'run_id: {self.run_id})')

  @property
  def pipeline_run_context_name(self) -> Text:
    """Context name for the current pipeline run."""
    return f'{self.pipeline_name}.{self.run_id}'

  @property
  def pipeline_context_name(self) -> Text:
    """Context name for the pipeline."""
    return self.pipeline_name
class ComponentInfo(object):
  """Component info.

  Attributes:
    component_type: type of the component, usually determined by the executor
      python path or image uri.
    component_id: a unique identifier of the component instance within
      pipeline.
    pipeline_info: the pipeline info of the current pipeline run.
  """

  def __init__(self, component_type: Text, component_id: Text,
               pipeline_info: PipelineInfo):
    self.pipeline_info = pipeline_info
    self.component_type = component_type
    self.component_id = component_id

  def __repr__(self):
    return ('ComponentInfo('
            'component_type: %s, '
            'component_id: %s, '
            'pipeline_info: %s)') % (self.component_type, self.component_id,
                                     self.pipeline_info)

  @property
  def component_run_context_name(self) -> Text:
    """Context name for the current component run."""
    # Run-scoped name when a run_id exists, otherwise a pipeline-scoped one.
    if self.pipeline_info.run_id:
      prefix = self.pipeline_info.pipeline_run_context_name
    else:
      prefix = self.pipeline_info.pipeline_context_name
    return '{}.{}'.format(prefix, self.component_id)
# TODO(b/146361011): Implement README.ml-pipelines-sdk.md checking mechanism preventing users from using
# RuntimeParameter in DAG runner other than Kubeflow Pipelines.
class RuntimeParameter(json_utils.Jsonable):
  """Runtime parameter.

  Currently only supported on KubeflowDagRunner.

  Attributes:
    name: The name of the runtime parameter.
    default: Default value for runtime params when it's not explicitly
      specified.
    ptype: The type of the runtime parameter.
    description: Description of the usage of the parameter.
  """

  def __init__(
      self,
      name: Text,
      ptype: Type = None,  # pylint: disable=g-bare-generic
      default: Optional[Union[int, float, Text]] = None,
      description: Optional[Text] = None):
    # logging.warn is a deprecated alias; use logging.warning.
    logging.warning('RuntimeParameter is only supported on Cloud-based DAG '
                    'runner currently.')
    if ptype and ptype not in [int, float, Text]:
      raise RuntimeError('Only str and scalar runtime parameters are supported')
    # Compare against None explicitly: `default and ptype` would skip the
    # consistency check for falsy defaults such as 0, 0.0 or ''.
    if default is not None and ptype and not isinstance(default, ptype):
      raise TypeError('Default value must be consistent with specified ptype')
    self.name = name
    self.default = default
    self.ptype = ptype
    self.description = description

  def __repr__(self):
    """Easily convert RuntimeParameter to str.

    This provides a unified way to call str(x) when x can be either str or
    RuntimeParameter. Note: if ptype == Text or None, the serialization will
    be wrapped in double quotes.

    Returns:
      The json serialized version of RuntimeParameter.
    """
    return json_utils.dumps(self)

  def __eq__(self, other):
    # Bug fix: the original tested `isinstance(other.__class__,
    # self.__class__)`, which is never true for ordinary classes, so equality
    # always evaluated to False even for identical parameters.
    return (isinstance(other, self.__class__) and
            self.name == other.name and self.default == other.default and
            self.ptype == other.ptype and self.description == other.description)

  def __hash__(self):
    """RuntimeParameter is uniquely identified by its name."""
    return self.name.__hash__()
"""Definition of Kubernetes TFX runner."""
import datetime
import json
from typing import List, Optional, Text, Type
import absl
from tfx.dsl.component.experimental import container_component
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.experimental.kubernetes import kubernetes_remote_runner
from tfx.orchestration.kubeflow import node_wrapper
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.utils import json_utils
from tfx.utils import kube_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
# Command prefix for the per-component container entrypoint; component-specific
# flags are appended to this list before the container is created.
_CONTAINER_COMMAND = [
    'python', '-m',
    'tfx.orchestration.experimental.kubernetes.container_entrypoint'
]

# Suffix added to the component id to avoid MLMD conflict when
# registering this component.
_WRAPPER_SUFFIX = '.Wrapper'

# Default TFX container image used when the runner config does not set one.
_TFX_IMAGE = 'tensorflow/tfx'
def get_default_kubernetes_metadata_config(
) -> metadata_store_pb2.ConnectionConfig:
  """Returns the default metadata connection config for a kubernetes cluster.

  Returns:
    A config proto that will be serialized as JSON and passed to the running
    container so the TFX component driver is able to communicate with MLMD in
    a kubernetes cluster.
  """
  connection_config = metadata_store_pb2.ConnectionConfig()
  mysql_config = connection_config.mysql
  mysql_config.host = 'mysql'
  mysql_config.port = 3306
  mysql_config.database = 'mysql'
  mysql_config.user = 'root'
  mysql_config.password = ''
  return connection_config
def launch_container_component(
    component: base_node.BaseNode,
    component_launcher_class: Type[
        base_component_launcher.BaseComponentLauncher],
    component_config: base_component_config.BaseComponentConfig,
    pipeline: tfx_pipeline.Pipeline):
  """Launches a single component via the kubernetes component launcher.

  Args:
    component: Container component to be executed.
    component_launcher_class: The class of the launcher to launch the
      component.
    component_config: component config to launch the component.
    pipeline: Logical pipeline that contains pipeline related information.
  """
  driver_args = data_types.DriverArgs(enable_cache=pipeline.enable_cache)
  metadata_connection = metadata.Metadata(pipeline.metadata_connection_config)
  launcher = component_launcher_class.create(
      component=component,
      pipeline_info=pipeline.pipeline_info,
      driver_args=driver_args,
      metadata_connection=metadata_connection,
      beam_pipeline_args=pipeline.beam_pipeline_args,
      additional_pipeline_args=pipeline.additional_pipeline_args,
      component_config=component_config)
  absl.logging.info('Component %s is running.', component.id)
  launcher.launch()
  absl.logging.info('Component %s is finished.', component.id)
class KubernetesDagRunnerConfig(pipeline_config.PipelineConfig):
  """Runtime configuration parameters specific to execution on Kubernetes."""

  def __init__(self,
               tfx_image: Optional[Text] = None,
               supported_launcher_classes: Optional[List[Type[
                   base_component_launcher.BaseComponentLauncher]]] = None,
               **kwargs):
    """Creates a KubernetesDagRunnerConfig object.

    Args:
      tfx_image: The TFX container image to use in the pipeline.
      supported_launcher_classes: Optional list of component launcher classes
        that are supported by the current pipeline. List sequence determines
        the order in which launchers are chosen for each component being run.
      **kwargs: keyword args for PipelineConfig.
    """
    # An empty list is treated like None: fall back to the defaults.
    if not supported_launcher_classes:
      supported_launcher_classes = [
          in_process_component_launcher.InProcessComponentLauncher,
          kubernetes_component_launcher.KubernetesComponentLauncher,
      ]
    super(KubernetesDagRunnerConfig, self).__init__(
        supported_launcher_classes=supported_launcher_classes, **kwargs)
    self.tfx_image = tfx_image or _TFX_IMAGE
class KubernetesDagRunner(tfx_runner.TfxRunner):
  """TFX runner on Kubernetes."""

  def __init__(self, config: Optional[KubernetesDagRunnerConfig] = None):
    """Initializes KubernetesDagRunner as a TFX orchestrator.

    Args:
      config: Optional pipeline config for customizing the launching of each
        component. Defaults to pipeline config that supports
        InProcessComponentLauncher and KubernetesComponentLauncher.
    """
    if config is None:
      config = KubernetesDagRunnerConfig()
    super(KubernetesDagRunner, self).__init__(config)

  def run(self, pipeline: tfx_pipeline.Pipeline) -> None:
    """Deploys given logical pipeline on Kubernetes.

    Args:
      pipeline: Logical pipeline containing pipeline args and components.
    """
    if not pipeline.pipeline_info.run_id:
      pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()
    # When invoked from outside the cluster, delegate the whole run to a
    # Kubernetes job that re-enters this runner inside the cluster.
    if not kube_utils.is_inside_cluster():
      kubernetes_remote_runner.run_as_kubernetes_job(
          pipeline=pipeline, tfx_image=self._config.tfx_image)
      return
    # TODO(ericlege): Support running components in parallel.
    ran_components = set()
    # Runs component in topological order.
    for component in pipeline.components:
      # Verify that components are in topological order.
      if hasattr(component, 'upstream_nodes') and component.upstream_nodes:
        for upstream_node in component.upstream_nodes:
          assert upstream_node in ran_components, ('Components is not in '
                                                   'topological order')
      (component_launcher_class,
       component_config) = config_utils.find_component_launch_info(
           self._config, component)
      # Check if the component is launchable as a container component.
      if kubernetes_component_launcher.KubernetesComponentLauncher.can_launch(
          component.executor_spec, component_config):
        launch_container_component(component, component_launcher_class,
                                   component_config, pipeline)
      # Otherwise, the component should be launchable with the in process
      # component launcher. Wrap the component into a container component so
      # it still executes in its own pod.
      elif in_process_component_launcher.InProcessComponentLauncher.can_launch(
          component.executor_spec, component_config):
        wrapped_component = self._wrap_container_component(
            component=component,
            component_launcher_class=component_launcher_class,
            component_config=component_config,
            pipeline=pipeline)
        # Component launch info is updated by wrapping the component into a
        # container component. Therefore, these properties need to be
        # reloaded.
        (wrapped_component_launcher_class,
         wrapped_component_config) = config_utils.find_component_launch_info(
             self._config, wrapped_component)
        launch_container_component(wrapped_component,
                                   wrapped_component_launcher_class,
                                   wrapped_component_config, pipeline)
      else:
        raise ValueError('Can not find suitable launcher for component.')
      ran_components.add(component)

  def _wrap_container_component(
      self,
      component: base_node.BaseNode,
      component_launcher_class: Type[
          base_component_launcher.BaseComponentLauncher],
      component_config: Optional[base_component_config.BaseComponentConfig],
      pipeline: tfx_pipeline.Pipeline,
  ) -> base_node.BaseNode:
    """Wraps an in-process component into a container component.

    Args:
      component: Component to be executed.
      component_launcher_class: The class of the launcher to launch the
        component.
      component_config: component config to launch the component.
      pipeline: Logical pipeline that contains pipeline related information.

    Returns:
      A container component that runs the wrapped component upon execution.
    """
    component_launcher_class_path = '.'.join([
        component_launcher_class.__module__, component_launcher_class.__name__
    ])
    # NodeWrapper strips unserializable attributes before json serialization.
    serialized_component = json_utils.dumps(node_wrapper.NodeWrapper(component))
    arguments = [
        '--pipeline_name',
        pipeline.pipeline_info.pipeline_name,
        '--pipeline_root',
        pipeline.pipeline_info.pipeline_root,
        '--run_id',
        pipeline.pipeline_info.run_id,
        '--metadata_config',
        json_format.MessageToJson(
            message=get_default_kubernetes_metadata_config(),
            preserving_proto_field_name=True),
        '--beam_pipeline_args',
        json.dumps(pipeline.beam_pipeline_args),
        '--additional_pipeline_args',
        json.dumps(pipeline.additional_pipeline_args),
        '--component_launcher_class_path',
        component_launcher_class_path,
        '--serialized_component',
        serialized_component,
        '--component_config',
        json_utils.dumps(component_config),
    ]
    # Outputs/Parameters fields are not used as they are contained in
    # the serialized component.
    return container_component.create_container_component(
        name=component.__class__.__name__,
        outputs={},
        parameters={},
        image=self._config.tfx_image,
        command=_CONTAINER_COMMAND + arguments)(
            instance_name=component._instance_name + _WRAPPER_SUFFIX)  # pylint: disable=protected-access
"""Kubernetes TFX runner for out-of-cluster orchestration."""
import datetime
import json
import time
from typing import Dict, List, Text
import absl
from kubernetes import client
from tfx.dsl.components.base import base_node
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.kubeflow import node_wrapper
from tfx.utils import json_utils
from tfx.utils import kube_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
# Command prefix for the in-cluster orchestrator entrypoint; the serialized
# pipeline arguments are appended before the Kubernetes job is created.
_ORCHESTRATOR_COMMAND = [
    'python', '-m',
    'tfx.orchestration.experimental.kubernetes.orchestrator_container_entrypoint'
]

# Number of seconds to wait for a Kubernetes job to spawn a pod.
# This is expected to take only a few seconds.
JOB_CREATION_TIMEOUT = 300
def run_as_kubernetes_job(pipeline: tfx_pipeline.Pipeline,
                          tfx_image: Text) -> None:
  """Submits and runs a TFX pipeline from outside the cluster.

  Args:
    pipeline: Logical pipeline containing pipeline args and components.
    tfx_image: Container image URI for the TFX container.

  Raises:
    RuntimeError: When an error is encountered running the Kubernetes Job.
  """
  # TODO(ccy): Look for alternative serialization schemes once available.
  serialized_pipeline = _serialize_pipeline(pipeline)
  arguments = [
      '--serialized_pipeline',
      serialized_pipeline,
      '--tfx_image',
      tfx_image,
  ]
  batch_api = kube_utils.make_batch_v1_api()
  # NOTE(review): Kubernetes object names must be lowercase RFC 1123 labels;
  # 'Job_' contains an uppercase letter and an underscore. Presumably
  # kube_utils.make_job_object sanitizes the name -- confirm.
  job_name = 'Job_' + pipeline.pipeline_info.run_id
  pod_label = kube_utils.sanitize_pod_name(job_name)
  container_name = 'pipeline-orchestrator'
  job = kube_utils.make_job_object(
      name=job_name,
      container_image=tfx_image,
      command=_ORCHESTRATOR_COMMAND + arguments,
      container_name=container_name,
      pod_labels={
          'job-name': pod_label,
      },
      service_account_name=kube_utils.TFX_SERVICE_ACCOUNT,
  )
  try:
    batch_api.create_namespaced_job('default', job, pretty=True)
  except client.rest.ApiException as e:
    raise RuntimeError('Failed to submit job! \nReason: %s\nBody: %s' %
                       (e.reason, e.body))
  # Wait for pod to start.
  orchestrator_pods = []
  core_api = kube_utils.make_core_v1_api()
  start_time = datetime.datetime.utcnow()
  # Wait for the kubernetes job to launch a pod.
  # NOTE(review): timedelta.seconds ignores the days component; harmless for
  # a 300 s timeout, but .total_seconds() would be strictly correct.
  while not orchestrator_pods and (datetime.datetime.utcnow() -
                                   start_time).seconds < JOB_CREATION_TIMEOUT:
    try:
      orchestrator_pods = core_api.list_namespaced_pod(
          namespace='default',
          label_selector='job-name={}'.format(pod_label)).items
    except client.rest.ApiException as e:
      # A 404 simply means the pod has not appeared yet; keep polling.
      if e.status != 404:
        raise RuntimeError('Unknown error! \nReason: %s\nBody: %s' %
                           (e.reason, e.body))
    time.sleep(1)
  # Transient orchestrator should only have 1 pod.
  if len(orchestrator_pods) != 1:
    raise RuntimeError('Expected 1 pod launched by Kubernetes job, found %d' %
                       len(orchestrator_pods))
  orchestrator_pod = orchestrator_pods.pop()
  pod_name = orchestrator_pod.metadata.name
  absl.logging.info('Waiting for pod "default:%s" to start.', pod_name)
  kube_utils.wait_pod(
      core_api,
      pod_name,
      'default',
      exit_condition_lambda=kube_utils.pod_is_not_pending,
      condition_description='non-pending status')
  # Stream logs from orchestrator pod so the local caller sees progress.
  absl.logging.info('Start log streaming for pod "default:%s".', pod_name)
  try:
    logs = core_api.read_namespaced_pod_log(
        name=pod_name,
        namespace='default',
        container=container_name,
        follow=True,
        _preload_content=False).stream()
  except client.rest.ApiException as e:
    raise RuntimeError(
        'Failed to stream the logs from the pod!\nReason: %s\nBody: %s' %
        (e.reason, e.body))
  for log in logs:
    absl.logging.info(log.decode().rstrip('\n'))
  # Block until the pod reaches a terminal phase, then surface failures.
  resp = kube_utils.wait_pod(
      core_api,
      pod_name,
      'default',
      exit_condition_lambda=kube_utils.pod_is_done,
      condition_description='done state',
      exponential_backoff=True)
  if resp.status.phase == kube_utils.PodPhase.FAILED.value:
    raise RuntimeError('Pod "default:%s" failed with status "%s".' %
                       (pod_name, resp.status))
def _extract_downstream_ids(
    components: List[base_node.BaseNode]) -> Dict[Text, List[Text]]:
  """Extracts downstream component ids from a list of components.

  Args:
    components: List of TFX Components.

  Returns:
    Mapping from component id to ids of its downstream components for
    each component.
  """
  return {
      component.id: [node.id for node in component.downstream_nodes]
      for component in components
  }
def _serialize_pipeline(pipeline: tfx_pipeline.Pipeline) -> Text:
  """Serializes a TFX pipeline.

  To be replaced with the TFX Intermediate Representation:
  tensorflow/community#271. This serialization procedure extracts from
  the pipeline properties necessary for reconstructing the pipeline instance
  from within the cluster. For properties such as components and metadata
  config that can not be directly dumped with JSON, we use NodeWrapper and
  MessageToJson to serialize them beforehand.

  Args:
    pipeline: Logical pipeline containing pipeline args and components.

  Returns:
    Pipeline serialized as JSON string.
  """
  serialized_components = [
      json_utils.dumps(node_wrapper.NodeWrapper(component))
      for component in pipeline.components
  ]
  # The graph edges are lost when components are serialized one by one, so
  # pass downstream_ids separately; the orchestrator container uses them to
  # reconstruct the pipeline DAG.
  downstream_ids = _extract_downstream_ids(pipeline.components)
  metadata_connection_json = json_format.MessageToJson(
      message=pipeline.metadata_connection_config,
      preserving_proto_field_name=True,
  )
  return json.dumps({
      'pipeline_name': pipeline.pipeline_info.pipeline_name,
      'pipeline_root': pipeline.pipeline_info.pipeline_root,
      'enable_cache': pipeline.enable_cache,
      'components': serialized_components,
      'downstream_ids': downstream_ids,
      'metadata_connection_config': metadata_connection_json,
      'beam_pipeline_args': pipeline.beam_pipeline_args,
  })
def deserialize_pipeline(serialized_pipeline: Text) -> tfx_pipeline.Pipeline:
  """Deserializes a TFX pipeline.

  To be replaced with the TFX Intermediate Representation:
  tensorflow/community#271. This deserialization procedure reverses the
  serialization procedure and reconstructs the pipeline instance.

  Args:
    serialized_pipeline: Pipeline JSON string serialized with the procedure
      from _serialize_pipeline.

  Returns:
    Original pipeline containing pipeline args and components.

  Raises:
    ValueError: If downstream_ids is not a dict or is inconsistent with the
      component list.
  """
  pipeline = json.loads(serialized_pipeline)
  components = [
      json_utils.loads(component) for component in pipeline['components']
  ]
  metadata_connection_config = metadata_store_pb2.ConnectionConfig()
  json_format.Parse(pipeline['metadata_connection_config'],
                    metadata_connection_config)
  # Restore component dependencies.
  downstream_ids = pipeline['downstream_ids']
  if not isinstance(downstream_ids, dict):
    raise ValueError("downstream_ids needs to be a 'dict'.")
  if len(downstream_ids) != len(components):
    # Bug fix: the format arguments must be a single tuple. The original
    # applied `%` to only the first value and passed the second as an extra
    # ValueError argument, raising a TypeError instead of this error.
    raise ValueError(
        'Wrong number of items in downstream_ids. Expected: %s. Actual: %d' %
        (len(components), len(downstream_ids)))
  id_to_component = {component.id: component for component in components}
  for component in components:
    # Since downstream and upstream node attributes are discarded during the
    # serialization process, we initialize them here.
    component._upstream_nodes = set()  # pylint: disable=protected-access
    component._downstream_nodes = set()  # pylint: disable=protected-access
    # Restore dropped instance name from component id.
    # NOTE(review): TFX component ids are usually '.'-separated; splitting on
    # ',' looks suspicious -- confirm against the id format the serializer
    # produces.
    component._instance_name = component.id.split(',')[-1]  # pylint: disable=protected-access
  for upstream_id, downstream_id_list in downstream_ids.items():
    upstream_component = id_to_component[upstream_id]
    for downstream_id in downstream_id_list:
      upstream_component.add_downstream_node(id_to_component[downstream_id])
  return tfx_pipeline.Pipeline(
      pipeline_name=pipeline['pipeline_name'],
      pipeline_root=pipeline['pipeline_root'],
      components=components,
      enable_cache=pipeline['enable_cache'],
      metadata_connection_config=metadata_connection_config,
      beam_pipeline_args=pipeline['beam_pipeline_args'],
  )
"""Chicago taxi example using TFX Kubernetes Orchestrator."""
import os
from typing import List, Text
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import pipeline
from tfx.orchestration.experimental.kubernetes import kubernetes_dag_runner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'chicago_taxi_beam'

# Directory and data locations (uses Google Cloud Storage).
_input_bucket = 'gs://my-bucket'
_output_bucket = 'gs://my-bucket'

# This example assumes that the taxi data is stored in a Google Cloud Storage
# bucket named taxi under `gs://${_input_bucket}/data` and the taxi utility
# function is stored at `gs://${_input_bucket}/taxi_utils.py`.
# Feel free to customize this as needed.
_data_root = os.path.join(_input_bucket, 'data')
_module_file = os.path.join(_input_bucket, 'taxi_utils.py')

# Directory for pipeline outputs.
_tfx_root = os.path.join(_output_bucket, 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)

# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_tfx_root, 'serving_model', _pipeline_name)

# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available during
    # execution time.
    '--direct_num_workers=0',
]
def create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                    module_file: Text, serving_model_dir: Text,
                    beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: Name of the pipeline.
    pipeline_root: Root directory for pipeline output artifacts.
    data_root: Directory containing the input CSV data.
    module_file: Path to the module file with the Transform/Trainer callbacks.
    serving_model_dir: Directory to which blessed models are pushed.
    beam_pipeline_args: Command line arguments for Beam powered components.

  Returns:
    A `pipeline.Pipeline` wired for execution with a Kubernetes metadata
    connection config.
  """
  examples = external_input(data_root)

  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input=examples)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Change threshold will be ignored if there is no
                          # baseline model resolved from MLMD (first run).
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)

  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))

  config = kubernetes_dag_runner.get_default_kubernetes_metadata_config()
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen,
          statistics_gen,
          schema_gen,
          example_validator,
          transform,
          trainer,
          model_resolver,
          evaluator,
          pusher,
      ],
      enable_cache=False,
      metadata_connection_config=config,
      beam_pipeline_args=beam_pipeline_args)
if __name__ == '__main__':
  absl.logging.set_verbosity(absl.logging.INFO)
  # Builds the pipeline and runs it on Kubernetes via the Kubernetes DAG
  # runner. (Fixed: extraction junk appended to the final line was removed.)
  kubernetes_dag_runner.KubernetesDagRunner().run(
      create_pipeline(
          pipeline_name=_pipeline_name,
          pipeline_root=_pipeline_root,
          data_root=_data_root,
          module_file=_module_file,
          serving_model_dir=_serving_model_dir,
          beam_pipeline_args=_beam_pipeline_args))
"""Interfaces and functionality for dealing with service jobs."""
import abc
from typing import Set
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import task as task_lib
class ServiceJobManager(abc.ABC):
  """Interface for service job manager.

  Implementations are responsible for starting, health-checking and stopping
  the long-running service jobs associated with pipeline nodes.
  """

  @abc.abstractmethod
  def ensure_services(
      self, pipeline_state: pstate.PipelineState) -> Set[task_lib.NodeUid]:
    """Ensures necessary service jobs are started and healthy for the pipeline.

    Service jobs are long-running jobs associated with a node or the pipeline
    that persist across executions (eg: worker pools, Tensorboard, etc).
    Service jobs are started before the nodes that depend on them are started.

    `ensure_services` will be called in the orchestration loop periodically and
    is expected to:

    1. Start any service jobs required by the pipeline nodes.
    2. Probe job health and handle failures. If a service job fails, the
       corresponding node uids should be returned.
    3. Optionally stop service jobs that are no longer needed. Whether or not a
       service job is needed is context dependent, for eg: in a typical sync
       pipeline, one may want the Tensorboard job to continue running even
       after the corresponding trainer has completed but others like worker
       pool services may be shut down.

    Args:
      pipeline_state: A `PipelineState` object for an active pipeline.

    Returns:
      Set of NodeUids of nodes whose service jobs are in a state of permanent
      failure.
    """

  @abc.abstractmethod
  def stop_services(self, pipeline_state: pstate.PipelineState) -> None:
    """Stops all service jobs associated with the pipeline.

    Args:
      pipeline_state: A `PipelineState` object for an active pipeline.
    """

  @abc.abstractmethod
  def is_pure_service_node(self, pipeline_state: pstate.PipelineState,
                           node_id: str) -> bool:
    """Returns `True` if the given node only has service job(s).

    Args:
      pipeline_state: A `PipelineState` object for an active pipeline.
      node_id: Id of the node in the pipeline to be checked.

    Returns:
      `True` if the node only has service job(s).
    """
"""Pipeline-level operations."""
import copy
import functools
import threading
import time
from typing import List, Optional, Sequence, Set, Text
from absl import logging
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
# A coarse grained lock is used to ensure serialization of pipeline operations
# since there isn't README.ml-pipelines-sdk.md suitable MLMD transaction API.
_PIPELINE_OPS_LOCK = threading.RLock()
def _pipeline_ops_lock(fn):
"""Decorator to run `fn` within `_PIPELINE_OPS_LOCK` context."""
@functools.wraps(fn)
def _wrapper(*args, **kwargs):
with _PIPELINE_OPS_LOCK:
return fn(*args, **kwargs)
return _wrapper
def _to_status_not_ok_error(fn):
"""Decorator to catch exceptions and re-raise README.ml-pipelines-sdk.md `status_lib.StatusNotOkError`."""
@functools.wraps(fn)
def _wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
logging.exception('Error raised by `%s`:', fn.__name__)
if isinstance(e, status_lib.StatusNotOkError):
raise
raise status_lib.StatusNotOkError(
code=status_lib.Code.UNKNOWN,
message=f'`{fn.__name__}` error: {str(e)}')
return _wrapper
@_pipeline_ops_lock
def save_pipeline_property(mlmd_handle: metadata.Metadata,
                           pipeline_uid: task_lib.PipelineUid,
                           property_key: Text, property_value: Text) -> None:
  """Saves a property to the pipeline execution.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline_uid: Uid of the pipeline to be updated.
    property_key: Key of the property to be saved.
    property_value: Value of the property to be saved.
  """
  # Loading through the context manager commits the mutated state back to MLMD
  # when the `with` block exits.
  with pstate.PipelineState.load(mlmd_handle,
                                 pipeline_uid) as loaded_pipeline_state:
    loaded_pipeline_state.save_property(property_key, property_value)
@_pipeline_ops_lock
def remove_pipeline_property(mlmd_handle: metadata.Metadata,
                             pipeline_uid: task_lib.PipelineUid,
                             property_key: Text) -> None:
  """Removes a property from the pipeline execution.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline_uid: Uid of the pipeline to be updated.
    property_key: Key of the property to be removed.
  """
  # Loading through the context manager commits the mutated state back to MLMD
  # when the `with` block exits.
  with pstate.PipelineState.load(mlmd_handle,
                                 pipeline_uid) as loaded_pipeline_state:
    loaded_pipeline_state.remove_property(property_key)
@_to_status_not_ok_error
@_pipeline_ops_lock
def initiate_pipeline_start(
    mlmd_handle: metadata.Metadata,
    pipeline: pipeline_pb2.Pipeline) -> pstate.PipelineState:
  """Initiates a pipeline start operation.

  Upon success, MLMD is updated to signal that the given pipeline must be
  started.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline: IR of the pipeline to start.

  Returns:
    The `PipelineState` object upon success.

  Raises:
    status_lib.StatusNotOkError: Failure to initiate pipeline start.
  """
  # Entering and immediately exiting the context manager creates the new
  # pipeline execution and commits it to MLMD.
  with pstate.PipelineState.new(mlmd_handle, pipeline) as pipeline_state:
    pass
  return pipeline_state
# Default time (in seconds) to wait for an execution to become inactive.
DEFAULT_WAIT_FOR_INACTIVATION_TIMEOUT_SECS = 120.0


@_to_status_not_ok_error
def stop_pipeline(
    mlmd_handle: metadata.Metadata,
    pipeline_uid: task_lib.PipelineUid,
    timeout_secs: float = DEFAULT_WAIT_FOR_INACTIVATION_TIMEOUT_SECS) -> None:
  """Stops a pipeline.

  Initiates a pipeline stop operation and waits for the pipeline execution to
  be gracefully stopped in the orchestration loop.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline_uid: Uid of the pipeline to be stopped.
    timeout_secs: Amount of time in seconds to wait for pipeline to stop.

  Raises:
    status_lib.StatusNotOkError: Failure to initiate pipeline stop.
  """
  with _PIPELINE_OPS_LOCK:
    with pstate.PipelineState.load(mlmd_handle, pipeline_uid) as pipeline_state:
      pipeline_state.initiate_stop()
  # NOTE(review): indentation was reconstructed; the wait is placed outside
  # `_PIPELINE_OPS_LOCK` so the (locked) orchestration loop can make progress
  # stopping the pipeline — confirm against upstream source.
  _wait_for_inactivation(
      mlmd_handle, pipeline_state.execution, timeout_secs=timeout_secs)
@_to_status_not_ok_error
@_pipeline_ops_lock
def initiate_node_start(mlmd_handle: metadata.Metadata,
                        node_uid: task_lib.NodeUid) -> pstate.PipelineState:
  """Initiates a node start operation for a node of an active pipeline.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: Uid of the node to be started.

  Returns:
    The `PipelineState` object of the node's pipeline upon success.
  """
  with pstate.PipelineState.load(mlmd_handle,
                                 node_uid.pipeline_uid) as pipeline_state:
    pipeline_state.initiate_node_start(node_uid)
  return pipeline_state
@_to_status_not_ok_error
def stop_node(
    mlmd_handle: metadata.Metadata,
    node_uid: task_lib.NodeUid,
    timeout_secs: float = DEFAULT_WAIT_FOR_INACTIVATION_TIMEOUT_SECS) -> None:
  """Stops a node in a pipeline.

  Initiates a node stop operation and waits for the node execution to become
  inactive.

  Args:
    mlmd_handle: A handle to the MLMD db.
    node_uid: Uid of the node to be stopped.
    timeout_secs: Amount of time in seconds to wait for node to stop.

  Raises:
    status_lib.StatusNotOkError: Failure to stop the node.
  """
  with _PIPELINE_OPS_LOCK:
    with pstate.PipelineState.load(mlmd_handle,
                                   node_uid.pipeline_uid) as pipeline_state:
      nodes = pstate.get_all_pipeline_nodes(pipeline_state.pipeline)
      filtered_nodes = [n for n in nodes if n.node_info.id == node_uid.node_id]
      if len(filtered_nodes) != 1:
        raise status_lib.StatusNotOkError(
            code=status_lib.Code.INTERNAL,
            message=(
                f'`stop_node` operation failed, unable to find node to stop: '
                f'{node_uid}'))
      node = filtered_nodes[0]
      pipeline_state.initiate_node_stop(node_uid)

    executions = task_gen_utils.get_executions(mlmd_handle, node)
    active_executions = [
        e for e in executions if execution_lib.is_execution_active(e)
    ]
    if not active_executions:
      # If there are no active executions, we're done.
      return
    if len(active_executions) > 1:
      # A node is expected to have at most one active execution at a time.
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INTERNAL,
          message=(
              f'Unexpected multiple active executions for node: {node_uid}'))
  # NOTE(review): indentation was reconstructed; the wait is placed outside
  # `_PIPELINE_OPS_LOCK` so the (locked) orchestration loop can make progress
  # stopping the node — confirm against upstream source.
  _wait_for_inactivation(
      mlmd_handle, active_executions[0], timeout_secs=timeout_secs)
@_to_status_not_ok_error
def _wait_for_inactivation(
    mlmd_handle: metadata.Metadata,
    execution: metadata_store_pb2.Execution,
    timeout_secs: float = DEFAULT_WAIT_FOR_INACTIVATION_TIMEOUT_SECS) -> None:
  """Waits for the given execution to become inactive.

  Polls MLMD for the execution state, sleeping between attempts, until the
  execution is inactive or `timeout_secs` elapses.

  Args:
    mlmd_handle: A handle to the MLMD db.
    execution: Execution whose inactivation is waited.
    timeout_secs: Amount of time in seconds to wait.

  Raises:
    StatusNotOkError: With error code `DEADLINE_EXCEEDED` if execution is not
      inactive after waiting approx. `timeout_secs`.
  """
  # Poll at most every 10s, but at least 4 times within the timeout window.
  polling_interval_secs = min(10.0, timeout_secs / 4)
  end_time = time.time() + timeout_secs
  while end_time - time.time() > 0:
    updated_executions = mlmd_handle.store.get_executions_by_id([execution.id])
    if not execution_lib.is_execution_active(updated_executions[0]):
      return
    # Never sleep past the deadline.
    time.sleep(max(0, min(polling_interval_secs, end_time - time.time())))
  raise status_lib.StatusNotOkError(
      code=status_lib.Code.DEADLINE_EXCEEDED,
      message=(f'Timed out ({timeout_secs} secs) waiting for execution '
               f'inactivation.'))
@_to_status_not_ok_error
@_pipeline_ops_lock
def orchestrate(
    mlmd_handle: metadata.Metadata,
    task_queue: tq.TaskQueue,
    service_job_manager: Optional[service_jobs.ServiceJobManager] = None,
) -> None:
  """Performs a single iteration of the orchestration loop.

  Embodies the core functionality of the main orchestration loop that scans
  MLMD pipeline execution states, generates and enqueues the tasks to be
  performed.

  Args:
    mlmd_handle: A handle to the MLMD db.
    task_queue: A `TaskQueue` instance into which any tasks will be enqueued.
    service_job_manager: An optional `ServiceJobManager` instance if service
      jobs are supported.

  Raises:
    status_lib.StatusNotOkError: If error generating tasks.
  """
  pipeline_states = _get_pipeline_states(mlmd_handle)
  if not pipeline_states:
    logging.info('No active pipelines to run.')
    return

  # Partition pipelines into stop-initiated vs active; anything else is an
  # internal inconsistency.
  active_pipeline_states = []
  stop_initiated_pipeline_states = []
  for pipeline_state in pipeline_states:
    if pipeline_state.is_stop_initiated():
      stop_initiated_pipeline_states.append(pipeline_state)
    elif execution_lib.is_execution_active(pipeline_state.execution):
      active_pipeline_states.append(pipeline_state)
    else:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INTERNAL,
          message=(f'Found pipeline (uid: {pipeline_state.pipeline_uid}) which '
                   f'is neither active nor stop-initiated.'))

  if stop_initiated_pipeline_states:
    logging.info(
        'Stop-initiated pipeline uids:\n%s', '\n'.join(
            str(pipeline_state.pipeline_uid)
            for pipeline_state in stop_initiated_pipeline_states))
    _process_stop_initiated_pipelines(mlmd_handle, task_queue,
                                      service_job_manager,
                                      stop_initiated_pipeline_states)

  if active_pipeline_states:
    logging.info(
        'Active (excluding stop-initiated) pipeline uids:\n%s', '\n'.join(
            str(pipeline_state.pipeline_uid)
            for pipeline_state in active_pipeline_states))
    _process_active_pipelines(mlmd_handle, task_queue, service_job_manager,
                              active_pipeline_states)
def _get_pipeline_states(
    mlmd_handle: metadata.Metadata) -> List[pstate.PipelineState]:
  """Scans MLMD and returns pipeline states for all orchestrator contexts.

  Args:
    mlmd_handle: A handle to the MLMD db.

  Returns:
    List of `PipelineState` objects, one per orchestrator context that has an
    associated active pipeline.
  """
  contexts = pstate.get_orchestrator_contexts(mlmd_handle)
  result = []
  for context in contexts:
    try:
      pipeline_state = pstate.PipelineState.load_from_orchestrator_context(
          mlmd_handle, context)
    except status_lib.StatusNotOkError as e:
      if e.code == status_lib.Code.NOT_FOUND:
        # Ignore any old contexts with no associated active pipelines.
        logging.info(e.message)
        continue
      else:
        raise
    result.append(pipeline_state)
  return result
def _get_pure_service_node_ids(
    service_job_manager: service_jobs.ServiceJobManager,
    pipeline_state: pstate.PipelineState) -> Set[str]:
  """Returns ids of the pipeline's nodes that only have service job(s)."""
  return {
      node.node_info.id
      for node in pstate.get_all_pipeline_nodes(pipeline_state.pipeline)
      if service_job_manager.is_pure_service_node(pipeline_state,
                                                  node.node_info.id)
  }
def _process_stop_initiated_pipelines(
    mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
    service_job_manager: Optional[service_jobs.ServiceJobManager],
    pipeline_states: Sequence[pstate.PipelineState]) -> None:
  """Processes stop initiated pipelines.

  For each pipeline, enqueues cancellation tasks for nodes with active
  executions. Once no active executions remain, stops the pipeline's service
  jobs (if a service job manager is provided) and marks the pipeline execution
  CANCELED in MLMD.

  Args:
    mlmd_handle: A handle to the MLMD db.
    task_queue: A `TaskQueue` instance into which cancellation tasks are
      enqueued.
    service_job_manager: An optional `ServiceJobManager` instance if service
      jobs are supported.
    pipeline_states: `PipelineState`s of the stop-initiated pipelines.
  """
  for pipeline_state in pipeline_states:
    pipeline = pipeline_state.pipeline
    # Pure service nodes have no executions of their own to cancel.
    pure_service_node_ids = _get_pure_service_node_ids(
        service_job_manager, pipeline_state) if service_job_manager else set()
    execution = pipeline_state.execution
    has_active_executions = False
    for node in pstate.get_all_pipeline_nodes(pipeline):
      if node.node_info.id not in pure_service_node_ids:
        if _maybe_enqueue_cancellation_task(mlmd_handle, pipeline, node,
                                            task_queue):
          has_active_executions = True
    if not has_active_executions:
      if service_job_manager is not None:
        # Stop all the services associated with the pipeline.
        service_job_manager.stop_services(pipeline_state)
      # Update pipeline execution state in MLMD.
      updated_execution = copy.deepcopy(execution)
      updated_execution.last_known_state = metadata_store_pb2.Execution.CANCELED
      mlmd_handle.store.put_executions([updated_execution])
def _process_active_pipelines(
    mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
    service_job_manager: Optional[service_jobs.ServiceJobManager],
    pipeline_states: Sequence[pstate.PipelineState]) -> None:
  """Processes active pipelines.

  For each pipeline: transitions a NEW execution to RUNNING, ensures service
  jobs (if supported), enqueues cancellation tasks for stop-initiated nodes,
  and generates and enqueues tasks for the remaining nodes using the task
  generator matching the pipeline's execution mode.

  Args:
    mlmd_handle: A handle to the MLMD db.
    task_queue: A `TaskQueue` instance into which generated tasks are enqueued.
    service_job_manager: An optional `ServiceJobManager` instance if service
      jobs are supported.
    pipeline_states: `PipelineState`s of the active pipelines.

  Raises:
    status_lib.StatusNotOkError: If a pipeline has an unsupported execution
      mode.
  """
  for pipeline_state in pipeline_states:
    pipeline = pipeline_state.pipeline
    execution = pipeline_state.execution
    assert execution.last_known_state in (metadata_store_pb2.Execution.NEW,
                                          metadata_store_pb2.Execution.RUNNING)
    if execution.last_known_state != metadata_store_pb2.Execution.RUNNING:
      # Transition a NEW pipeline execution to RUNNING in MLMD.
      updated_execution = copy.deepcopy(execution)
      updated_execution.last_known_state = metadata_store_pb2.Execution.RUNNING
      mlmd_handle.store.put_executions([updated_execution])

    if service_job_manager is not None:
      # Ensure all the required services are running.
      _ensure_services(service_job_manager, pipeline_state)
      pure_service_node_ids = _get_pure_service_node_ids(
          service_job_manager, pipeline_state)
    else:
      pure_service_node_ids = set()

    # Create cancellation tasks for stop-initiated nodes if necessary.
    stop_initiated_nodes = _get_stop_initiated_nodes(pipeline_state)
    for node in stop_initiated_nodes:
      if node.node_info.id not in pure_service_node_ids:
        _maybe_enqueue_cancellation_task(mlmd_handle, pipeline, node,
                                         task_queue)

    # Stop-initiated and pure service nodes are excluded from task generation.
    ignore_node_ids = set(
        n.node_info.id for n in stop_initiated_nodes) | pure_service_node_ids

    # Initialize task generator for the pipeline.
    if pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC:
      generator = sync_pipeline_task_gen.SyncPipelineTaskGenerator(
          mlmd_handle, pipeline, task_queue.contains_task_id, ignore_node_ids)
    elif pipeline.execution_mode == pipeline_pb2.Pipeline.ASYNC:
      generator = async_pipeline_task_gen.AsyncPipelineTaskGenerator(
          mlmd_handle, pipeline, task_queue.contains_task_id, ignore_node_ids)
    else:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.FAILED_PRECONDITION,
          message=(
              f'Only SYNC and ASYNC pipeline execution modes supported; '
              f'found pipeline with execution mode: {pipeline.execution_mode}'))

    # TODO(goutham): Consider concurrent task generation.
    tasks = generator.generate()
    for task in tasks:
      task_queue.enqueue(task)
def _ensure_services(service_job_manager: service_jobs.ServiceJobManager,
                     pipeline_state: pstate.PipelineState) -> None:
  """Ensures service jobs; initiates stopping of nodes whose jobs failed."""
  failed_node_uids = service_job_manager.ensure_services(pipeline_state)
  if failed_node_uids:
    # Entering the pipeline state context commits the mutations to MLMD on
    # exit.
    with pipeline_state:
      for node_uid in failed_node_uids:
        pipeline_state.initiate_node_stop(node_uid)
def _get_stop_initiated_nodes(
    pipeline_state: pstate.PipelineState) -> List[pipeline_pb2.PipelineNode]:
  """Returns list of all stop initiated nodes of the pipeline."""
  pipeline = pipeline_state.pipeline
  return [
      node for node in pstate.get_all_pipeline_nodes(pipeline)
      if pipeline_state.is_node_stop_initiated(
          task_lib.NodeUid.from_pipeline_node(pipeline, node))
  ]
def _maybe_enqueue_cancellation_task(mlmd_handle: metadata.Metadata,
                                     pipeline: pipeline_pb2.Pipeline,
                                     node: pipeline_pb2.PipelineNode,
                                     task_queue: tq.TaskQueue) -> bool:
  """Enqueues a node cancellation task if not already stopped.

  If the node has an ExecNodeTask in the task queue, issue a cancellation.
  Otherwise, if the node has an active execution in MLMD but no ExecNodeTask
  enqueued, it may be due to orchestrator restart after stopping was initiated
  but before the schedulers could finish. So, enqueue an ExecNodeTask with
  is_cancelled set to give a chance for the scheduler to finish gracefully.

  Args:
    mlmd_handle: A handle to the MLMD db.
    pipeline: The pipeline containing the node to cancel.
    node: The node to cancel.
    task_queue: A `TaskQueue` instance into which any cancellation tasks will
      be enqueued.

  Returns:
    `True` if a cancellation task was enqueued. `False` if node is already
    stopped or no cancellation was required.
  """
  exec_node_task_id = task_lib.exec_node_task_id_from_pipeline_node(
      pipeline, node)
  if task_queue.contains_task_id(exec_node_task_id):
    # An ExecNodeTask is in flight; signal its scheduler to cancel.
    task_queue.enqueue(
        task_lib.CancelNodeTask(
            node_uid=task_lib.NodeUid.from_pipeline_node(pipeline, node)))
    return True
  else:
    # No task in the queue, but there may be a lingering active execution in
    # MLMD (e.g. after orchestrator restart); enqueue a pre-cancelled
    # ExecNodeTask so the scheduler can finish gracefully.
    executions = task_gen_utils.get_executions(mlmd_handle, node)
    exec_node_task = task_gen_utils.generate_task_from_active_execution(
        mlmd_handle, pipeline, node, executions, is_cancelled=True)
    if exec_node_task:
      task_queue.enqueue(exec_node_task)
      return True
  return False
"""TaskManager manages the execution and cancellation of tasks."""
from concurrent import futures
import copy
import threading
import time
import typing
from typing import Optional
from absl import logging
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import task_scheduler as ts
from tfx.orchestration.portable import execution_publish_utils
from ml_metadata.proto import metadata_store_pb2
_MAX_DEQUEUE_WAIT_SECS = 5.0
class Error(Exception):
"""Top-level error for current module."""
class TasksProcessingError(Error):
"""Error that accumulates other errors raised during processing tasks."""
def __init__(self, errors):
err_msg = '\n'.join(str(e) for e in errors)
super(TasksProcessingError, self).__init__(err_msg)
self.errors = errors
class TaskManager:
  """TaskManager acts on the tasks fetched from the task queues.

  A `TaskManager` instance can be used as a context manager: the main task
  management loop runs on a background thread between `__enter__` and
  `__exit__`.
  """

  def __init__(self,
               mlmd_handle: metadata.Metadata,
               task_queue: tq.TaskQueue,
               max_active_task_schedulers: int,
               max_dequeue_wait_secs: float = _MAX_DEQUEUE_WAIT_SECS,
               process_all_queued_tasks_before_exit: bool = False):
    """Constructs `TaskManager`.

    Args:
      mlmd_handle: ML metadata db connection.
      task_queue: Task queue.
      max_active_task_schedulers: Maximum number of task schedulers that can be
        active at once.
      max_dequeue_wait_secs: Maximum time to wait when dequeuing if the queue
        is empty.
      process_all_queued_tasks_before_exit: All existing items in the queues
        are processed before exiting the context manager. This is useful for
        deterministic behavior in tests.
    """
    self._mlmd_handle = mlmd_handle
    self._task_queue = task_queue
    self._max_dequeue_wait_secs = max_dequeue_wait_secs
    self._process_all_queued_tasks_before_exit = (
        process_all_queued_tasks_before_exit)

    # Guards `_scheduler_by_node_uid`.
    self._tm_lock = threading.Lock()
    # Signals the main loop to exit.
    self._stop_event = threading.Event()
    # Maps node uid to the active task scheduler for that node.
    self._scheduler_by_node_uid = {}

    # Async executor for the main task management thread.
    self._main_executor = futures.ThreadPoolExecutor(max_workers=1)
    self._main_future = None

    # Async executor for task schedulers.
    self._ts_executor = futures.ThreadPoolExecutor(
        max_workers=max_active_task_schedulers)
    self._ts_futures = set()

    # Last MLMD publish time since epoch; guarded by `_publish_time_lock`.
    self._last_mlmd_publish_time = None
    self._publish_time_lock = threading.Lock()

  def __enter__(self):
    if self._main_future is not None:
      raise RuntimeError('TaskManager already started.')
    self._main_future = self._main_executor.submit(self._main)
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    if self._main_future is None:
      raise RuntimeError('TaskManager not started.')
    self._stop_event.set()
    # Blocks until the main loop exits; the task scheduler executor is shut
    # down by the final `_cleanup` inside `_main`.
    self._main_executor.shutdown()

  def last_mlmd_publish_time(self) -> Optional[float]:
    """Returns time-since-epoch of last MLMD publish; `None` if never published."""
    with self._publish_time_lock:
      return self._last_mlmd_publish_time

  def done(self) -> bool:
    """Returns `True` if the main task management thread has exited.

    Raises:
      RuntimeError: If `done` called without entering the task manager context.
    """
    if self._main_future is None:
      raise RuntimeError('Task manager context not entered.')
    return self._main_future.done()

  def exception(self) -> Optional[BaseException]:
    """Returns exception raised by the main task management thread (if any).

    Raises:
      RuntimeError: If `exception` called without entering the task manager
        context or if the main thread is not done (`done` returns `False`).
    """
    if self._main_future is None:
      raise RuntimeError('Task manager context not entered.')
    if not self._main_future.done():
      raise RuntimeError('Task manager main thread not done; call should be '
                         'conditioned on `done` returning `True`.')
    return self._main_future.exception()

  def _main(self) -> None:
    """Runs the main task management loop."""
    try:
      while not self._stop_event.is_set():
        # Reap completed scheduler futures before picking up new work.
        self._cleanup()
        task = self._task_queue.dequeue(self._max_dequeue_wait_secs)
        if task is None:
          continue
        self._handle_task(task)
    finally:
      if self._process_all_queued_tasks_before_exit:
        # Process any remaining tasks from the queue before exiting. This is
        # mainly to make tests deterministic.
        while True:
          task = self._task_queue.dequeue()
          if task is None:
            break
          self._handle_task(task)

      # Final cleanup before exiting. Any exceptions raised here are
      # automatically chained with any raised in the try block.
      self._cleanup(True)

  def _handle_task(self, task: task_lib.Task) -> None:
    """Dispatches task to the task specific handler."""
    if task_lib.is_exec_node_task(task):
      self._handle_exec_node_task(typing.cast(task_lib.ExecNodeTask, task))
    elif task_lib.is_cancel_node_task(task):
      self._handle_cancel_node_task(typing.cast(task_lib.CancelNodeTask, task))
    else:
      raise RuntimeError('Cannot dispatch bad task: {}'.format(task))

  def _handle_exec_node_task(self, task: task_lib.ExecNodeTask) -> None:
    """Handles `ExecNodeTask`: creates a scheduler and runs it asynchronously."""
    logging.info('Handling ExecNodeTask, task-id: %s', task.task_id)
    node_uid = task.node_uid
    with self._tm_lock:
      # At most one active scheduler per node is allowed.
      if node_uid in self._scheduler_by_node_uid:
        raise RuntimeError(
            'Cannot create multiple task schedulers for the same task; '
            'task_id: {}'.format(task.task_id))
      scheduler = ts.TaskSchedulerRegistry.create_task_scheduler(
          self._mlmd_handle, task.pipeline, task)
      self._scheduler_by_node_uid[node_uid] = scheduler
      self._ts_futures.add(
          self._ts_executor.submit(self._process_exec_node_task, scheduler,
                                   task))

  def _handle_cancel_node_task(self, task: task_lib.CancelNodeTask) -> None:
    """Handles `CancelNodeTask`: cancels the node's active scheduler, if any."""
    logging.info('Handling CancelNodeTask, task-id: %s', task.task_id)
    node_uid = task.node_uid
    with self._tm_lock:
      scheduler = self._scheduler_by_node_uid.get(node_uid)
      if scheduler is None:
        logging.info(
            'No task scheduled for node uid: %s. The task might have already '
            'completed before it could be cancelled.', task.node_uid)
      else:
        scheduler.cancel()
      self._task_queue.task_done(task)

  def _process_exec_node_task(self, scheduler: ts.TaskScheduler,
                              task: task_lib.ExecNodeTask) -> None:
    """Processes an `ExecNodeTask` using the given task scheduler."""
    # This is a blocking call to the scheduler which can take a long time to
    # complete for some types of task schedulers. The scheduler is expected to
    # handle any internal errors gracefully and return the result with an
    # error status. But in case the scheduler raises an exception, it is
    # considered a failed execution and MLMD is updated accordingly.
    try:
      result = scheduler.schedule()
    except Exception as e:  # pylint: disable=broad-except
      logging.info(
          'Exception raised by task scheduler for node uid %s; error: %s',
          task.node_uid, e)
      result = ts.TaskSchedulerResult(
          status=status_lib.Status(
              code=status_lib.Code.ABORTED, message=str(e)))
    logging.info('For ExecNodeTask id: %s, task-scheduler result status: %s',
                 task.task_id, result.status)
    _publish_execution_results(
        mlmd_handle=self._mlmd_handle, task=task, result=result)
    with self._publish_time_lock:
      self._last_mlmd_publish_time = time.time()
    with self._tm_lock:
      del self._scheduler_by_node_uid[task.node_uid]
      self._task_queue.task_done(task)

  def _cleanup(self, final: bool = False) -> None:
    """Collects finished scheduler futures; surfaces their exceptions.

    Args:
      final: If `True`, waits for all pending task scheduler futures to
        complete before collecting results.

    Raises:
      TasksProcessingError: Aggregating exceptions raised by any completed
        task scheduler futures.
    """
    if final:
      # Waits for all pending task scheduler futures to complete.
      self._ts_executor.shutdown()
    done_futures = set(fut for fut in self._ts_futures if fut.done())
    self._ts_futures -= done_futures
    exceptions = [fut.exception() for fut in done_futures if fut.exception()]
    if exceptions:
      raise TasksProcessingError(exceptions)
def _update_execution_state_in_mlmd(
    mlmd_handle: metadata.Metadata, execution: metadata_store_pb2.Execution,
    new_state: metadata_store_pb2.Execution.State) -> None:
  """Writes `execution` to MLMD with `last_known_state` set to `new_state`.

  A deep copy is made so the caller's `execution` proto is not mutated.
  """
  updated_execution = copy.deepcopy(execution)
  updated_execution.last_known_state = new_state
  mlmd_handle.store.put_executions([updated_execution])
def _publish_execution_results(mlmd_handle: metadata.Metadata,
                               task: task_lib.ExecNodeTask,
                               result: ts.TaskSchedulerResult) -> None:
  """Publishes execution results to MLMD.

  On scheduler or executor failure, marks the execution CANCELED or FAILED;
  otherwise publishes the succeeded execution along with its output artifacts.

  Args:
    mlmd_handle: A handle to the MLMD db.
    task: The `ExecNodeTask` whose execution results are published.
    result: The result returned by the task scheduler for `task`.
  """

  def _update_state(status: status_lib.Status) -> None:
    """Marks the execution CANCELED or FAILED depending on `status`."""
    assert status.code != status_lib.Code.OK
    if status.code == status_lib.Code.CANCELLED:
      execution_state = metadata_store_pb2.Execution.CANCELED
      state_msg = 'cancelled'
    else:
      execution_state = metadata_store_pb2.Execution.FAILED
      state_msg = 'failed'
    logging.info(
        'Got error (status: %s) for task id: %s; marking execution (id: %s) '
        'as %s.', status, task.task_id, task.execution.id, state_msg)
    # TODO(goutham): Also record error code and error message as custom
    # property of the execution.
    _update_execution_state_in_mlmd(mlmd_handle, task.execution,
                                    execution_state)

  # Scheduler-level failure or cancellation.
  if result.status.code != status_lib.Code.OK:
    _update_state(result.status)
    return
  # Executor-level failure reported via the executor output.
  if (result.executor_output and
      result.executor_output.execution_result.code != status_lib.Code.OK):
    _update_state(
        status_lib.Status(
            code=result.executor_output.execution_result.code,
            message=result.executor_output.execution_result.result_message))
    return
  # Success: publish output artifacts and mark the execution complete.
  execution_publish_utils.publish_succeeded_execution(mlmd_handle,
                                                      task.execution.id,
                                                      task.contexts,
                                                      task.output_artifacts,
                                                      result.executor_output)
"""Pipeline state management functionality."""
import base64
from typing import List, Text
from absl import logging
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
# Reserved name used for both the orchestrator context type and the
# orchestrator execution type in MLMD.
_ORCHESTRATOR_RESERVED_ID = '__ORCHESTRATOR__'
# Execution property holding the base64-encoded serialized pipeline IR.
_PIPELINE_IR = 'pipeline_ir'
# Custom property set to 1 when stopping of the whole pipeline is initiated.
_STOP_INITIATED = 'stop_initiated'
# Prefix for per-node custom properties signaling that a node should stop.
_NODE_STOP_INITIATED_PREFIX = 'node_stop_initiated_'
_ORCHESTRATOR_EXECUTION_TYPE = metadata_store_pb2.ExecutionType(
    name=_ORCHESTRATOR_RESERVED_ID,
    properties={_PIPELINE_IR: metadata_store_pb2.STRING})
class PipelineState:
  """Class for dealing with pipeline state. Can be used as a context manager."""

  def __init__(self,
               mlmd_handle: metadata.Metadata,
               pipeline_uid: task_lib.PipelineUid,
               context: metadata_store_pb2.Context,
               execution: metadata_store_pb2.Execution,
               commit: bool = False):
    """Constructor. Use one of the factory methods to initialize."""
    self.mlmd_handle = mlmd_handle
    self.pipeline_uid = pipeline_uid
    self.context = context
    self.execution = execution
    # When `True`, the (mutated) execution is written back to MLMD on `commit`.
    self._commit = commit
    self._pipeline = None  # Lazily deserialized; see the `pipeline` property.

  @classmethod
  def new(cls, mlmd_handle: metadata.Metadata,
          pipeline: pipeline_pb2.Pipeline) -> 'PipelineState':
    """Creates a `PipelineState` object for a new pipeline.

    No active pipeline with the same pipeline uid should exist for the call to
    be successful.

    Args:
      mlmd_handle: A handle to the MLMD db.
      pipeline: IR of the pipeline.

    Returns:
      A `PipelineState` object.

    Raises:
      status_lib.StatusNotOkError: If a pipeline with the same UID already
        exists.
    """
    pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
    context = context_lib.register_context_if_not_exists(
        mlmd_handle,
        context_type_name=_ORCHESTRATOR_RESERVED_ID,
        context_name=orchestrator_context_name(pipeline_uid))
    executions = mlmd_handle.store.get_executions_by_context(context.id)
    if any(e for e in executions if execution_lib.is_execution_active(e)):
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.ALREADY_EXISTS,
          message=f'Pipeline with uid {pipeline_uid} already active.')
    execution = execution_lib.prepare_execution(
        mlmd_handle,
        _ORCHESTRATOR_EXECUTION_TYPE,
        metadata_store_pb2.Execution.NEW,
        exec_properties={
            _PIPELINE_IR:
                base64.b64encode(pipeline.SerializeToString()).decode('utf-8')
        })
    return cls(
        mlmd_handle=mlmd_handle,
        pipeline_uid=pipeline_uid,
        context=context,
        execution=execution,
        commit=True)

  @classmethod
  def load(cls, mlmd_handle: metadata.Metadata,
           pipeline_uid: task_lib.PipelineUid) -> 'PipelineState':
    """Loads pipeline state from MLMD.

    Args:
      mlmd_handle: A handle to the MLMD db.
      pipeline_uid: Uid of the pipeline state to load.

    Returns:
      A `PipelineState` object.

    Raises:
      status_lib.StatusNotOkError: With code=NOT_FOUND if no active pipeline
        with the given pipeline uid exists in MLMD. With code=INTERNAL if more
        than 1 active execution exists for given pipeline uid.
    """
    context = mlmd_handle.store.get_context_by_type_and_name(
        type_name=_ORCHESTRATOR_RESERVED_ID,
        context_name=orchestrator_context_name(pipeline_uid))
    if not context:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.NOT_FOUND,
          message=f'No active pipeline with uid {pipeline_uid} found.')
    return cls.load_from_orchestrator_context(mlmd_handle, context)

  @classmethod
  def load_from_orchestrator_context(
      cls, mlmd_handle: metadata.Metadata,
      context: metadata_store_pb2.Context) -> 'PipelineState':
    """Loads pipeline state for active pipeline under given orchestrator context.

    Args:
      mlmd_handle: A handle to the MLMD db.
      context: Pipeline context under which to find the pipeline execution.

    Returns:
      A `PipelineState` object.

    Raises:
      status_lib.StatusNotOkError: With code=NOT_FOUND if no active pipeline
        exists for the given context in MLMD. With code=INTERNAL if more than 1
        active execution exists for given pipeline uid.
    """
    pipeline_uid = pipeline_uid_from_orchestrator_context(context)
    active_executions = [
        e for e in mlmd_handle.store.get_executions_by_context(context.id)
        if execution_lib.is_execution_active(e)
    ]
    if not active_executions:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.NOT_FOUND,
          message=f'No active pipeline with uid {pipeline_uid} to load state.')
    if len(active_executions) > 1:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INTERNAL,
          message=(
              f'Expected 1 but found {len(active_executions)} active pipeline '
              f'executions for pipeline uid: {pipeline_uid}'))
    return cls(
        mlmd_handle=mlmd_handle,
        pipeline_uid=pipeline_uid,
        context=context,
        execution=active_executions[0],
        commit=False)

  @property
  def pipeline(self) -> pipeline_pb2.Pipeline:
    """Returns the pipeline IR, deserializing and caching it on first access."""
    # Compare against `None` instead of relying on truthiness: an empty
    # `Pipeline` proto is falsy and would otherwise be re-parsed on every
    # access.
    if self._pipeline is None:
      pipeline_ir_b64 = data_types_utils.get_metadata_value(
          self.execution.properties[_PIPELINE_IR])
      pipeline = pipeline_pb2.Pipeline()
      pipeline.ParseFromString(base64.b64decode(pipeline_ir_b64))
      self._pipeline = pipeline
    return self._pipeline

  def initiate_stop(self) -> None:
    """Updates pipeline state to signal stopping pipeline execution."""
    data_types_utils.set_metadata_value(
        self.execution.custom_properties[_STOP_INITIATED], 1)
    self._commit = True

  def is_stop_initiated(self) -> bool:
    """Returns `True` if pipeline execution stopping has been initiated."""
    if _STOP_INITIATED in self.execution.custom_properties:
      return data_types_utils.get_metadata_value(
          self.execution.custom_properties[_STOP_INITIATED]) == 1
    return False

  def initiate_node_start(self, node_uid: task_lib.NodeUid) -> None:
    """Updates pipeline state to signal that a node should be started."""
    if self.pipeline.execution_mode != pipeline_pb2.Pipeline.ASYNC:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.UNIMPLEMENTED,
          message='Node can be started only for async pipelines.')
    if not _is_node_uid_in_pipeline(node_uid, self.pipeline):
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INVALID_ARGUMENT,
          message=(f'Node given by uid {node_uid} does not belong to pipeline '
                   f'given by uid {self.pipeline_uid}'))
    # Starting a node simply clears any previously initiated stop signal.
    property_name = _node_stop_initiated_property(node_uid)
    if property_name not in self.execution.custom_properties:
      return
    del self.execution.custom_properties[property_name]
    self._commit = True

  def initiate_node_stop(self, node_uid: task_lib.NodeUid) -> None:
    """Updates pipeline state to signal that a node should be stopped."""
    if self.pipeline.execution_mode != pipeline_pb2.Pipeline.ASYNC:
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.UNIMPLEMENTED,
          # Fixed message; it previously said "started" due to a copy-paste
          # error from `initiate_node_start`.
          message='Node can be stopped only for async pipelines.')
    if not _is_node_uid_in_pipeline(node_uid, self.pipeline):
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.INVALID_ARGUMENT,
          message=(f'Node given by uid {node_uid} does not belong to pipeline '
                   f'given by uid {self.pipeline_uid}'))
    data_types_utils.set_metadata_value(
        self.execution.custom_properties[_node_stop_initiated_property(
            node_uid)], 1)
    self._commit = True

  def is_node_stop_initiated(self, node_uid: task_lib.NodeUid) -> bool:
    """Returns `True` if stopping has been initiated for the given node."""
    if node_uid.pipeline_uid != self.pipeline_uid:
      raise RuntimeError(
          f'Node given by uid {node_uid} does not belong to pipeline given '
          f'by uid {self.pipeline_uid}')
    property_name = _node_stop_initiated_property(node_uid)
    if property_name in self.execution.custom_properties:
      return data_types_utils.get_metadata_value(
          self.execution.custom_properties[property_name]) == 1
    return False

  def save_property(self, property_key: Text, property_value: Text) -> None:
    """Saves a custom property to the pipeline execution."""
    self.execution.custom_properties[property_key].string_value = property_value
    self._commit = True

  def remove_property(self, property_key: Text) -> None:
    """Removes a custom property of the pipeline execution if it exists."""
    # Membership test instead of `.get(...)` truthiness: a present property
    # whose value is falsy (e.g. empty string) must still be removed.
    if property_key in self.execution.custom_properties:
      del self.execution.custom_properties[property_key]
      self._commit = True

  def commit(self) -> None:
    """Commits pipeline state to MLMD if there are any mutations."""
    if self._commit:
      self.execution = execution_lib.put_execution(self.mlmd_handle,
                                                   self.execution,
                                                   [self.context])
      logging.info('Committed execution (id: %s) for pipeline with uid: %s',
                   self.execution.id, self.pipeline_uid)
    self._commit = False

  def __enter__(self) -> 'PipelineState':
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    # Any pending mutations are flushed to MLMD when the context exits.
    self.commit()
def get_orchestrator_contexts(
    mlmd_handle: metadata.Metadata) -> List[metadata_store_pb2.Context]:
  """Returns all orchestrator-reserved contexts registered in MLMD."""
  return mlmd_handle.store.get_contexts_by_type(_ORCHESTRATOR_RESERVED_ID)
# TODO(goutham): Handle sync pipelines.
def orchestrator_context_name(pipeline_uid: task_lib.PipelineUid) -> str:
  """Returns orchestrator reserved context name."""
  return _ORCHESTRATOR_RESERVED_ID + '_' + pipeline_uid.pipeline_id
# TODO(goutham): Handle sync pipelines.
def pipeline_uid_from_orchestrator_context(
    context: metadata_store_pb2.Context) -> task_lib.PipelineUid:
  """Returns pipeline uid from orchestrator reserved context."""
  # Context names are created as f'{_ORCHESTRATOR_RESERVED_ID}_{pipeline_id}'.
  prefix = _ORCHESTRATOR_RESERVED_ID + '_'
  pipeline_id = context.name.split(prefix)[1]
  return task_lib.PipelineUid(pipeline_id=pipeline_id, pipeline_run_id=None)
def _node_stop_initiated_property(node_uid: task_lib.NodeUid) -> str:
  """Name of the custom property marking stop-initiation for the given node."""
  return _NODE_STOP_INITIATED_PREFIX + node_uid.node_id
def get_all_pipeline_nodes(
    pipeline: pipeline_pb2.Pipeline) -> List[pipeline_pb2.PipelineNode]:
  """Returns all pipeline nodes in the given pipeline."""
  nodes = []
  for pipeline_or_node in pipeline.nodes:
    # TODO(goutham): Handle sub-pipelines.
    # TODO(goutham): Handle system nodes.
    if pipeline_or_node.WhichOneof('node') != 'pipeline_node':
      raise NotImplementedError('Only pipeline nodes supported.')
    nodes.append(pipeline_or_node.pipeline_node)
  return nodes
def _is_node_uid_in_pipeline(node_uid: task_lib.NodeUid,
                             pipeline: pipeline_pb2.Pipeline) -> bool:
  """Returns `True` if the `node_uid` belongs to the given pipeline."""
  return any(
      task_lib.NodeUid.from_pipeline_node(pipeline, node) == node_uid
      for node in get_all_pipeline_nodes(pipeline))
import abc
import typing
from typing import Dict, List, Optional, Text, Type, TypeVar
import attr
from tfx import types
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
# TODO(goutham): Include pipeline key/tag in PipelineUid.
@attr.s(frozen=True)
class PipelineUid:
  """Unique identifier for a pipeline.

  Attributes:
    pipeline_id: Id of the pipeline containing the node. Corresponds to
      `Pipeline.pipeline_info.id` in the pipeline IR.
    pipeline_run_id: This is set only for sync pipelines and corresponds to
      `PipelineRuntimeSpec.pipeline_run_id` in the pipeline IR.
  """
  pipeline_id = attr.ib(type=Text)
  pipeline_run_id = attr.ib(type=Optional[Text])

  @classmethod
  def from_pipeline(cls: Type['PipelineUid'],
                    pipeline: pipeline_pb2.Pipeline) -> 'PipelineUid':
    """Builds a `PipelineUid` from the pipeline IR proto."""
    runtime_spec = pipeline.runtime_spec
    run_id = (
        runtime_spec.pipeline_run_id.field_value.string_value
        if runtime_spec.HasField('pipeline_run_id') else None)
    return cls(pipeline_id=pipeline.pipeline_info.id, pipeline_run_id=run_id)
@attr.s(frozen=True)
class NodeUid:
  """Unique identifier for a node in the pipeline.

  Attributes:
    pipeline_uid: The pipeline UID.
    node_id: Node id. Corresponds to `PipelineNode.node_info.id` in the
      pipeline IR.
  """
  pipeline_uid = attr.ib(type=PipelineUid)
  node_id = attr.ib(type=Text)

  @classmethod
  def from_pipeline_node(cls: Type['NodeUid'], pipeline: pipeline_pb2.Pipeline,
                         node: pipeline_pb2.PipelineNode) -> 'NodeUid':
    """Builds a `NodeUid` for `node` within `pipeline`."""
    pipeline_uid = PipelineUid.from_pipeline(pipeline)
    return cls(pipeline_uid=pipeline_uid, node_id=node.node_info.id)
# Task id can be any hashable type.
TaskId = typing.Hashable

# TypeVar used to type classmethods that are inherited by `Task` subclasses.
_TaskT = TypeVar('_TaskT', bound='Task')
class Task(abc.ABC):
  """Task instructs the work to be performed."""

  @property
  @abc.abstractmethod
  def task_id(self) -> TaskId:
    """Returns a unique identifier for this task.

    The concrete implementation must ensure that the returned task id is unique
    across all task types.
    """

  @classmethod
  def task_type_id(cls: Type[_TaskT]) -> Text:
    """Returns task type id."""
    # The class name serves as the type id; `is_exec_node_task` and
    # `is_cancel_node_task` below dispatch on it.
    return cls.__name__
class HasNodeUid(abc.ABC):
  """Abstract mixin class for tasks that are associated with a pipeline node."""

  @property
  @abc.abstractmethod
  def node_uid(self) -> NodeUid:
    """Returns the unique identifier of the node."""
@attr.s(frozen=True)
class ExecNodeTask(Task, HasNodeUid):
  """Task to instruct execution of a node in the pipeline.

  Attributes:
    execution: MLMD execution associated with current node.
    contexts: List of contexts associated with the execution.
    exec_properties: Execution properties of the execution.
    input_artifacts: Input artifacts dict.
    output_artifacts: Output artifacts dict.
    executor_output_uri: URI for the executor output.
    stateful_working_dir: Working directory for the node execution.
    pipeline: The pipeline IR proto containing the node to be executed.
    is_cancelled: Indicates whether this is a cancelled execution. The task
      scheduler is expected to gracefully exit after doing any necessary
      cleanup.
  """
  # Private attr; exposed read-only via the `node_uid` property below.
  _node_uid = attr.ib(type=NodeUid)
  execution = attr.ib(type=metadata_store_pb2.Execution)
  contexts = attr.ib(type=List[metadata_store_pb2.Context])
  exec_properties = attr.ib(type=Dict[Text, types.Property])
  input_artifacts = attr.ib(type=Dict[Text, List[types.Artifact]])
  output_artifacts = attr.ib(type=Dict[Text, List[types.Artifact]])
  executor_output_uri = attr.ib(type=Text)
  stateful_working_dir = attr.ib(type=Text)
  pipeline = attr.ib(type=pipeline_pb2.Pipeline)
  is_cancelled = attr.ib(type=bool, default=False)

  @property
  def node_uid(self) -> NodeUid:
    return self._node_uid

  @property
  def task_id(self) -> TaskId:
    return _exec_node_task_id(self.task_type_id(), self.node_uid)
@attr.s(frozen=True)
class CancelNodeTask(Task, HasNodeUid):
  """Task to instruct cancellation of an ongoing node execution."""
  # Private attr; exposed read-only via the `node_uid` property below.
  _node_uid = attr.ib(type=NodeUid)

  @property
  def node_uid(self) -> NodeUid:
    return self._node_uid

  @property
  def task_id(self) -> TaskId:
    return (self.task_type_id(), self.node_uid)
def is_exec_node_task(task: Task) -> bool:
  """Returns `True` if `task` is an `ExecNodeTask`."""
  return ExecNodeTask.task_type_id() == task.task_type_id()
def is_cancel_node_task(task: Task) -> bool:
  """Returns `True` if `task` is a `CancelNodeTask`."""
  return CancelNodeTask.task_type_id() == task.task_type_id()
def exec_node_task_id_from_pipeline_node(
    pipeline: pipeline_pb2.Pipeline, node: pipeline_pb2.PipelineNode) -> TaskId:
  """Returns task id of an `ExecNodeTask` from pipeline and node."""
  node_uid = NodeUid.from_pipeline_node(pipeline, node)
  return _exec_node_task_id(ExecNodeTask.task_type_id(), node_uid)
def _exec_node_task_id(task_type_id: Text, node_uid: NodeUid) -> TaskId:
  """Builds the unique (type id, node uid) task id for an `ExecNodeTask`."""
  return task_type_id, node_uid
"""Task queue."""
import queue
import threading
from typing import Optional
from tfx.orchestration.experimental.core import task as task_lib
class TaskQueue:
  """A thread-safe task queue with duplicate detection.

  The life-cycle of a task starts with producers calling `enqueue`. Consumers
  call `dequeue` to obtain the tasks in FIFO order. When processing is
  complete, consumers must release the tasks by calling `task_done`.
  """

  def __init__(self):
    self._lock = threading.Lock()
    # Ids of all tasks currently tracked: queued or dequeued-but-not-done.
    self._task_ids = set()
    # Note: the TaskQueue implementation relies on the queue being unbounded.
    # This must not change without revising the implementation.
    self._queue = queue.Queue()
    # Tasks that have been dequeued but not yet released via `task_done`.
    self._pending_tasks_by_id = {}

  def enqueue(self, task: task_lib.Task) -> bool:
    """Enqueues the given task if no prior task with the same id exists.

    Args:
      task: A `Task` object.

    Returns:
      `True` if the task could be enqueued. `False` if a task with the same id
      already exists.
    """
    with self._lock:
      if task.task_id in self._task_ids:
        return False
      self._task_ids.add(task.task_id)
      self._queue.put((task.task_id, task))
      return True

  def dequeue(self,
              max_wait_secs: Optional[float] = None) -> Optional[task_lib.Task]:
    """Removes and returns a task from the queue.

    Once the processing is complete, queue consumers must call `task_done`.

    Args:
      max_wait_secs: If not `None`, waits a maximum of `max_wait_secs` when the
        queue is empty for a task to be enqueued. If no task is present in the
        queue after the wait, `None` is returned. If `max_wait_secs` is `None`
        (default), returns `None` without waiting when the queue is empty.

    Returns:
      A `Task` or `None` if the queue is empty.
    """
    should_block = max_wait_secs is not None
    try:
      task_id, task = self._queue.get(
          block=should_block, timeout=max_wait_secs)
    except queue.Empty:
      return None
    with self._lock:
      self._pending_tasks_by_id[task_id] = task
    return task

  def task_done(self, task: task_lib.Task) -> None:
    """Marks the processing of a task as done.

    Consumers should call this method after the task is processed.

    Args:
      task: A `Task` object.

    Raises:
      RuntimeError: If attempt is made to mark a non-existent or non-dequeued
        task as done.
    """
    with self._lock:
      task_id = task.task_id
      if task_id in self._pending_tasks_by_id:
        del self._pending_tasks_by_id[task_id]
        self._task_ids.remove(task_id)
        return
      if task_id in self._task_ids:
        raise RuntimeError(
            'Must call `dequeue` before calling `task_done`; task id: {}'
            .format(task_id))
      raise RuntimeError(
          'Task not present in the queue; task id: {}'.format(task_id))

  def contains_task_id(self, task_id: task_lib.TaskId) -> bool:
    """Returns `True` if the task queue contains a task with the given `task_id`.

    Args:
      task_id: A task id.

    Returns:
      `True` if a task with `task_id` was enqueued but `task_done` has not been
      invoked yet.
    """
    with self._lock:
      return task_id in self._task_ids

  def is_empty(self) -> bool:
    """Returns `True` if the task queue is empty.

    Queue is considered empty only if any enqueued tasks have been dequeued and
    `task_done` invoked on them.
    """
    with self._lock:
      return not self._task_ids
"""Utilities for task generation."""
import itertools
from typing import Dict, Iterable, List, Optional, Sequence, Text
import attr
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
@attr.s
class ResolvedInfo:
  """Resolved contexts, exec properties and input artifacts for a node."""
  contexts = attr.ib(type=List[metadata_store_pb2.Context])
  exec_properties = attr.ib(type=Dict[Text, types.Property])
  # Per the declared type this may be `None` when inputs cannot be resolved —
  # TODO confirm against `inputs_utils.resolve_input_artifacts` semantics.
  input_artifacts = attr.ib(type=Optional[Dict[Text, List[types.Artifact]]])
def _generate_task_from_execution(metadata_handler: metadata.Metadata,
                                  pipeline: pipeline_pb2.Pipeline,
                                  node: pipeline_pb2.PipelineNode,
                                  execution: metadata_store_pb2.Execution,
                                  is_cancelled: bool = False) -> task_lib.Task:
  """Builds an `ExecNodeTask` for `node` from an existing MLMD execution."""
  resolver = outputs_utils.OutputsResolver(node, pipeline.pipeline_info,
                                           pipeline.runtime_spec,
                                           pipeline.execution_mode)
  return task_lib.ExecNodeTask(
      node_uid=task_lib.NodeUid.from_pipeline_node(pipeline, node),
      execution=execution,
      contexts=metadata_handler.store.get_contexts_by_execution(execution.id),
      exec_properties=_extract_properties(execution),
      input_artifacts=execution_lib.get_artifacts_dict(
          metadata_handler, execution.id, metadata_store_pb2.Event.INPUT),
      output_artifacts=resolver.generate_output_artifacts(execution.id),
      executor_output_uri=resolver.get_executor_output_uri(execution.id),
      stateful_working_dir=resolver.get_stateful_working_directory(
          execution.id),
      pipeline=pipeline,
      is_cancelled=is_cancelled)
def generate_task_from_active_execution(
    metadata_handler: metadata.Metadata,
    pipeline: pipeline_pb2.Pipeline,
    node: pipeline_pb2.PipelineNode,
    executions: Iterable[metadata_store_pb2.Execution],
    is_cancelled: bool = False,
) -> Optional[task_lib.Task]:
  """Generates task from active execution (if any).

  Returns `None` if a task cannot be generated from active execution.

  Args:
    metadata_handler: A handler to access MLMD db.
    pipeline: The pipeline containing the node.
    node: The pipeline node for which to generate a task.
    executions: A sequence of all executions for the given node.
    is_cancelled: Sets `is_cancelled` in ExecNodeTask.

  Returns:
    A `Task` proto if active execution exists for the node. `None` otherwise.

  Raises:
    RuntimeError: If there are multiple active executions for the node.
  """
  active = [e for e in executions if execution_lib.is_execution_active(e)]
  if not active:
    return None
  if len(active) > 1:
    raise RuntimeError(
        'Unexpected multiple active executions for the node: {}\n executions: '
        '{}'.format(node.node_info.id, active))
  return _generate_task_from_execution(
      metadata_handler, pipeline, node, active[0], is_cancelled=is_cancelled)
def _extract_properties(
    execution: metadata_store_pb2.Execution) -> Dict[Text, types.Property]:
  """Returns all properties and custom properties of `execution` as a dict."""
  properties = {}
  all_items = itertools.chain(execution.properties.items(),
                              execution.custom_properties.items())
  for key, prop in all_items:
    value = data_types_utils.get_metadata_value(prop)
    if value is None:
      raise ValueError(f'Unexpected property with empty value; key: {key}')
    properties[key] = value
  return properties
def generate_resolved_info(metadata_handler: metadata.Metadata,
                           node: pipeline_pb2.PipelineNode) -> ResolvedInfo:
  """Returns a `ResolvedInfo` object for executing the node.

  Args:
    metadata_handler: A handler to access MLMD db.
    node: The pipeline node for which to generate.

  Returns:
    A `ResolvedInfo` with input resolutions.
  """
  return ResolvedInfo(
      # Register node contexts.
      contexts=context_lib.prepare_contexts(
          metadata_handler=metadata_handler, node_contexts=node.contexts),
      # Resolve execution properties.
      exec_properties=inputs_utils.resolve_parameters(
          node_parameters=node.parameters),
      # Resolve inputs.
      input_artifacts=inputs_utils.resolve_input_artifacts(
          metadata_handler=metadata_handler, node_inputs=node.inputs))
def get_executions(
    metadata_handler: metadata.Metadata,
    node: pipeline_pb2.PipelineNode) -> List[metadata_store_pb2.Execution]:
  """Returns all executions for the given pipeline node.

  This finds all executions having the same set of contexts as the pipeline
  node.

  Args:
    metadata_handler: A handler to access MLMD db.
    node: The pipeline node for which to obtain executions.

  Returns:
    List of executions for the given node in MLMD db.
  """
  node_contexts = []
  for context_spec in node.contexts.contexts:
    ctx = metadata_handler.store.get_context_by_type_and_name(
        context_spec.type.name, data_types_utils.get_value(context_spec.name))
    if ctx is None:
      # Without a registered context there can be no associated executions.
      return []
    node_contexts.append(ctx)
  return execution_lib.get_executions_associated_with_all_contexts(
      metadata_handler, node_contexts)
def is_latest_execution_successful(
    executions: Sequence[metadata_store_pb2.Execution]) -> bool:
  """Returns `True` if the latest execution was successful.

  Latest execution will have the most recent `create_time_since_epoch`.

  Args:
    executions: A sequence of executions.

  Returns:
    `True` if the latest execution (per `create_time_since_epoch`) was
    successful. `False` if `executions` is empty or if the latest execution
    was not successful.
  """
  if not executions:
    return False
  # `max` is O(n) vs O(n log n) for a full sort; on ties it returns the first
  # occurrence, matching `sorted(..., reverse=True)[0]` of a stable sort.
  latest_execution = max(executions, key=lambda e: e.create_time_since_epoch)
  return execution_lib.is_execution_successful(latest_execution)
def get_latest_successful_execution(
    executions: Iterable[metadata_store_pb2.Execution]
) -> Optional[metadata_store_pb2.Execution]:
  """Returns the latest successful execution or `None` if none exist.

  Args:
    executions: An iterable of executions.

  Returns:
    The successful execution with the most recent `create_time_since_epoch`,
    or `None` if there are no successful executions.
  """
  successful_executions = [
      e for e in executions if execution_lib.is_execution_successful(e)
  ]
  if not successful_executions:
    return None
  # `max` is O(n) vs O(n log n) for a full sort; on ties it returns the first
  # occurrence, matching `sorted(..., reverse=True)[0]` of a stable sort.
  return max(
      successful_executions, key=lambda e: e.create_time_since_epoch)
"""Task scheduler interface and registry."""
import abc
import typing
from typing import Optional, Text, Type, TypeVar
import attr
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import task as task_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
@attr.s(frozen=True)
class TaskSchedulerResult:
  """Response from the task scheduler.

  Attributes:
    status: Scheduler status that reflects scheduler level issues, such as
      task cancellation, failure to start the executor, etc. Executor status
      set in `executor_output` matters if the scheduler status is `OK`.
      Otherwise, `executor_output` may be `None` and is ignored.
    executor_output: An instance of `ExecutorOutput` containing the results of
      task execution.
  """
  status = attr.ib(type=status_lib.Status)
  # Defaults to `None`; only meaningful when `status` is OK (see docstring).
  executor_output = attr.ib(
      type=Optional[execution_result_pb2.ExecutorOutput], default=None)
class TaskScheduler(abc.ABC):
  """Interface for task schedulers."""

  def __init__(self, mlmd_handle: metadata.Metadata,
               pipeline: pipeline_pb2.Pipeline, task: task_lib.Task):
    """Constructor.

    Args:
      mlmd_handle: A handle to the MLMD db.
      pipeline: The pipeline IR proto.
      task: Task to be executed.
    """
    self.mlmd_handle = mlmd_handle
    self.pipeline = pipeline
    self.task = task

  @abc.abstractmethod
  def schedule(self) -> TaskSchedulerResult:
    """Schedules task execution and returns the results of execution.

    This method blocks until task execution completes (successfully or not) or
    until explicitly cancelled by a call to `cancel`. When cancelled,
    `schedule` is expected to stop any ongoing work, clean up and return as
    soon as possible. Note that `cancel` will be invoked from a different
    thread than `schedule` and hence the concrete implementations must be
    thread safe. It's technically possible for `cancel` to be invoked before
    `schedule`; scheduler implementations should handle this case by returning
    from `schedule` immediately.
    """

  @abc.abstractmethod
  def cancel(self) -> None:
    """Cancels task scheduler.

    This method will be invoked from a different thread than the thread that's
    blocked on call to `schedule`. `cancel` must return immediately when
    called. Upon cancellation, `schedule` method is expected to stop any
    ongoing work, clean up and return as soon as possible. It's technically
    possible for `cancel` to be invoked before `schedule`; scheduler
    implementations should handle this case by returning from `schedule`
    immediately.
    """
# TypeVar used to type `TaskSchedulerRegistry` classmethods for subclasses.
T = TypeVar('T', bound='TaskSchedulerRegistry')
class TaskSchedulerRegistry:
  """A registry for task schedulers."""

  # Maps executor spec type url -> TaskScheduler subclass; shared by all
  # instances since registration is class-level.
  _task_scheduler_registry = {}

  @classmethod
  def register(cls: Type[T], executor_spec_type_url: Text,
               scheduler_class: Type[TaskScheduler]) -> None:
    """Registers a new task scheduler for the given executor spec type url.

    Args:
      executor_spec_type_url: The URL of the executor spec type.
      scheduler_class: The class that will be instantiated for a matching task.

    Raises:
      ValueError: If `executor_spec_type_url` is already in the registry.
    """
    if executor_spec_type_url in cls._task_scheduler_registry:
      raise ValueError(
          'A task scheduler already exists for the executor spec type url: {}'
          .format(executor_spec_type_url))
    cls._task_scheduler_registry[executor_spec_type_url] = scheduler_class

  @classmethod
  def clear(cls: Type[T]) -> None:
    """Removes all registered task schedulers."""
    cls._task_scheduler_registry.clear()

  @classmethod
  def create_task_scheduler(cls: Type[T], mlmd_handle: metadata.Metadata,
                            pipeline: pipeline_pb2.Pipeline,
                            task: task_lib.Task) -> TaskScheduler:
    """Creates a task scheduler for the given task.

    Note that this assumes deployment_config packed in the pipeline IR is of
    type `IntermediateDeploymentConfig`. This detail may change in the future.

    Args:
      mlmd_handle: A handle to the MLMD db.
      pipeline: The pipeline IR.
      task: The task that needs to be scheduled.

    Returns:
      An instance of `TaskScheduler` for the given task.

    Raises:
      NotImplementedError: Raised if not an `ExecNodeTask`.
      ValueError: Deployment config not present in the IR proto or if executor
        spec for the node corresponding to `task` not configured in the IR.
    """
    if not task_lib.is_exec_node_task(task):
      # Error message cleaned up; it previously contained corrupted text.
      raise NotImplementedError(
          'Can create a task scheduler only for an `ExecNodeTask`.')
    task = typing.cast(task_lib.ExecNodeTask, task)
    # TODO(b/170383494): Decide which DeploymentConfig to use.
    if not pipeline.deployment_config.Is(
        pipeline_pb2.IntermediateDeploymentConfig.DESCRIPTOR):
      raise ValueError('No deployment config found in pipeline IR.')
    depl_config = pipeline_pb2.IntermediateDeploymentConfig()
    pipeline.deployment_config.Unpack(depl_config)
    node_id = task.node_uid.node_id
    if node_id not in depl_config.executor_specs:
      raise ValueError(
          'Executor spec for node id `{}` not found in pipeline IR.'.format(
              node_id))
    executor_spec_type_url = depl_config.executor_specs[node_id].type_url
    return cls._task_scheduler_registry[executor_spec_type_url](
        mlmd_handle=mlmd_handle, pipeline=pipeline, task=task)
"""Utilities for handling common config operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Tuple, Type
from tfx.dsl.components.base import base_component
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.launcher import base_component_launcher
def find_component_launch_info(
    p_config: 'pipeline_config.PipelineConfig',
    component: 'base_component.BaseComponent',
) -> ('Tuple[Type[base_component_launcher.BaseComponentLauncher], '
      'Optional[base_component_config.BaseComponentConfig]]'):
  """Finds a launcher and component config to launch the component.

  The default lookup logic goes through the `supported_launcher_classes`
  in sequence for each config from the `default_component_configs`. User can
  override a single component setting by `component_config_overrides`. The
  method returns the first component config and launcher which together can
  launch the executor_spec of the component.

  Subclass may customize the logic by overriding the method.

  Args:
    p_config: the pipeline config.
    component: the component to launch.

  Returns:
    The found tuple of component launcher class and the compatible component
    config.

  Raises:
    RuntimeError: if no supported launcher is found.
  """
  if component.id in p_config.component_config_overrides:
    # An explicit per-component override takes precedence over the defaults.
    component_configs = [p_config.component_config_overrides[component.id]]
  else:
    # Add None to the end of the list to find launcher with no component
    # config.
    component_configs = p_config.default_component_configs + [None]

  for component_config in component_configs:
    for component_launcher_class in p_config.supported_launcher_classes:
      if component_launcher_class.can_launch(component.executor_spec,
                                             component_config):
        return (component_launcher_class, component_config)
  # Use `component.id` for consistency with the lookup above
  # (`component_id` is a deprecated alias).
  raise RuntimeError('No launcher info can be found for component "%s".' %
                     component.id)
"""Component config for docker run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, List, Optional, Text, Union
from tfx.orchestration.config import base_component_config
class DockerComponentConfig(base_component_config.BaseComponentConfig):
  """Component config which holds docker run args.

  Attributes:
    docker_server_url: URL to the Docker server. For example,
      `unix:///var/run/docker.sock` or `tcp://127.0.0.1:1234`. Uses environment
      variable to initialize the docker client if this parameter is not set.
      Default: `None`.
    environment: Environment variables to set inside the container, as a
      dictionary or a list of strings in the format ["SOMEVARIABLE=xxx"].
    name: The name for this container.
    privileged: Give extended privileges to this container. Default: `False`.
    user: Username or UID to run commands as inside the container.
    volumes: A dictionary to configure volumes mounted inside the container. The
      key is either the host path or a volume name, and the value is a
      dictionary with the keys: {bind: mode}.
      For example:
      `{'/home/user1': {'bind': '/mnt/vol2', 'mode': 'rw'},
        '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}`
    additional_run_args: Additional run args to pass to
      `docker.client.containers.run` (e.g. `remove=True` to delete the
      container when it has finished running). See
      https://docker-py.readthedocs.io/en/stable/containers.html#docker.models.containers.ContainerCollection.run.
  """

  def __init__(self,
               docker_server_url: Optional[Text] = None,
               environment: Optional[Union[Dict[Text, Text],
                                           List[Text]]] = None,
               name: Optional[Text] = None,
               privileged: bool = False,
               user: Optional[Union[Text, int]] = None,
               volumes: Optional[Union[Dict[Text, Dict[Text, Text]],
                                       List[Text]]] = None,
               **kwargs):
    self.docker_server_url = docker_server_url
    self.environment = environment
    self.name = name
    self.privileged = privileged
    self.user = user
    self.volumes = volumes
    # Any extra keyword arguments are forwarded verbatim to
    # `docker.client.containers.run`.
    self.additional_run_args = kwargs

  def to_run_args(self):
    """Builds the kwargs dict to pass to `docker.client.containers.run`.

    Only explicitly-set optional attributes are included; `privileged` is
    always present.

    Returns:
      A dict of run args for the docker client.
    """
    if self.additional_run_args:
      # Copy so later mutations of the returned dict don't leak back into
      # this config object.
      args = self.additional_run_args.copy()
    else:
      args = {}

    args.update(privileged=self.privileged)

    if self.environment:
      args.update(environment=self.environment)
    if self.name:
      args.update(name=self.name)
    if self.user:
      args.update(user=self.user)
    if self.volumes:
      args.update(volumes=self.volumes)

    return args
"""Settings for controlling how to run a pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, List, Optional, Text, Type
from tfx.orchestration.config import base_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
class PipelineConfig(object):
  """Config class which controls how to run a pipeline.

  Attributes:
    supported_launcher_classes: A list of component launcher classes that are
      supported by the current pipeline. List sequence determines the order in
      which launchers are chosen for each component being run.
    default_component_configs: A list of default component configs which will
      be used as default component config to run each component in the
      pipeline. List sequence determines the order in which config are chosen
      for each component being run.
    component_config_overrides: component configs for customizing the launching
      of each component. The key is the component ID.
  """

  # TODO(hongyes): figure out the best practice to put the
  # SUPPORTED_LAUNCHER_CLASSES.
  def __init__(self,
               supported_launcher_classes:
               'Optional[List[Type[base_component_launcher.BaseComponentLauncher]]]' = None,
               default_component_configs:
               'Optional[List[base_component_config.BaseComponentConfig]]' = None,
               component_config_overrides:
               'Optional[Dict[Text, base_component_config.BaseComponentConfig]]' = None):
    """Initializes the config, substituting defaults for omitted arguments.

    Args:
      supported_launcher_classes: Launcher classes to consider, in priority
        order. Defaults to the in-process launcher only.
      default_component_configs: Default component configs, in priority order.
        Defaults to an empty list.
      component_config_overrides: Per-component config overrides keyed by
        component ID. Defaults to an empty dict.

    Raises:
      ValueError: If launcher classes or default configs contain duplicates.
    """
    self.supported_launcher_classes = supported_launcher_classes or [
        in_process_component_launcher.InProcessComponentLauncher
    ]
    self.default_component_configs = default_component_configs or []
    self.component_config_overrides = component_config_overrides or {}
    self._validate_configs()

  def _validate_configs(self):
    """Validates that launcher classes and default config types are unique."""
    if len(self.supported_launcher_classes) > len(
        set(self.supported_launcher_classes)):
      raise ValueError(
          'supported_launcher_classes must not have duplicate types')
    default_component_config_classes = [
        type(config) for config in self.default_component_configs
    ]
    if len(default_component_config_classes) > len(
        set(default_component_config_classes)):
      raise ValueError(
          'default_component_configs must not have configs with the same type')
"""Docker component launcher which launches a container in a docker environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Text, cast
import absl
import docker
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import docker_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import container_common
class DockerComponentLauncher(base_component_launcher.BaseComponentLauncher):
  """Responsible for launching a container executor via a local Docker daemon."""

  @classmethod
  def can_launch(
      cls, component_executor_spec: executor_spec.ExecutorSpec,
      component_config: base_component_config.BaseComponentConfig) -> bool:
    """Checks if the launcher can launch the executor spec.

    Args:
      component_executor_spec: The executor spec to check.
      component_config: Optional component config; if given, it must be a
        `DockerComponentConfig` for this launcher to apply.

    Returns:
      True if this launcher can launch the given executor spec.
    """
    if component_config and not isinstance(
        component_config, docker_component_config.DockerComponentConfig):
      return False

    # Both plain and templated container specs are supported.
    return isinstance(component_executor_spec,
                      (executor_spec.ExecutorContainerSpec,
                       executor_specs.TemplatedExecutorContainerSpec))

  def _run_executor(self, execution_id: int,
                    input_dict: Dict[Text, List[types.Artifact]],
                    output_dict: Dict[Text, List[types.Artifact]],
                    exec_properties: Dict[Text, Any]) -> None:
    """Executes the component by running its container with Docker.

    Args:
      execution_id: The ID of the execution.
      input_dict: Input dict from input key to a list of Artifacts.
      output_dict: Output dict from output key to a list of Artifacts.
      exec_properties: A dict of execution properties.

    Raises:
      RuntimeError: If the container exits with a non-zero status code.
    """
    executor_container_spec = cast(executor_spec.ExecutorContainerSpec,
                                   self._component_executor_spec)
    if self._component_config:
      docker_config = cast(docker_component_config.DockerComponentConfig,
                           self._component_config)
    else:
      docker_config = docker_component_config.DockerComponentConfig()

    # Replace container spec with jinja2 template.
    executor_container_spec = container_common.resolve_container_template(
        executor_container_spec, input_dict, output_dict, exec_properties)

    # Pass format arguments lazily so the string is only built when the
    # record is actually emitted.
    absl.logging.info('Container spec: %s', vars(executor_container_spec))
    absl.logging.info('Docker config: %s', vars(docker_config))

    # Call client.containers.run and wait for completion.
    # ExecutorContainerSpec follows k8s container spec which has different
    # names to Docker's container spec. It's intended to set command to docker's
    # entrypoint and args to docker's command.
    if docker_config.docker_server_url:
      client = docker.DockerClient(base_url=docker_config.docker_server_url)
    else:
      client = docker.from_env()

    run_args = docker_config.to_run_args()
    container = client.containers.run(
        image=executor_container_spec.image,
        entrypoint=executor_container_spec.command,
        command=executor_container_spec.args,
        detach=True,
        **run_args)

    # Streaming logs
    for log in container.logs(stream=True):
      absl.logging.info('Docker: %s', log.decode('utf-8'))

    exit_code = container.wait()['StatusCode']
    if exit_code != 0:
      raise RuntimeError(
          'Container exited with error code "{}"'.format(exit_code))
    # TODO(b/141192583): Report data to publisher
    # - report container digest
    # - report replaced command line entrypoints
    # - report docker run args
"""Common code shared by container based launchers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, Union
# TODO(b/176812386): Deprecate usage of jinja2 for placeholders.
import jinja2
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import executor_spec
def resolve_container_template(
    container_spec_tmpl: Union[executor_spec.ExecutorContainerSpec,
                               executor_specs.TemplatedExecutorContainerSpec],
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any]) -> executor_spec.ExecutorContainerSpec:
  """Materializes an executor container spec from its templated form.

  Placeholder-based specs have their command-line placeholders expanded;
  Jinja2-based specs have image, command and args rendered against the
  execution context. Each input split is handled independently by the caller.

  Args:
    container_spec_tmpl: the container spec template to be resolved.
    input_dict: Dictionary of input artifacts consumed by this component.
    output_dict: Dictionary of output artifacts produced by this component.
    exec_properties: Dictionary of execution properties.

  Returns:
    A resolved container spec.
  """
  if isinstance(container_spec_tmpl,
                executor_specs.TemplatedExecutorContainerSpec):
    # Placeholder-style spec: only the command line needs resolution.
    resolved_command = _resolve_container_command_line(
        cmd_args=container_spec_tmpl.command,
        input_dict=input_dict,
        output_dict=output_dict,
        exec_properties=exec_properties,
    )
    return executor_spec.ExecutorContainerSpec(
        image=container_spec_tmpl.image,
        command=resolved_command,
    )

  # Jinja2-style spec: render every field against the execution context.
  render_context = {
      'input_dict': input_dict,
      'output_dict': output_dict,
      'exec_properties': exec_properties,
  }
  return executor_spec.ExecutorContainerSpec(
      image=_render_text(container_spec_tmpl.image, render_context),
      command=_render_items(container_spec_tmpl.command, render_context),
      args=_render_items(container_spec_tmpl.args, render_context))
def _render_items(items: List[Text], context: Dict[Text, Any]) -> List[Text]:
  """Renders each Jinja2 template string in `items` against `context`.

  Falsy inputs (None or an empty list) are returned unchanged.
  """
  if not items:
    return items
  rendered = []
  for item in items:
    rendered.append(_render_text(item, context))
  return rendered
def _render_text(text: Text, context: Dict[Text, Any]) -> Text:
  """Renders a single Jinja2 template string against `context`."""
  template = jinja2.Template(text)
  return template.render(context)
def _resolve_container_command_line(
    cmd_args: Optional[List[
        placeholders.CommandlineArgumentType]],
    input_dict: Dict[Text, List[types.Artifact]],
    output_dict: Dict[Text, List[types.Artifact]],
    exec_properties: Dict[Text, Any],
) -> List[Text]:
  """Resolves placeholders in the command line of a container.

  Args:
    cmd_args: command line args to resolve.
    input_dict: Dictionary of input artifacts consumed by this component.
    output_dict: Dictionary of output artifacts produced by this component.
    exec_properties: Dictionary of execution properties.

  Returns:
    Resolved command line.

  Raises:
    TypeError: If an argument has an unsupported placeholder type, or if a
      placeholder does not resolve to a string.
  """

  def expand_command_line_arg(
      cmd_arg: placeholders.CommandlineArgumentType,
  ) -> Text:
    """Resolves a single argument."""
    if isinstance(cmd_arg, str):
      return cmd_arg
    elif isinstance(cmd_arg, placeholders.InputValuePlaceholder):
      # Exec properties take precedence over value artifacts with the same
      # input name.
      if cmd_arg.input_name in exec_properties:
        return str(exec_properties[cmd_arg.input_name])
      else:
        artifact = input_dict[cmd_arg.input_name][0]
        return str(artifact.value)
    elif isinstance(cmd_arg, placeholders.InputUriPlaceholder):
      return input_dict[cmd_arg.input_name][0].uri
    elif isinstance(cmd_arg, placeholders.OutputUriPlaceholder):
      return output_dict[cmd_arg.output_name][0].uri
    elif isinstance(cmd_arg, placeholders.ConcatPlaceholder):
      # Recursively expand the parts, then join them into a single token.
      resolved_items = [expand_command_line_arg(item) for item in cmd_arg.items]
      for item in resolved_items:
        # `Text` is just an alias for `str`, so checking `str` suffices.
        if not isinstance(item, str):
          raise TypeError('Expanded item "{}" has incorrect type "{}"'.format(
              item, type(item)))
      return ''.join(resolved_items)
    else:
      raise TypeError(
          ('Unsupported type of command-line arguments: "{}".'
           ' Supported types are {}.')
          .format(type(cmd_arg), str(executor_specs.CommandlineArgumentType)))

  resolved_command_line = []
  for cmd_arg in (cmd_args or []):
    resolved_cmd_arg = expand_command_line_arg(cmd_arg)
    if not isinstance(resolved_cmd_arg, str):
      raise TypeError(
          'Resolved argument "{}" (type="{}") is not a string.'.format(
              resolved_cmd_arg, type(resolved_cmd_arg)))
    resolved_command_line.append(resolved_cmd_arg)
  return resolved_command_line
def to_swagger_dict(config: Any) -> Any:
  """Converts a config object to a swagger API dict.

  Recursively replaces swagger-codegen generated objects (anything exposing
  an `attribute_map`) with plain dictionaries, descending into lists and
  dicts along the way. This works around a bug
  (https://github.com/swagger-api/swagger-codegen/issues/8948)
  in swagger generated code.

  Args:
    config: The config object. It can be one of List, Dict or a Swagger code
      generated object, which has a `attribute_map` attribute.

  Returns:
    The original object with all Swagger generated objects replaced with
    dictionary objects.
  """
  if isinstance(config, list):
    return [to_swagger_dict(element) for element in config]
  if hasattr(config, 'attribute_map'):
    # Map python attribute names to their swagger names, dropping attributes
    # whose values are falsy (unset fields).
    converted = {}
    for attr_name, swagger_name in config.attribute_map.items():
      value = getattr(config, attr_name)
      if value:
        converted[swagger_name] = to_swagger_dict(value)
    return converted
  if isinstance(config, dict):
    return {key: to_swagger_dict(value) for key, value in config.items()}
  return config
"""Kubernetes component launcher which launches a container in a Kubernetes cluster."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, cast
from absl import logging
from kubernetes import client
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import kubernetes_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import container_common
from tfx.utils import kube_utils
class KubernetesComponentLauncher(base_component_launcher.BaseComponentLauncher
                                 ):
  """Responsible for launching a container executor on Kubernetes."""

  # TODO(hongyes): add container spec into exec_properties for driver to check.
  @classmethod
  def can_launch(
      cls,
      component_executor_spec: executor_spec.ExecutorSpec,
      component_config: base_component_config.BaseComponentConfig = None
  ) -> bool:
    """Checks if the launcher can launch the executor spec.

    Args:
      component_executor_spec: The executor spec to check.
      component_config: Optional component config; if given, it must be a
        `KubernetesComponentConfig` for this launcher to apply.

    Returns:
      True if this launcher can launch the given executor spec.
    """
    if component_config and not isinstance(
        component_config,
        kubernetes_component_config.KubernetesComponentConfig):
      return False
    # Both plain and templated container specs are supported.
    return isinstance(component_executor_spec,
                      (executor_spec.ExecutorContainerSpec,
                       executor_specs.TemplatedExecutorContainerSpec))

  def _run_executor(self, execution_id: int,
                    input_dict: Dict[Text, List[types.Artifact]],
                    output_dict: Dict[Text, List[types.Artifact]],
                    exec_properties: Dict[Text, Any]) -> None:
    """Execute underlying component implementation.

    Runs executor container in a Kubernetes Pod and wait until it goes into
    `Succeeded` or `Failed` state.

    Args:
      execution_id: The ID of the execution.
      input_dict: Input dict from input key to a list of Artifacts. These are
        often outputs of another component in the pipeline and passed to the
        component by the orchestration system.
      output_dict: Output dict from output key to a list of Artifacts. These
        are often consumed by a dependent component.
      exec_properties: A dict of execution properties. These are inputs to
        pipeline with primitive types (int, string, float) and fully
        materialized when a pipeline is constructed. No dependency to other
        component or later injection from orchestration systems is necessary
        or possible on these values.

    Raises:
      RuntimeError: when the pod is in `Failed` state or unexpected failure
        from Kubernetes API.
    """
    container_spec = cast(executor_spec.ExecutorContainerSpec,
                          self._component_executor_spec)

    # Replace container spec with jinja2 template.
    container_spec = container_common.resolve_container_template(
        container_spec, input_dict, output_dict, exec_properties)

    pod_name = self._build_pod_name(execution_id)
    # TODO(hongyes): replace the default value from component config.
    # Fall back to the conventional 'kubeflow' namespace when the KFP
    # namespace cannot be detected.
    try:
      namespace = kube_utils.get_kfp_namespace()
    except RuntimeError:
      namespace = 'kubeflow'

    pod_manifest = self._build_pod_manifest(pod_name, container_spec)
    core_api = kube_utils.make_core_v1_api()

    if kube_utils.is_inside_kfp():
      # Inherit service account and ownership from the launcher's own pod so
      # the executor pod is garbage-collected together with the workflow.
      launcher_pod = kube_utils.get_current_kfp_pod(core_api)
      pod_manifest['spec']['serviceAccount'] = launcher_pod.spec.service_account
      pod_manifest['spec'][
          'serviceAccountName'] = launcher_pod.spec.service_account_name
      pod_manifest['metadata'][
          'ownerReferences'] = container_common.to_swagger_dict(
              launcher_pod.metadata.owner_references)
    else:
      pod_manifest['spec']['serviceAccount'] = kube_utils.TFX_SERVICE_ACCOUNT
      pod_manifest['spec'][
          'serviceAccountName'] = kube_utils.TFX_SERVICE_ACCOUNT

    logging.info('Looking for pod "%s:%s".', namespace, pod_name)
    resp = kube_utils.get_pod(core_api, pod_name, namespace)
    if not resp:
      logging.info('Pod "%s:%s" does not exist. Creating it...',
                   namespace, pod_name)
      logging.info('Pod manifest: %s', pod_manifest)
      try:
        resp = core_api.create_namespaced_pod(
            namespace=namespace, body=pod_manifest)
      except client.rest.ApiException as e:
        raise RuntimeError(
            'Failed to created container executor pod!\nReason: %s\nBody: %s' %
            (e.reason, e.body))

    # Wait up to 300 seconds for the pod to move from pending to another status.
    logging.info('Waiting for pod "%s:%s" to start.', namespace, pod_name)
    kube_utils.wait_pod(
        core_api,
        pod_name,
        namespace,
        exit_condition_lambda=kube_utils.pod_is_not_pending,
        condition_description='non-pending status',
        timeout_sec=300)

    logging.info('Start log streaming for pod "%s:%s".', namespace, pod_name)
    try:
      logs = core_api.read_namespaced_pod_log(
          name=pod_name,
          namespace=namespace,
          container=kube_utils.ARGO_MAIN_CONTAINER_NAME,
          follow=True,
          _preload_content=False).stream()
    except client.rest.ApiException as e:
      raise RuntimeError(
          'Failed to stream the logs from the pod!\nReason: %s\nBody: %s' %
          (e.reason, e.body))

    for log in logs:
      logging.info(log.decode().rstrip('\n'))

    # Wait indefinitely for the pod to complete.
    resp = kube_utils.wait_pod(
        core_api,
        pod_name,
        namespace,
        exit_condition_lambda=kube_utils.pod_is_done,
        condition_description='done state')

    if resp.status.phase == kube_utils.PodPhase.FAILED.value:
      raise RuntimeError('Pod "%s:%s" failed with status "%s".' %
                         (namespace, pod_name, resp.status))

    logging.info('Pod "%s:%s" is done.', namespace, pod_name)

  def _build_pod_manifest(
      self, pod_name: Text,
      container_spec: executor_spec.ExecutorContainerSpec) -> Dict[Text, Any]:
    """Build a pod spec.

    The function builds a pod spec by patching executor container spec into
    the pod spec from component config.

    Args:
      pod_name: The name of the pod.
      container_spec: The resolved executor container spec.

    Returns:
      The pod manifest in dictionary format.
    """
    if self._component_config:
      kubernetes_config = cast(
          kubernetes_component_config.KubernetesComponentConfig,
          self._component_config)
      pod_manifest = container_common.to_swagger_dict(kubernetes_config.pod)
    else:
      pod_manifest = {}

    pod_manifest.update({
        'apiVersion': 'v1',
        'kind': 'Pod',
    })
    # TODO(hongyes): figure out a better way to figure out type hints for
    # nested dict.
    metadata = pod_manifest.setdefault('metadata', {})  # type: Dict[Text, Any]
    metadata.update({'name': pod_name})
    spec = pod_manifest.setdefault('spec', {})  # type: Dict[Text, Any]
    spec.update({'restartPolicy': 'Never'})
    containers = spec.setdefault('containers',
                                 [])  # type: List[Dict[Text, Any]]

    # Patch the executor's image/command/args into the main container,
    # creating it if the user-supplied pod spec didn't define one.
    container = None  # type: Optional[Dict[Text, Any]]
    for c in containers:
      if c['name'] == kube_utils.ARGO_MAIN_CONTAINER_NAME:
        container = c
        break
    if not container:
      container = {'name': kube_utils.ARGO_MAIN_CONTAINER_NAME}
      containers.append(container)
    container.update({
        'image': container_spec.image,
        'command': container_spec.command,
        'args': container_spec.args,
    })
    return pod_manifest

  def _build_pod_name(self, execution_id: int) -> Text:
    """Builds a unique, Kubernetes-safe pod name for this execution.

    Pipeline name, run id and component id are truncated to keep the name
    within Kubernetes' length limits before sanitization.
    """
    if self._pipeline_info.run_id:
      pipeline_name = (
          self._pipeline_info.pipeline_name[:50] + '-' +
          self._pipeline_info.run_id[:50])
    else:
      pipeline_name = self._pipeline_info.pipeline_name[:100]

    pod_name = '%s-%s-%s' % (
        pipeline_name, self._component_info.component_id[:50], execution_id)
    return kube_utils.sanitize_pod_name(pod_name)
"""For component execution, includes driver, executor and publisher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
from typing import Any, Dict, List, Optional, Text
import absl
from six import with_metaclass
from tfx import types
from tfx.dsl.components.base import base_node
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import publisher
from tfx.orchestration.config import base_component_config
class BaseComponentLauncher(with_metaclass(abc.ABCMeta, object)):
  """Responsible for launching driver, executor and publisher of component."""

  def __init__(
      self,
      component: base_node.BaseNode,
      pipeline_info: data_types.PipelineInfo,
      driver_args: data_types.DriverArgs,
      metadata_connection: metadata.Metadata,
      beam_pipeline_args: List[Text],
      additional_pipeline_args: Dict[Text, Any],
      component_config: Optional[
          base_component_config.BaseComponentConfig] = None,
  ):
    """Initialize a BaseComponentLauncher.

    Args:
      component: The Tfx node to launch.
      pipeline_info: An instance of data_types.PipelineInfo that holds pipeline
        properties.
      driver_args: An instance of data_types.DriverArgs that holds component
        specific driver args.
      metadata_connection: ML metadata connection. The connection is expected
        to not be opened when given to this object.
      beam_pipeline_args: Pipeline arguments for Beam powered Components.
      additional_pipeline_args: Additional pipeline args.
      component_config: Optional component specific config to instrument
        launcher on how to launch a component.

    Raises:
      ValueError: when component and component_config are not launchable by
        the launcher.
    """
    self._pipeline_info = pipeline_info
    self._component_info = data_types.ComponentInfo(
        component_type=component.type,
        component_id=component.id,
        pipeline_info=self._pipeline_info)
    self._driver_args = driver_args

    self._driver_class = component.driver_class
    self._component_executor_spec = component.executor_spec

    self._input_dict = component.inputs.get_all()
    self._output_dict = component.outputs.get_all()
    self._exec_properties = component.exec_properties

    self._metadata_connection = metadata_connection
    self._beam_pipeline_args = beam_pipeline_args

    self._additional_pipeline_args = additional_pipeline_args

    self._component_config = component_config

    # Fail fast if this launcher subclass cannot handle the given executor
    # spec / component config combination.
    if not self.can_launch(self._component_executor_spec,
                           self._component_config):
      raise ValueError(
          'component.executor_spec with type "%s" and component config with'
          ' type "%s" are not launchable by "%s".' % (
              type(self._component_executor_spec).__name__,
              type(self._component_config).__name__,
              type(self).__name__,
          ))

  @classmethod
  def create(
      cls,
      component: base_node.BaseNode,
      pipeline_info: data_types.PipelineInfo,
      driver_args: data_types.DriverArgs,
      metadata_connection: metadata.Metadata,
      beam_pipeline_args: List[Text],
      additional_pipeline_args: Dict[Text, Any],
      component_config: Optional[
          base_component_config.BaseComponentConfig] = None,
  ) -> 'BaseComponentLauncher':
    """Initialize a ComponentLauncher directly from a BaseComponent instance.

    This class method is the contract between `TfxRunner` and
    `BaseComponentLauncher` to support launcher polymorphism. Subclass of this
    class must make sure it can be initialized by the method.

    Args:
      component: The component to launch.
      pipeline_info: An instance of data_types.PipelineInfo that holds pipeline
        properties.
      driver_args: An instance of data_types.DriverArgs that holds component
        specific driver args.
      metadata_connection: ML metadata connection. The connection is expected
        to not be opened when given to this object.
      beam_pipeline_args: Pipeline arguments for Beam powered Components.
      additional_pipeline_args: Additional pipeline args.
      component_config: Optional component specific config to instrument
        launcher on how to launch a component.

    Returns:
      A new instance of component launcher.
    """
    return cls(
        component=component,
        pipeline_info=pipeline_info,
        driver_args=driver_args,
        metadata_connection=metadata_connection,
        beam_pipeline_args=beam_pipeline_args,
        additional_pipeline_args=additional_pipeline_args,
        component_config=component_config)  # pytype: disable=not-instantiable

  @classmethod
  @abc.abstractmethod
  def can_launch(
      cls, component_executor_spec: executor_spec.ExecutorSpec,
      component_config: base_component_config.BaseComponentConfig) -> bool:
    """Checks if the launcher can launch the executor spec with an optional component config."""
    raise NotImplementedError

  def _run_driver(
      self, input_dict: Dict[Text,
                             types.Channel], output_dict: Dict[Text,
                                                               types.Channel],
      exec_properties: Dict[Text, Any]) -> data_types.ExecutionDecision:
    """Prepare inputs, outputs and execution properties for actual execution."""
    # The driver may decide to reuse a cached execution, in which case the
    # executor step is skipped (see launch()).
    with self._metadata_connection as m:
      driver = self._driver_class(metadata_handler=m)

      execution_decision = driver.pre_execution(
          input_dict=input_dict,
          output_dict=output_dict,
          exec_properties=exec_properties,
          driver_args=self._driver_args,
          pipeline_info=self._pipeline_info,
          component_info=self._component_info)

      return execution_decision

  @abc.abstractmethod
  # TODO(jyzhao): consider returning an execution result.
  def _run_executor(self, execution_id: int,
                    input_dict: Dict[Text, List[types.Artifact]],
                    output_dict: Dict[Text, List[types.Artifact]],
                    exec_properties: Dict[Text, Any]) -> None:
    """Execute underlying component implementation."""
    raise NotImplementedError

  def _run_publisher(self, output_dict: Dict[Text,
                                             List[types.Artifact]]) -> None:
    """Publish execution result to ml metadata."""
    with self._metadata_connection as m:
      p = publisher.Publisher(metadata_handler=m)
      p.publish_execution(
          component_info=self._component_info, output_artifacts=output_dict)

  def launch(self) -> data_types.ExecutionInfo:
    """Execute the component, includes driver, executor and publisher.

    Returns:
      The execution decision of the launch.
    """
    absl.logging.info('Running driver for %s',
                      self._component_info.component_id)
    execution_decision = self._run_driver(self._input_dict, self._output_dict,
                                          self._exec_properties)

    if not execution_decision.use_cached_results:
      absl.logging.info('Running executor for %s',
                        self._component_info.component_id)
      # Make a deep copy for input_dict and exec_properties, because they
      # should be immutable in this context.
      # output_dict can still be changed, specifically properties.
      self._run_executor(execution_decision.execution_id,
                         copy.deepcopy(execution_decision.input_dict),
                         execution_decision.output_dict,
                         copy.deepcopy(execution_decision.exec_properties))

    absl.logging.info('Running publisher for %s',
                      self._component_info.component_id)
    self._run_publisher(output_dict=execution_decision.output_dict)

    return data_types.ExecutionInfo(
        input_dict=execution_decision.input_dict,
        output_dict=execution_decision.output_dict,
        exec_properties=execution_decision.exec_properties,
        execution_id=execution_decision.execution_id)
"""In process component launcher which launches python executors in process."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from typing import Any, Dict, List, Text, cast
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.config import base_component_config
from tfx.orchestration.launcher import base_component_launcher
class InProcessComponentLauncher(base_component_launcher.BaseComponentLauncher):
  """Responsible for launching a python executor.

  The executor runs in the same process as the rest of the component,
  i.e. its driver and publisher.
  """

  @classmethod
  def can_launch(
      cls, component_executor_spec: executor_spec.ExecutorSpec,
      component_config: base_component_config.BaseComponentConfig) -> bool:
    """Checks if the launcher can launch the executor spec."""
    # Only bare python-class executor specs without any component config are
    # supported in-process.
    return (not component_config) and isinstance(
        component_executor_spec, executor_spec.ExecutorClassSpec)

  def _run_executor(self, execution_id: int,
                    input_dict: Dict[Text, List[types.Artifact]],
                    output_dict: Dict[Text, List[types.Artifact]],
                    exec_properties: Dict[Text, Any]) -> None:
    """Execute underlying component implementation."""
    class_spec = cast(executor_spec.ExecutorClassSpec,
                      self._component_executor_spec)
    context = base_executor.BaseExecutor.Context(
        beam_pipeline_args=self._beam_pipeline_args,
        tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''),
        unique_id=str(execution_id))
    # Type hint of component will cause not-instantiable error as
    # component.executor is Type[BaseExecutor] which has an abstract function.
    executor = class_spec.executor_class(context)  # type: ignore

    # input_dict and exec_properties are treated as immutable here, so deep
    # copies are handed to the executor; output_dict may legitimately be
    # mutated (specifically artifact properties).
    executor.Do(copy.deepcopy(input_dict), output_dict,
                copy.deepcopy(exec_properties))
"""In process implementation of Resolvers."""
from typing import Mapping, Sequence, Dict, List, Optional
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.resolver import factory as resolver_factory
from tfx.proto.orchestration import pipeline_pb2
class ResolverStepProcessor:
  """ResolverStepProcessor for processing a single ResolverStep.

  The input and output types of __call__ are identical, so resolver steps
  can be chained: the output of one step is fed into the next. A None
  output halts the chain and makes the overall result None immediately.
  """

  def __init__(self, resolver_step: pipeline_pb2.ResolverConfig.ResolverStep):
    self._resolver = resolver_factory.make_resolver_strategy_instance(
        resolver_step)
    self._input_keys = set(resolver_step.input_keys)

  def __call__(
      self, metadata_handler: metadata.Metadata,
      input_dict: Mapping[str, Sequence[types.Artifact]]
  ) -> Optional[Dict[str, List[types.Artifact]]]:
    """Resolves artifacts in input_dict by optionally querying MLMD.

    Args:
      metadata_handler: A metadata handler to access the MLMD store.
      input_dict: Inputs to be resolved.

    Returns:
      The resolved input_dict, or None if the resolver strategy returns None.
    """
    # An empty input_keys set means every key participates in resolution.
    keys_to_resolve = self._input_keys or set(input_dict.keys())
    to_resolve = {}
    to_bypass = {}
    for key, value in input_dict.items():
      target = to_resolve if key in keys_to_resolve else to_bypass
      target[key] = list(value)
    resolved = self._resolver.resolve_artifacts(metadata_handler, to_resolve)
    # A None result from the strategy short-circuits the whole step chain.
    if resolved is not None:
      resolved.update(to_bypass)
    return resolved
def make_resolver_processors(
    resolver_config: pipeline_pb2.ResolverConfig
) -> List[ResolverStepProcessor]:
  """Factory function building one ResolverStepProcessor per configured step."""
  return list(map(ResolverStepProcessor, resolver_config.resolver_steps))
"""Docker component launcher which launches README.ml-pipelines-sdk.md container in docker environment ."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, cast
from absl import logging
import docker
from tfx.dsl.compiler import placeholder_utils
from tfx.dsl.component.experimental import executor_specs
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import platform_config_pb2
from tfx.utils import proto_utils
from google.protobuf import message
class DockerExecutorOperator(base_executor_operator.BaseExecutorOperator):
  """Responsible for launching a container executor."""

  # Proto types that this operator knows how to interpret.
  SUPPORTED_EXECUTOR_SPEC_TYPE = [executable_spec_pb2.ContainerExecutableSpec]
  SUPPORTED_PLATFORM_CONFIG_TYPE = [platform_config_pb2.DockerPlatformConfig]

  def __init__(self,
               executor_spec: message.Message,
               platform_config: Optional[message.Message] = None):
    """Initializes the operator.

    Args:
      executor_spec: A ContainerExecutableSpec proto carrying the image and
        commands to run.
      platform_config: An optional DockerPlatformConfig proto with Docker
        specific run options; proto defaults are used when it is absent.
    """
    super().__init__(executor_spec, platform_config)
    self._container_executor_spec = cast(
        executable_spec_pb2.ContainerExecutableSpec, self._executor_spec)
    if self._platform_config:
      self._docker_platform_config = cast(
          platform_config_pb2.DockerPlatformConfig, self._platform_config)
    else:
      # No platform config supplied: fall back to proto default values.
      self._docker_platform_config = platform_config_pb2.DockerPlatformConfig()

  def run_executor(
      self, execution_info: data_types.ExecutionInfo
  ) -> execution_result_pb2.ExecutorOutput:
    """Execute underlying component implementation.

    Resolves placeholders in the container command, runs the container to
    completion while streaming its logs, and fails if the container exits
    with a non-zero status.

    Args:
      execution_info: The details of this execution, used to resolve the
        placeholder expressions in the container command.

    Returns:
      An empty ExecutorOutput proto.

    Raises:
      RuntimeError: if the container exits with a non-zero status code.
    """
    context = placeholder_utils.ResolutionContext(
        exec_info=execution_info,
        executor_spec=self._executor_spec,
        platform_config=self._platform_config)

    component_executor_spec = (
        executor_specs.TemplatedExecutorContainerSpec(
            image=self._container_executor_spec.image,
            command=[
                placeholder_utils.resolve_placeholder_expression(cmd, context)
                for cmd in self._container_executor_spec.commands
            ]))

    logging.info('Container spec: %s', vars(component_executor_spec))
    logging.info('Docker platform config: %s',
                 proto_utils.proto_to_json(self._docker_platform_config))

    # Call client.containers.run and wait for completion.
    # ExecutorContainerSpec follows the k8s container spec which has different
    # names to Docker's container spec. It's intended to set command to docker's
    # entrypoint and args to docker's command.
    if self._docker_platform_config.docker_server_url:
      client = docker.DockerClient(
          base_url=self._docker_platform_config.docker_server_url)
    else:
      client = docker.from_env()

    run_args = self._build_run_args(self._docker_platform_config)
    # detach=True so logs can be streamed below while the container runs.
    container = client.containers.run(
        image=component_executor_spec.image,
        command=component_executor_spec.command,
        detach=True,
        **run_args)

    # Stream container logs into our own log output.
    for log in container.logs(stream=True):
      logging.info('Docker: %s', log.decode('utf-8'))
    exit_code = container.wait()['StatusCode']
    if exit_code != 0:
      raise RuntimeError(
          'Container exited with error code "{}"'.format(exit_code))
    # TODO(b/141192583): Report data to publisher
    # - report container digest
    # - report replaced command line entrypoints
    # - report docker run args
    return execution_result_pb2.ExecutorOutput()

  def _build_run_args(
      self, docker_platform_config: platform_config_pb2.DockerPlatformConfig
  ) -> Dict[str, Any]:
    """Converts DockerPlatformConfig to args accepted by containers.run."""
    if docker_platform_config.additional_run_args:
      result = dict(docker_platform_config.additional_run_args)
    else:
      result = {}
    result.update(privileged=(docker_platform_config.privileged or False))
    if docker_platform_config.environment:
      result.update(environment=docker_platform_config.environment)
    if docker_platform_config.name:
      result.update(name=docker_platform_config.name)
    if docker_platform_config.user:
      # A username takes precedence over a numeric uid when both are set.
      if docker_platform_config.user.username:
        result.update(user=docker_platform_config.user.username)
      else:
        result.update(user=docker_platform_config.user.uid)
    if docker_platform_config.volumes:
      # Translate the proto volume map into the {name: {'bind': ..., 'mode':
      # ...}} dict shape that docker-py's containers.run(volumes=...) expects.
      volumes = {}
      for volume_name in docker_platform_config.volumes:
        volume_mount_pb = docker_platform_config.volumes[volume_name]
        volumes[volume_name] = {
            'bind': volume_mount_pb.bind,
            'mode': volume_mount_pb.mode
        }
      result.update(volumes=volumes)
    return result
"""Base class to define how to operator an executor."""
import sys
from typing import Dict, List, Optional, cast
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.types.value_artifact import ValueArtifact
from tfx.utils import import_utils
from google.protobuf import message
# NOTE(review): not referenced in this module — presumably the directory name
# used for a node's stateful working dir; confirm against outputs_utils.
_STATEFUL_WORKING_DIR = 'stateful_working_dir'
def _populate_output_artifact(
    executor_output: execution_result_pb2.ExecutorOutput,
    output_dict: Dict[str, List[types.Artifact]]):
  """Copies every artifact list in output_dict into executor_output."""
  for key, artifact_list in output_dict.items():
    artifact_list_proto = execution_result_pb2.ExecutorOutput.ArtifactList()
    artifact_list_proto.artifacts.extend(
        artifact.mlmd_artifact for artifact in artifact_list)
    executor_output.output_artifacts[key].CopyFrom(artifact_list_proto)
class PythonExecutorOperator(base_executor_operator.BaseExecutorOperator):
  """PythonExecutorOperator handles python class based executor's init and execution.

  Attributes:
    extra_flags: Extra flags that will be passed to Python executors. They
      come from two sources, in this order:
      1. The `extra_flags` set in the executor spec.
      2. The flags passed in when starting the program by users or by other
         systems.
      The interpretation of these flags relies on the executor implementation.
  """

  SUPPORTED_EXECUTOR_SPEC_TYPE = [executable_spec_pb2.PythonClassExecutableSpec]
  # Python executors run locally; no platform config type is supported.
  SUPPORTED_PLATFORM_CONFIG_TYPE = []

  def __init__(self,
               executor_spec: message.Message,
               platform_config: Optional[message.Message] = None):
    """Initialize a PythonExecutorOperator.

    Args:
      executor_spec: The specification of how to initialize the executor.
      platform_config: The specification of how to allocate resource for the
        executor.
    """
    # Python executors run locally, so platform_config is not used.
    del platform_config
    super().__init__(executor_spec)
    python_class_executor_spec = cast(
        executable_spec_pb2.PythonClassExecutableSpec, self._executor_spec)
    self._executor_cls = import_utils.import_class_by_path(
        python_class_executor_spec.class_path)
    # Flags declared in the spec come first, then process-level CLI flags.
    self.extra_flags = []
    self.extra_flags.extend(python_class_executor_spec.extra_flags)
    self.extra_flags.extend(sys.argv[1:])

  def run_executor(
      self, execution_info: data_types.ExecutionInfo
  ) -> execution_result_pb2.ExecutorOutput:
    """Invokes the executor with the inputs provided by the Launcher.

    Args:
      execution_info: A wrapper of the details of this execution.

    Returns:
      The output from the executor.
    """
    # TODO(b/156000550): We should not specialize `Context` to embed beam
    # pipeline args. Instead, the `Context` should consist of generic purpose
    # `extra_flags` which can be interpreted differently by different
    # implementations of executors.
    context = base_executor.BaseExecutor.Context(
        beam_pipeline_args=self.extra_flags,
        tmp_dir=execution_info.tmp_dir,
        unique_id=str(execution_info.execution_id),
        executor_output_uri=execution_info.execution_output_uri,
        stateful_working_dir=execution_info.stateful_working_dir)
    executor = self._executor_cls(context=context)

    for artifact_list in execution_info.input_dict.values():
      for artifact in artifact_list:
        if isinstance(artifact, ValueArtifact):
          # Read ValueArtifact into memory.
          artifact.read()

    result = executor.Do(execution_info.input_dict, execution_info.output_dict,
                         execution_info.exec_properties)
    if not result:
      # If result is not returned from the Do function, then try to
      # read from the executor_output_uri.
      if fileio.exists(execution_info.execution_output_uri):
        # Use a context manager so the file handle is not leaked.
        with fileio.open(execution_info.execution_output_uri, 'rb') as f:
          result = execution_result_pb2.ExecutorOutput.FromString(f.read())
      else:
        # Old style TFX executors don't return executor_output, but modify
        # output_dict and exec_properties in place. For backward compatibility,
        # we use their executor_output and exec_properties to construct
        # ExecutorOutput.
        result = execution_result_pb2.ExecutorOutput()
        _populate_output_artifact(result, execution_info.output_dict)
    return result
"""Portable library for registering and publishing executions."""
import copy
import os
from typing import List, Mapping, MutableMapping, Optional, Sequence, cast
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import execution_result_pb2
from ml_metadata.proto import metadata_store_pb2
def _check_validity(new_artifact: metadata_store_pb2.Artifact,
                    original_artifact: types.Artifact,
                    has_multiple_artifacts: bool
                    ) -> None:
  """Check the validity of new artifact against the original artifact."""
  # The executor may update an artifact's payload but never its type.
  if new_artifact.type_id != original_artifact.type_id:
    raise RuntimeError('Executor output should not change artifact type.')

  if not has_multiple_artifacts:
    # With a single output artifact, the system-generated URI must be kept.
    if new_artifact.uri != original_artifact.uri:
      raise RuntimeError(
          'When there is one artifact to publish, the URI of it should be '
          'identical to the URI of system generated artifact.')
  else:
    # With multiple output artifacts, each must live in a direct
    # sub-directory of the system-generated URI.
    if os.path.dirname(new_artifact.uri) != original_artifact.uri:
      raise RuntimeError(
          'When there are multiple artifacts to publish, their URIs '
          'should be direct sub-directories of the URI of the system generated '
          'artifact.')
def publish_cached_execution(
    metadata_handler: metadata.Metadata,
    contexts: Sequence[metadata_store_pb2.Context],
    execution_id: int,
    output_artifacts: Optional[MutableMapping[str,
                                              Sequence[types.Artifact]]] = None,
) -> None:
  """Marks an existing execution as using cached outputs from a previous execution.

  Args:
    metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
    execution_id: The id of the execution.
    output_artifacts: Output artifacts of the execution. Each artifact will be
      linked with the execution through an event with type OUTPUT.
  """
  (execution,) = metadata_handler.store.get_executions_by_id([execution_id])
  execution.last_known_state = metadata_store_pb2.Execution.CACHED
  # Persist the state change and link outputs; no input artifacts are
  # (re-)linked for a cache hit.
  execution_lib.put_execution(
      metadata_handler,
      execution,
      contexts,
      input_artifacts=None,
      output_artifacts=output_artifacts)
def publish_succeeded_execution(
    metadata_handler: metadata.Metadata,
    execution_id: int,
    contexts: Sequence[metadata_store_pb2.Context],
    output_artifacts: Optional[MutableMapping[str,
                                              Sequence[types.Artifact]]] = None,
    executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> Optional[MutableMapping[str, List[types.Artifact]]]:
  """Marks an existing execution as success.

  Also publishes the output artifacts produced by the execution. This method
  will also merge the executor produced info into system generated output
  artifacts. The `last_known_state` of the execution will be changed to
  `COMPLETE` and the output artifacts will be marked as `LIVE`.

  Args:
    metadata_handler: A handler to access MLMD.
    execution_id: The id of the execution to mark successful.
    contexts: MLMD contexts to associated with the execution.
    output_artifacts: Output artifacts skeleton of the execution, generated by
      the system. Each artifact will be linked with the execution through an
      event with type OUTPUT.
    executor_output: Executor outputs. `executor_output.output_artifacts` will
      be used to update system-generated output artifacts passed in through
      `output_artifacts` arg. There are three constraints to the update: 1. The
      keys in `executor_output.output_artifacts` are expected to be a subset
      of the system-generated output artifacts dict. 2. An update to a certain
      key should contain all the artifacts under that key. 3. An update to an
      artifact should not change the type of the artifact.

  Returns:
    The maybe updated output_artifacts, note that only outputs whose key are in
    executor_output will be updated and others will be untouched. That said,
    it can be partially updated.

  Raises:
    RuntimeError: if the executor output to an output channel is partial.
  """
  # Deep-copy so the caller's output skeleton is never mutated in place.
  output_artifacts = copy.deepcopy(output_artifacts) or {}
  output_artifacts = cast(MutableMapping[str, List[types.Artifact]],
                          output_artifacts)
  if executor_output:
    # Constraint 1: executor keys must be a subset of the system skeleton.
    if not set(executor_output.output_artifacts.keys()).issubset(
        output_artifacts.keys()):
      raise RuntimeError(
          'Executor output %s contains more keys than output skeleton %s.' %
          (executor_output, output_artifacts))
    for key, artifact_list in output_artifacts.items():
      if key not in executor_output.output_artifacts:
        continue
      updated_artifact_list = executor_output.output_artifacts[key].artifacts
      # We assume the original output dict must include at least one output
      # artifact and all artifacts in the list share the same type.
      original_artifact = artifact_list[0]
      # Update the artifact list with what's in the executor output
      artifact_list.clear()
      # TODO(b/175426744): revisit this:
      # 1) Whether multiple output is needed or not after TFX components
      # are upgraded.
      # 2) If multiple outputs are needed and are a common practice, should we
      # use a driver instead to create the list of output artifacts instead
      # of letting the executor create them.
      for proto_artifact in updated_artifact_list:
        # Constraints 2 and 3 are enforced here per-artifact.
        _check_validity(
            proto_artifact, original_artifact, len(updated_artifact_list) > 1)
        python_artifact = types.Artifact(original_artifact.artifact_type)
        python_artifact.set_mlmd_artifact(proto_artifact)
        artifact_list.append(python_artifact)
  # Marks output artifacts as LIVE.
  for artifact_list in output_artifacts.values():
    for artifact in artifact_list:
      artifact.mlmd_artifact.state = metadata_store_pb2.Artifact.LIVE
  [execution] = metadata_handler.store.get_executions_by_id([execution_id])
  execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
  execution_lib.put_execution(
      metadata_handler,
      execution,
      contexts,
      output_artifacts=output_artifacts)
  return output_artifacts
def publish_failed_execution(
    metadata_handler: metadata.Metadata,
    contexts: Sequence[metadata_store_pb2.Context],
    execution_id: int,
    executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
) -> None:
  """Marks an existing execution as failed.

  Args:
    metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
    execution_id: The id of the execution.
    executor_output: The output of the executor, if any.
  """
  (execution,) = metadata_handler.store.get_executions_by_id([execution_id])
  execution.last_known_state = metadata_store_pb2.Execution.FAILED
  # Preserve any error details the executor reported alongside the failure.
  if executor_output and executor_output.HasField('execution_result'):
    execution_lib.set_execution_result(executor_output.execution_result,
                                       execution)
  execution_lib.put_execution(metadata_handler, execution, contexts)
def publish_internal_execution(
    metadata_handler: metadata.Metadata,
    contexts: Sequence[metadata_store_pb2.Context],
    execution_id: int,
    output_artifacts: Optional[MutableMapping[str,
                                              Sequence[types.Artifact]]] = None
) -> None:
  """Marks an existing execution as successful with INTERNAL_OUTPUT events.

  Args:
    metadata_handler: A handler to access MLMD.
    contexts: MLMD contexts to associate with the execution.
    execution_id: The id of the execution.
    output_artifacts: Output artifacts of the execution. Each artifact will be
      linked with the execution through an event with type INTERNAL_OUTPUT.
  """
  (execution,) = metadata_handler.store.get_executions_by_id([execution_id])
  execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
  # Unlike regular outputs, these artifacts are linked with the
  # INTERNAL_OUTPUT event type.
  execution_lib.put_execution(
      metadata_handler,
      execution,
      contexts,
      output_artifacts=output_artifacts,
      output_event_type=metadata_store_pb2.Event.INTERNAL_OUTPUT)
def register_execution(
    metadata_handler: metadata.Metadata,
    execution_type: metadata_store_pb2.ExecutionType,
    contexts: Sequence[metadata_store_pb2.Context],
    input_artifacts: Optional[MutableMapping[str,
                                             Sequence[types.Artifact]]] = None,
    exec_properties: Optional[Mapping[str, types.Property]] = None,
) -> metadata_store_pb2.Execution:
  """Registers a new execution in MLMD.

  Along with the execution:
  - the input artifacts will be linked to the execution.
  - the contexts will be linked to both the execution and its input artifacts.

  Args:
    metadata_handler: A handler to access MLMD.
    execution_type: The type of the execution.
    contexts: MLMD contexts to associate with the execution.
    input_artifacts: Input artifacts of the execution. Each artifact will be
      linked with the execution through an event.
    exec_properties: Execution properties. Will be attached to the execution.

  Returns:
    An MLMD execution that is registered in MLMD, with id populated.
  """
  # Newly registered executions always start in the RUNNING state.
  new_execution = execution_lib.prepare_execution(
      metadata_handler, execution_type, metadata_store_pb2.Execution.RUNNING,
      exec_properties)
  return execution_lib.put_execution(
      metadata_handler, new_execution, contexts,
      input_artifacts=input_artifacts)
"""This module defines the handler for resolver node."""
from typing import Any, Dict
from absl import logging
from tfx.orchestration import metadata
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import system_node_handler
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
class ResolverNodeHandler(system_node_handler.SystemNodeHandler):
  """The handler for the system Resolver node."""

  def _extract_proto_map(
      self,
      # The actual type of proto message of map<str, pipeline_pb2.Value>.
      proto_map: Any) -> Dict[str, Any]:
    """Converts a proto map of Values into a plain dict of Python values."""
    result = {}
    for key, value in proto_map.items():
      field_value = value.field_value
      result[key] = getattr(field_value, field_value.WhichOneof('value'))
    return result

  def run(
      self, mlmd_connection: metadata.Metadata,
      pipeline_node: pipeline_pb2.PipelineNode,
      pipeline_info: pipeline_pb2.PipelineInfo,
      pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec
  ) -> metadata_store_pb2.Execution:
    """Runs Resolver specific logic.

    Args:
      mlmd_connection: ML metadata connection.
      pipeline_node: The specification of the node that this launcher launches.
      pipeline_info: The information of the pipeline that this node runs in.
      pipeline_runtime_spec: The runtime information of the pipeline that this
        node runs in.

    Returns:
      The execution of the run.
    """
    logging.info('Running as an resolver node.')
    with mlmd_connection as mlmd_handler:
      # Step 1: prepare all contexts.
      contexts = context_lib.prepare_contexts(
          metadata_handler=mlmd_handler, node_contexts=pipeline_node.contexts)
      # Step 2: resolve execution properties and input artifacts.
      exec_properties = inputs_utils.resolve_parameters(
          node_parameters=pipeline_node.parameters)
      input_artifacts = inputs_utils.resolve_input_artifacts(
          metadata_handler=mlmd_handler, node_inputs=pipeline_node.inputs)
      # Step 3: register the execution in metadata.
      execution = execution_publish_utils.register_execution(
          metadata_handler=mlmd_handler,
          execution_type=pipeline_node.node_info.type,
          contexts=contexts,
          exec_properties=exec_properties)
      # Step 4: publish the execution as a cached execution, with the
      # resolved input artifacts linked as its output artifacts.
      execution_publish_utils.publish_internal_execution(
          metadata_handler=mlmd_handler,
          contexts=contexts,
          execution_id=execution.id,
          output_artifacts=input_artifacts)
      return execution
"""Data types shared for orchestration."""
from typing import Any, Dict, List
import attr
from tfx import types
from tfx.orchestration import data_types_utils
from tfx.proto.orchestration import execution_invocation_pb2
from tfx.proto.orchestration import pipeline_pb2
# TODO(b/150979622): We should introduce an id that is not changed across
# retries of the same component run and pass it to executor operators for
# human-readability purposes.
# TODO(b/165359991): Restore 'auto_attribs=True' once we drop Python3.5 support.
@attr.s
class ExecutionInfo:
  """A struct to store information for an execution."""
  # NOTE: the attribute order below defines the attrs-generated __init__
  # signature; do not reorder existing fields.
  # LINT.IfChange
  # The Execution id that is registered in MLMD.
  execution_id = attr.ib(type=int, default=None)
  # The input map to feed to execution
  input_dict = attr.ib(type=Dict[str, List[types.Artifact]], default=None)
  # The output map to feed to execution
  output_dict = attr.ib(type=Dict[str, List[types.Artifact]], default=None)
  # The exec_properties to feed to execution
  exec_properties = attr.ib(type=Dict[str, Any], default=None)
  # The uri to execution result. Note that the drivers or executors and
  # launchers may not run in the same process, so they should use this uri to
  # "return" the execution result to the launcher.
  execution_output_uri = attr.ib(type=str, default=None)
  # Stateful working dir will be deterministic given pipeline, node and run_id.
  # The typical use case is to restore a long running executor's state after
  # eviction. For example, a Trainer can use this directory to store
  # checkpoints.
  stateful_working_dir = attr.ib(type=str, default=None)
  # A temporary dir for executions; it is expected to be cleared up at the end
  # of executions in both success and failure cases.
  tmp_dir = attr.ib(type=str, default=None)
  # The config of this Node.
  pipeline_node = attr.ib(type=pipeline_pb2.PipelineNode, default=None)
  # The config of the pipeline that this node is running in.
  pipeline_info = attr.ib(type=pipeline_pb2.PipelineInfo, default=None)
  # The id of the pipeline run that this execution is in.
  pipeline_run_id = attr.ib(type=str, default=None)
  # LINT.ThenChange(../../proto/orchestration/execution_invocation.proto)

  def to_proto(self) -> execution_invocation_pb2.ExecutionInvocation:
    """Serializes this struct into an ExecutionInvocation proto."""
    return execution_invocation_pb2.ExecutionInvocation(
        execution_id=self.execution_id,
        input_dict=data_types_utils.build_artifact_struct_dict(self.input_dict),
        output_dict=data_types_utils.build_artifact_struct_dict(
            self.output_dict),
        execution_properties=data_types_utils.build_metadata_value_dict(
            self.exec_properties),
        output_metadata_uri=self.execution_output_uri,
        stateful_working_dir=self.stateful_working_dir,
        tmp_dir=self.tmp_dir,
        pipeline_node=self.pipeline_node,
        pipeline_info=self.pipeline_info,
        pipeline_run_id=self.pipeline_run_id)

  @classmethod
  def from_proto(
      cls, execution_invocation: execution_invocation_pb2.ExecutionInvocation
  ) -> 'ExecutionInfo':
    """Deserializes an ExecutionInvocation proto into an ExecutionInfo."""
    return cls(
        execution_id=execution_invocation.execution_id,
        input_dict=data_types_utils.build_artifact_dict(
            execution_invocation.input_dict),
        output_dict=data_types_utils.build_artifact_dict(
            execution_invocation.output_dict),
        exec_properties=data_types_utils.build_value_dict(
            execution_invocation.execution_properties),
        execution_output_uri=execution_invocation.output_metadata_uri,
        stateful_working_dir=execution_invocation.stateful_working_dir,
        tmp_dir=execution_invocation.tmp_dir,
        pipeline_node=execution_invocation.pipeline_node,
        pipeline_info=execution_invocation.pipeline_info,
        pipeline_run_id=execution_invocation.pipeline_run_id)
"""This module defines the handler for importer node."""
from typing import Any, Dict
from absl import logging
from tfx import types
from tfx.dsl.components.common import importer
from tfx.orchestration import metadata
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import system_node_handler
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
class ImporterNodeHandler(system_node_handler.SystemNodeHandler):
  """The handler for the system Importer node."""

  def _extract_proto_map(
      self,
      # The actual type of proto message of map<str, pipeline_pb2.Value>.
      proto_map: Any
  ) -> Dict[str, Any]:
    """Converts a proto map of Values into a plain dict of Python values."""
    extract_mlmd_value = lambda v: getattr(v, v.WhichOneof('value'))
    return {k: extract_mlmd_value(v.field_value) for k, v in proto_map.items()}

  def run(
      self, mlmd_connection: metadata.Metadata,
      pipeline_node: pipeline_pb2.PipelineNode,
      pipeline_info: pipeline_pb2.PipelineInfo,
      pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec
  ) -> metadata_store_pb2.Execution:
    """Runs Importer specific logic.

    Args:
      mlmd_connection: ML metadata connection.
      pipeline_node: The specification of the node that this launcher launches.
      pipeline_info: The information of the pipeline that this node runs in.
      pipeline_runtime_spec: The runtime information of the pipeline that this
        node runs in.

    Returns:
      The execution of the run.
    """
    logging.info('Running as an importer node.')
    with mlmd_connection as m:
      # 1. Prepares all contexts.
      contexts = context_lib.prepare_contexts(
          metadata_handler=m, node_contexts=pipeline_node.contexts)
      # 2. Resolves execution properties. Note that importers have no inputs.
      exec_properties = inputs_utils.resolve_parameters(
          node_parameters=pipeline_node.parameters)
      # 3. Registers execution in metadata.
      execution = execution_publish_utils.register_execution(
          metadata_handler=m,
          execution_type=pipeline_node.node_info.type,
          contexts=contexts,
          exec_properties=exec_properties)
      # 4. Generate output artifacts to represent the imported artifacts.
      output_spec = pipeline_node.outputs.outputs[importer.IMPORT_RESULT_KEY]
      properties = self._extract_proto_map(
          output_spec.artifact_spec.additional_properties)
      custom_properties = self._extract_proto_map(
          output_spec.artifact_spec.additional_custom_properties)
      output_artifact_class = types.Artifact(
          output_spec.artifact_spec.type).type
      output_artifacts = importer.generate_output_dict(
          metadata_handler=m,
          uri=str(exec_properties[importer.SOURCE_URI_KEY]),
          properties=properties,
          custom_properties=custom_properties,
          reimport=bool(exec_properties[importer.REIMPORT_OPTION_KEY]),
          output_artifact_class=output_artifact_class,
          mlmd_artifact_type=output_spec.artifact_spec.type)
      # 5. Publish the output artifacts.
      execution_publish_utils.publish_succeeded_execution(
          metadata_handler=m,
          execution_id=execution.id,
          contexts=contexts,
          output_artifacts=output_artifacts)
      return execution
"""This module defines README.ml-pipelines-sdk.md generic Launcher for all TFleX nodes."""
from typing import Any, Dict, List, Optional, Text, Type, TypeVar
from absl import logging
import attr
from tfx import types
from tfx.dsl.io import fileio
from tfx.orchestration import metadata
from tfx.orchestration.portable import base_driver_operator
from tfx.orchestration.portable import base_executor_operator
from tfx.orchestration.portable import cache_utils
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import docker_executor_operator
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import importer_node_handler
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import outputs_utils
from tfx.orchestration.portable import python_driver_operator
from tfx.orchestration.portable import python_executor_operator
from tfx.orchestration.portable import resolver_node_handler
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import driver_output_pb2
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
# Subclasses of BaseExecutorOperator
ExecutorOperator = TypeVar(
    'ExecutorOperator', bound=base_executor_operator.BaseExecutorOperator)

# Subclasses of BaseDriverOperator
DriverOperator = TypeVar(
    'DriverOperator', bound=base_driver_operator.BaseDriverOperator)

# Maps an executable-spec proto type to the executor operator that runs it.
DEFAULT_EXECUTOR_OPERATORS = {
    executable_spec_pb2.PythonClassExecutableSpec:
        python_executor_operator.PythonExecutorOperator,
    executable_spec_pb2.ContainerExecutableSpec:
        docker_executor_operator.DockerExecutorOperator
}

# Maps an executable-spec proto type to the driver operator that runs it.
DEFAULT_DRIVER_OPERATORS = {
    executable_spec_pb2.PythonClassExecutableSpec:
        python_driver_operator.PythonDriverOperator
}

# LINT.IfChange
# Maps the import path of a system node to its dedicated handler class.
_SYSTEM_NODE_HANDLERS = {
    'tfx.dsl.components.common.importer.Importer':
        importer_node_handler.ImporterNodeHandler,
    'tfx.dsl.components.common.resolver.Resolver':
        resolver_node_handler.ResolverNodeHandler,
    # TODO(b/177457236): Remove support for the following after release.
    'tfx.dsl.components.common.importer_node.ImporterNode':
        importer_node_handler.ImporterNodeHandler,
    'tfx.dsl.components.common.resolver_node.ResolverNode':
        resolver_node_handler.ResolverNodeHandler,
}
# LINT.ThenChange(Internal system node list)
# TODO(b/165359991): Restore 'auto_attribs=True' once we drop Python3.5 support.
@attr.s
class _PrepareExecutionResult:
  """A wrapper class used as the return value of _prepare_execution()."""
  # NOTE: attribute order defines the attrs-generated __init__ signature.
  # The information used by executor operators.
  execution_info = attr.ib(type=data_types.ExecutionInfo, default=None)
  # The Execution registered in MLMD.
  execution_metadata = attr.ib(type=metadata_store_pb2.Execution, default=None)
  # Contexts of the execution, usually used by Publisher.
  contexts = attr.ib(type=List[metadata_store_pb2.Context], default=None)
  # TODO(b/156126088): Update the following documentation when this bug is
  # closed.
  # Whether an execution is needed. An execution is not needed when:
  # 1) Not all the required inputs are ready.
  # 2) The input values don't meet the driver's requirements.
  # 3) A cached result is used.
  is_execution_needed = attr.ib(type=bool, default=False)
class _ExecutionFailedError(Exception):
  """An internal error that carries the ExecutorOutput of a failed execution."""

  def __init__(self, err_msg: str,
               executor_output: execution_result_pb2.ExecutorOutput):
    """Initializes the error.

    Args:
      err_msg: Human-readable description of the failure.
      executor_output: The ExecutorOutput produced by the failed execution, so
        callers can publish the failure to MLMD.
    """
    # Python 3 zero-argument super(); the file already uses f-strings (3.6+).
    super().__init__(err_msg)
    self._executor_output = executor_output

  @property
  def executor_output(self):
    """Returns the ExecutorOutput captured at failure time."""
    return self._executor_output
class Launcher(object):
  """Launcher is the main entrance of nodes in TFX.

  It handles TFX internal details like artifact resolving, execution
  triggering and result publishing.
  """

  def __init__(
      self,
      pipeline_node: pipeline_pb2.PipelineNode,
      mlmd_connection: metadata.Metadata,
      pipeline_info: pipeline_pb2.PipelineInfo,
      pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec,
      executor_spec: Optional[message.Message] = None,
      custom_driver_spec: Optional[message.Message] = None,
      platform_config: Optional[message.Message] = None,
      custom_executor_operators: Optional[Dict[Any,
                                               Type[ExecutorOperator]]] = None,
      custom_driver_operators: Optional[Dict[Any,
                                             Type[DriverOperator]]] = None):
    """Initializes a Launcher.

    Args:
      pipeline_node: The specification of the node that this launcher launches.
      mlmd_connection: ML metadata connection.
      pipeline_info: The information of the pipeline that this node runs in.
      pipeline_runtime_spec: The runtime information of the pipeline that this
        node runs in.
      executor_spec: Specification for the executor of the node. This is
        expected for all component nodes. This will be used to determine the
        specific ExecutorOperator class to be used to execute and will be
        passed into ExecutorOperator.
      custom_driver_spec: Specification for custom driver. This is expected
        only for advanced use cases.
      platform_config: Platform config that will be used as auxiliary info of
        the node execution. This will be passed to ExecutorOperator along with
        the `executor_spec`.
      custom_executor_operators: A map of ExecutableSpec to its
        ExecutorOperator implementation.
      custom_driver_operators: A map of ExecutableSpec to its DriverOperator
        implementation.

    Raises:
      ValueError: when component and component_config are not launchable by the
        launcher.
    """
    self._pipeline_node = pipeline_node
    self._mlmd_connection = mlmd_connection
    self._pipeline_info = pipeline_info
    self._pipeline_runtime_spec = pipeline_runtime_spec
    self._executor_spec = executor_spec
    # Custom operators take precedence over the defaults.
    self._executor_operators = {}
    self._executor_operators.update(DEFAULT_EXECUTOR_OPERATORS)
    self._executor_operators.update(custom_executor_operators or {})
    self._driver_operators = {}
    self._driver_operators.update(DEFAULT_DRIVER_OPERATORS)
    self._driver_operators.update(custom_driver_operators or {})

    self._executor_operator = None
    if executor_spec:
      self._executor_operator = self._executor_operators[type(executor_spec)](
          executor_spec, platform_config)
    self._output_resolver = outputs_utils.OutputsResolver(
        pipeline_node=self._pipeline_node,
        pipeline_info=self._pipeline_info,
        pipeline_runtime_spec=self._pipeline_runtime_spec)

    self._driver_operator = None
    if custom_driver_spec:
      self._driver_operator = self._driver_operators[type(custom_driver_spec)](
          custom_driver_spec, self._mlmd_connection)

    system_node_handler_class = _SYSTEM_NODE_HANDLERS.get(
        self._pipeline_node.node_info.type.name)
    self._system_node_handler = None
    if system_node_handler_class:
      self._system_node_handler = system_node_handler_class()

    assert bool(self._executor_operator) or bool(self._system_node_handler), \
        'A node must be system node or have an executor.'

  def _prepare_execution(self) -> _PrepareExecutionResult:
    """Prepares inputs, outputs and execution properties for actual execution."""
    # TODO(b/150979622): handle the edge case that the component gets evicted
    # between a successful publish and the stateful working dir being cleaned
    # up. Otherwise following retries will keep failing because of duplicate
    # publishes.
    with self._mlmd_connection as m:
      # 1. Prepares all contexts.
      contexts = context_lib.prepare_contexts(
          metadata_handler=m, node_contexts=self._pipeline_node.contexts)

      # 2. Resolves inputs and execution properties.
      exec_properties = inputs_utils.resolve_parameters(
          node_parameters=self._pipeline_node.parameters)
      input_artifacts = inputs_utils.resolve_input_artifacts(
          metadata_handler=m, node_inputs=self._pipeline_node.inputs)

      # 3. If not all required inputs are met, returns an ExecutionInfo with
      # is_execution_needed being false. No publish will happen so downstream
      # nodes won't be triggered.
      if input_artifacts is None:
        logging.info(
            'Not all required inputs are ready, abandoning execution.')
        return _PrepareExecutionResult(
            execution_info=data_types.ExecutionInfo(),
            contexts=contexts,
            is_execution_needed=False)

      # 4. Registers execution in metadata.
      execution = execution_publish_utils.register_execution(
          metadata_handler=m,
          execution_type=self._pipeline_node.node_info.type,
          contexts=contexts,
          input_artifacts=input_artifacts,
          exec_properties=exec_properties)

      # 5. Resolves output.
      output_artifacts = self._output_resolver.generate_output_artifacts(
          execution.id)

    # If there is a custom driver, runs it.
    if self._driver_operator:
      driver_output = self._driver_operator.run_driver(
          data_types.ExecutionInfo(
              input_dict=input_artifacts,
              output_dict=output_artifacts,
              exec_properties=exec_properties,
              execution_output_uri=self._output_resolver.get_driver_output_uri(
              )))
      self._update_with_driver_output(driver_output, exec_properties,
                                      output_artifacts)

    # We reconnect to MLMD here because the custom driver closes the MLMD
    # connection on returning.
    with self._mlmd_connection as m:
      # 6. Checks cached result.
      cache_context = cache_utils.get_cache_context(
          metadata_handler=m,
          pipeline_node=self._pipeline_node,
          pipeline_info=self._pipeline_info,
          executor_spec=self._executor_spec,
          input_artifacts=input_artifacts,
          output_artifacts=output_artifacts,
          parameters=exec_properties)
      contexts.append(cache_context)
      cached_outputs = cache_utils.get_cached_outputs(
          metadata_handler=m, cache_context=cache_context)

      # 7. Should cache be used?
      if (self._pipeline_node.execution_options.caching_options.enable_cache and
          cached_outputs):
        # Publishes cache result.
        execution_publish_utils.publish_cached_execution(
            metadata_handler=m,
            contexts=contexts,
            execution_id=execution.id,
            output_artifacts=cached_outputs)
        logging.info('A cached execution %d is used.', execution.id)
        return _PrepareExecutionResult(
            execution_info=data_types.ExecutionInfo(execution_id=execution.id),
            execution_metadata=execution,
            contexts=contexts,
            is_execution_needed=False)

      pipeline_run_id = (
          self._pipeline_runtime_spec.pipeline_run_id.field_value.string_value)

      # 8. Going to trigger executor.
      logging.info('Going to run a new execution %d', execution.id)
      return _PrepareExecutionResult(
          execution_info=data_types.ExecutionInfo(
              execution_id=execution.id,
              input_dict=input_artifacts,
              output_dict=output_artifacts,
              exec_properties=exec_properties,
              execution_output_uri=self._output_resolver
              .get_executor_output_uri(execution.id),
              stateful_working_dir=(
                  self._output_resolver.get_stateful_working_directory()),
              tmp_dir=self._output_resolver.make_tmp_dir(execution.id),
              pipeline_node=self._pipeline_node,
              pipeline_info=self._pipeline_info,
              pipeline_run_id=pipeline_run_id),
          execution_metadata=execution,
          contexts=contexts,
          is_execution_needed=True)

  def _run_executor(
      self, execution_info: data_types.ExecutionInfo
  ) -> execution_result_pb2.ExecutorOutput:
    """Executes the underlying component implementation.

    Args:
      execution_info: All information the executor operator needs to run.

    Returns:
      The ExecutorOutput of a successful (code == 0) execution.

    Raises:
      _ExecutionFailedError: If the executor returns a non-zero result code;
        carries the ExecutorOutput so the failure can be published.
    """
    logging.info('Going to run a new execution: %s', execution_info)
    outputs_utils.make_output_dirs(execution_info.output_dict)
    try:
      executor_output = self._executor_operator.run_executor(execution_info)
      code = executor_output.execution_result.code
      if code != 0:
        result_message = executor_output.execution_result.result_message
        err = (f'Execution {execution_info.execution_id} '
               f'failed with error code {code} and '
               f'error message {result_message}')
        logging.error(err)
        raise _ExecutionFailedError(err, executor_output)
      return executor_output
    except Exception:  # pylint: disable=broad-except
      # Remove output dirs on any failure so retries start from a clean slate.
      outputs_utils.remove_output_dirs(execution_info.output_dict)
      raise

  def _publish_successful_execution(
      self, execution_id: int, contexts: List[metadata_store_pb2.Context],
      output_dict: Dict[Text, List[types.Artifact]],
      executor_output: execution_result_pb2.ExecutorOutput) -> None:
    """Publishes a succeeded execution result to ML metadata."""
    with self._mlmd_connection as m:
      execution_publish_utils.publish_succeeded_execution(
          metadata_handler=m,
          execution_id=execution_id,
          contexts=contexts,
          output_artifacts=output_dict,
          executor_output=executor_output)

  def _publish_failed_execution(
      self,
      execution_id: int,
      contexts: List[metadata_store_pb2.Context],
      executor_output: Optional[execution_result_pb2.ExecutorOutput] = None
  ) -> None:
    """Publishes a failed execution to ML metadata."""
    with self._mlmd_connection as m:
      execution_publish_utils.publish_failed_execution(
          metadata_handler=m,
          execution_id=execution_id,
          contexts=contexts,
          executor_output=executor_output)

  def _clean_up_stateless_execution_info(
      self, execution_info: data_types.ExecutionInfo):
    """Removes per-attempt scratch state; runs whether or not execution failed."""
    logging.info('Cleaning up stateless execution info.')
    # Clean up tmp dir.
    fileio.rmtree(execution_info.tmp_dir)

  def _clean_up_stateful_execution_info(
      self, execution_info: data_types.ExecutionInfo):
    """Post-execution clean up; only runs after a successful execution."""
    logging.info('Cleaning up stateful execution info.')
    outputs_utils.remove_stateful_working_dir(
        execution_info.stateful_working_dir)

  def _update_with_driver_output(self,
                                 driver_output: driver_output_pb2.DriverOutput,
                                 exec_properties: Dict[Text, Any],
                                 output_dict: Dict[Text, List[types.Artifact]]):
    """Updates output_dict and exec_properties with the driver output."""
    for key, artifact_list in driver_output.output_artifacts.items():
      python_artifact_list = []
      # We assume the original output dict must include at least one output
      # artifact and all output artifacts share the same type.
      artifact_type = output_dict[key][0].artifact_type
      for proto_artifact in artifact_list.artifacts:
        python_artifact = types.Artifact(artifact_type)
        python_artifact.set_mlmd_artifact(proto_artifact)
        python_artifact_list.append(python_artifact)
      output_dict[key] = python_artifact_list

    for key, value in driver_output.exec_properties.items():
      exec_properties[key] = getattr(value, value.WhichOneof('value'))

  def launch(self) -> Optional[metadata_store_pb2.Execution]:
    """Executes the component; includes driver, executor and publisher.

    Returns:
      The metadata of this execution that is registered in MLMD. It can be None
      if the driver decides not to run the execution.

    Raises:
      Exception: If the executor fails.
    """
    logging.info('Running launcher for %s', self._pipeline_node)
    if self._system_node_handler:
      # If this is a system node, runs it and directly returns.
      return self._system_node_handler.run(self._mlmd_connection,
                                           self._pipeline_node,
                                           self._pipeline_info,
                                           self._pipeline_runtime_spec)

    # Runs as a normal node.
    prepare_execution_result = self._prepare_execution()
    (execution_info, contexts,
     is_execution_needed) = (prepare_execution_result.execution_info,
                             prepare_execution_result.contexts,
                             prepare_execution_result.is_execution_needed)
    if is_execution_needed:
      try:
        executor_output = self._run_executor(execution_info)
      except Exception as e:  # pylint: disable=broad-except
        execution_output = (
            e.executor_output if isinstance(e, _ExecutionFailedError) else None)
        self._publish_failed_execution(execution_info.execution_id, contexts,
                                       execution_output)
        logging.error('Execution %d failed.', execution_info.execution_id)
        raise
      finally:
        self._clean_up_stateless_execution_info(execution_info)

      logging.info('Execution %d succeeded.', execution_info.execution_id)
      self._clean_up_stateful_execution_info(execution_info)

      logging.info('Publishing output artifacts %s for execution %s',
                   execution_info.output_dict, execution_info.execution_id)
      self._publish_successful_execution(execution_info.execution_id, contexts,
                                         execution_info.output_dict,
                                         executor_output)
    return prepare_execution_result.execution_metadata
"""Portable libraries for event related APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Tuple
from ml_metadata.proto import metadata_store_pb2
# Event types that mark an artifact as an output of an execution.
_VALID_OUTPUT_EVENT_TYPES = frozenset([
    metadata_store_pb2.Event.OUTPUT, metadata_store_pb2.Event.INTERNAL_OUTPUT,
    metadata_store_pb2.Event.DECLARED_OUTPUT
])
# Event types that mark an artifact as an input of an execution.
_VALID_INPUT_EVENT_TYPES = frozenset([
    metadata_store_pb2.Event.INPUT, metadata_store_pb2.Event.INTERNAL_INPUT,
    metadata_store_pb2.Event.DECLARED_INPUT
])
def is_valid_output_event(event: metadata_store_pb2.Event,
                          expected_output_key: Optional[Text] = None) -> bool:
  """Evaluates whether an event is an output event with the right output key.

  Args:
    event: The event to evaluate.
    expected_output_key: The expected output key; when omitted, only the event
      type is checked.

  Returns:
    True iff the event type is an output type and, when a key is given, the
    event path matches [key, index] with the expected key.
  """
  if event.type not in _VALID_OUTPUT_EVENT_TYPES:
    return False
  if not expected_output_key:
    return True
  # A valid event path has exactly 2 steps: [key, index].
  return (len(event.path.steps) == 2 and
          event.path.steps[0].key == expected_output_key)
def is_valid_input_event(event: metadata_store_pb2.Event,
                         expected_input_key: Optional[Text] = None) -> bool:
  """Evaluates whether an event is an input event with the right input key.

  Args:
    event: The event to evaluate.
    expected_input_key: The expected input key; when omitted, only the event
      type is checked.

  Returns:
    True iff the event type is an input type and, when a key is given, the
    event path matches [key, index] with the expected key.
  """
  if event.type not in _VALID_INPUT_EVENT_TYPES:
    return False
  if not expected_input_key:
    return True
  # A valid event path has exactly 2 steps: [key, index].
  return (len(event.path.steps) == 2 and
          event.path.steps[0].key == expected_input_key)
def generate_event(
    event_type: metadata_store_pb2.Event.Type,
    key: Text,
    index: int,
    artifact_id: Optional[int] = None,
    execution_id: Optional[int] = None) -> metadata_store_pb2.Event:
  """Generates an MLMD event given type, key and index.

  Args:
    event_type: The type of the event. e.g., INPUT, OUTPUT, etc.
    key: The key of the input or output channel. Usually a key can uniquely
      identify a channel of a TFX node.
    index: The index of the artifact in a channel. For example, a trainer might
      take more than one Example artifacts in one of its input channels. We
      need to distinguish each artifact when creating events.
    artifact_id: Optional artifact id for the event.
    execution_id: Optional execution id for the event.

  Returns:
    A metadata_store_pb2.Event message.
  """
  event = metadata_store_pb2.Event()
  event.type = event_type
  # The order matters: we always use the first step to store the key and the
  # second step to store the index.
  event.path.steps.add().key = key
  event.path.steps.add().index = index
  if artifact_id:
    event.artifact_id = artifact_id
  if execution_id:
    event.execution_id = execution_id
  return event
def get_artifact_path(event: metadata_store_pb2.Event) -> Tuple[Text, int]:
  """Gets the artifact path from the event.

  This is useful for reconstructing the artifact dict (mapping from key to an
  ordered list of artifacts) for an execution. The key and index of an artifact
  are expected to be stored in the event in two steps where the first step is
  the key and the second is the index of the artifact within the list.

  Args:
    event: The event from which to extract the path to the artifact.

  Returns:
    A tuple (<artifact key>, <artifact index>).

  Raises:
    ValueError: If there are not exactly 2 steps in the path corresponding to
      the key and index of the artifact.
  """
  if len(event.path.steps) != 2:
    raise ValueError(
        'Expected exactly two steps corresponding to key and index in event: {}'
        .format(event))
  return (event.path.steps[0].key, event.path.steps[1].index)
"""Portable libraries for context related APIs."""
from typing import List, Text
from absl import logging
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable.mlmd import common_utils
from tfx.proto.orchestration import pipeline_pb2
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
CONTEXT_TYPE_EXECUTION_CACHE = 'execution_cache'
def _generate_context_proto(
    metadata_handler: metadata.Metadata,
    context_spec: pipeline_pb2.ContextSpec) -> metadata_store_pb2.Context:
  """Generates metadata_store_pb2.Context based on the ContextSpec message.

  Args:
    metadata_handler: A handler to access MLMD store.
    context_spec: A pipeline_pb2.ContextSpec message that instructs registering
      of a context.

  Returns:
    A metadata_store_pb2.Context message.

  Raises:
    RuntimeError: When actual property type does not match provided metadata
      type schema.
  """
  context_type = common_utils.register_type_if_not_exist(
      metadata_handler, context_spec.type)
  context_name = data_types_utils.get_value(context_spec.name)
  assert isinstance(context_name, Text), 'context name should be string.'
  result = metadata_store_pb2.Context(
      type_id=context_type.id, name=context_name)
  # Properties declared in the registered type schema go into `properties`;
  # everything else is stored as a custom property.
  for k, v in context_spec.properties.items():
    if k in context_type.properties:
      actual_property_type = data_types_utils.get_metadata_value_type(v)
      if context_type.properties.get(k) == actual_property_type:
        data_types_utils.set_metadata_value(result.properties[k], v)
      else:
        raise RuntimeError(
            'Property type %s different from provided metadata type property type %s for key %s'
            % (actual_property_type, context_type.properties.get(k), k))
    else:
      data_types_utils.set_metadata_value(result.custom_properties[k], v)
  return result
def _register_context_if_not_exist(
    metadata_handler: metadata.Metadata,
    context_spec: pipeline_pb2.ContextSpec,
) -> metadata_store_pb2.Context:
  """Registers a context if it does not exist, otherwise returns the existing one.

  Args:
    metadata_handler: A handler to access MLMD store.
    context_spec: A pipeline_pb2.ContextSpec message that instructs registering
      of a context.

  Returns:
    An MLMD context.
  """
  context_type_name = context_spec.type.name
  context_name = data_types_utils.get_value(context_spec.name)
  context = metadata_handler.store.get_context_by_type_and_name(
      type_name=context_type_name, context_name=context_name)
  if context is not None:
    return context

  logging.debug('Failed to get context of type %s and name %s',
                context_type_name, context_name)
  # If Context is not found, try to register it.
  context = _generate_context_proto(
      metadata_handler=metadata_handler, context_spec=context_spec)
  try:
    [context_id] = metadata_handler.store.put_contexts([context])
    context.id = context_id
  # This might happen in cases we have parallel executions of nodes.
  except mlmd.errors.AlreadyExistsError:
    logging.debug('Context %s already exists.', context_name)
    context = metadata_handler.store.get_context_by_type_and_name(
        type_name=context_type_name, context_name=context_name)
    assert context is not None, ('Context is missing for %s while put_contexts '
                                 'reports that it existed.') % (
                                     context_name)
  logging.debug('ID of context %s is %s.', context_spec, context.id)
  return context
def register_context_if_not_exists(
    metadata_handler: metadata.Metadata,
    context_type_name: Text,
    context_name: Text,
) -> metadata_store_pb2.Context:
  """Registers a context if it does not exist, otherwise returns the existing one.

  This is a simplified wrapper around the method above which only takes context
  type and context name.

  Args:
    metadata_handler: A handler to access MLMD store.
    context_type_name: The name of the context type.
    context_name: The name of the context.

  Returns:
    An MLMD context.
  """
  context_spec = pipeline_pb2.ContextSpec(
      name=pipeline_pb2.Value(
          field_value=metadata_store_pb2.Value(string_value=context_name)),
      type=metadata_store_pb2.ContextType(name=context_type_name))
  return _register_context_if_not_exist(
      metadata_handler=metadata_handler, context_spec=context_spec)
def prepare_contexts(
    metadata_handler: metadata.Metadata,
    node_contexts: pipeline_pb2.NodeContexts,
) -> List[metadata_store_pb2.Context]:
  """Creates the contexts given specification.

  Context types will be registered if they do not already exist.

  Args:
    metadata_handler: A handler to access MLMD store.
    node_contexts: A pipeline_pb2.NodeContexts message that instructs
      registering of the contexts.

  Returns:
    A list of metadata_store_pb2.Context messages.
  """
  return [
      _generate_context_proto(
          metadata_handler=metadata_handler, context_spec=context_spec)
      for context_spec in node_contexts.contexts
  ]
"""Common MLMD utility libraries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import TypeVar
from absl import logging
from tfx.orchestration import metadata
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
# Constrained TypeVar over the three MLMD type protos that share the same
# get/put API shape in the MLMD store.
MetadataType = TypeVar('MetadataType', metadata_store_pb2.ArtifactType,
                       metadata_store_pb2.ContextType,
                       metadata_store_pb2.ExecutionType)
def register_type_if_not_exist(
    metadata_handler: metadata.Metadata,
    metadata_type: MetadataType,
) -> MetadataType:
  """Registers a metadata type if it does not exist.

  Uses the existing type if its schema is a superset of what is needed.
  Otherwise tries to register a new metadata type.

  Args:
    metadata_handler: A handler to access MLMD store.
    metadata_type: The metadata type to register if it does not exist.

  Returns:
    A MetadataType with id.

  Raises:
    RuntimeError: If the new metadata type conflicts with the existing schema
      in MLMD.
    ValueError: If the metadata type is not expected.
  """
  if metadata_type.id:
    return metadata_type

  if isinstance(metadata_type, metadata_store_pb2.ArtifactType):
    get_type_handler = metadata_handler.store.get_artifact_type
    put_type_handler = metadata_handler.store.put_artifact_type
  elif isinstance(metadata_type, metadata_store_pb2.ContextType):
    get_type_handler = metadata_handler.store.get_context_type
    put_type_handler = metadata_handler.store.put_context_type
  elif isinstance(metadata_type, metadata_store_pb2.ExecutionType):
    get_type_handler = metadata_handler.store.get_execution_type
    put_type_handler = metadata_handler.store.put_execution_type
  else:
    raise ValueError('Unexpected value type: %s.' % type(metadata_type))

  try:
    # Types can be evolved by adding new fields in newer releases.
    # Here when upserting types:
    # a) we enable `can_add_fields` so that type updates made in the current
    #    release are backward compatible with older releases;
    # b) we enable `can_omit_fields` so that the current release is forward
    #    compatible with any type updates made by future releases.
    type_id = put_type_handler(
        metadata_type, can_add_fields=True, can_omit_fields=True)
    logging.debug('Registering a metadata type with id %s.', type_id)
    metadata_type = get_type_handler(metadata_type.name)
    return metadata_type
  except mlmd.errors.AlreadyExistsError:
    existing_type = get_type_handler(metadata_type.name)
    assert existing_type is not None, (
        'Not expected to get None when getting type %s.' % metadata_type.name)
    warning_str = (
        'Conflicting properties comparing with existing metadata type '
        'with the same type name. Existing type: '
        '%s, New type: %s') % (existing_type, metadata_type)
    logging.warning(warning_str)
    raise RuntimeError(warning_str)
r"""This module defines the entrypoint for the PythonExecutorOperator in TFX.
This library is intended to serve as the entrypoint for README.ml-pipelines-sdk.md binary that packages
the python executors in README.ml-pipelines-sdk.md pipeline. The resulting binary is called by the TFX
launcher and should not be called directly.
"""
from absl import flags
from absl import logging
from tfx.dsl.io import fileio
from tfx.orchestration import metadata
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import python_driver_operator
from tfx.orchestration.portable import python_executor_operator
from tfx.orchestration.python_execution_binary import python_execution_binary_utils
from tfx.proto.orchestration import driver_output_pb2
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_result_pb2
from google.protobuf import text_format
FLAGS = flags.FLAGS

EXECUTION_INVOCATION_FLAG = flags.DEFINE_string(
    'tfx_execution_info_b64', None, 'url safe base64 encoded binary '
    'tfx.orchestration.ExecutionInvocation proto')
EXECUTABLE_SPEC_FLAG = flags.DEFINE_string(
    'tfx_python_class_executable_spec_b64', None,
    'tfx.orchestration.executable_spec.PythonClassExecutableSpec proto')
# Note: the two adjacent string literals below are concatenated; the trailing
# space on the first is required to keep the help text readable.
MLMD_CONNECTION_CONFIG_FLAG = flags.DEFINE_string(
    'tfx_mlmd_connection_config_b64', None,
    'wrapper proto containing MLMD connection config. If being set, this '
    'indicates a driver execution')
def _run_executor(
    executable_spec: executable_spec_pb2.PythonClassExecutableSpec,
    execution_info: data_types.ExecutionInfo
) -> execution_result_pb2.ExecutorOutput:
  """Runs the Python executor described by `executable_spec`."""
  # Renamed local to avoid shadowing the stdlib `operator` module name.
  executor_operator = python_executor_operator.PythonExecutorOperator(
      executable_spec)
  return executor_operator.run_executor(execution_info)
def _run_driver(
    executable_spec: executable_spec_pb2.PythonClassExecutableSpec,
    mlmd_connection_config: metadata.ConnectionConfigType,
    execution_info: data_types.ExecutionInfo) -> driver_output_pb2.DriverOutput:
  """Runs the Python driver described by `executable_spec`."""
  # Renamed local to avoid shadowing the stdlib `operator` module name.
  driver_operator = python_driver_operator.PythonDriverOperator(
      executable_spec, metadata.Metadata(mlmd_connection_config))
  return driver_operator.run_driver(execution_info)
def main(_):
  """Entrypoint: deserializes flags and runs either a driver or an executor."""
  flags.mark_flag_as_required(EXECUTION_INVOCATION_FLAG.name)
  flags.mark_flag_as_required(EXECUTABLE_SPEC_FLAG.name)

  execution_info = python_execution_binary_utils.deserialize_execution_info(
      EXECUTION_INVOCATION_FLAG.value)
  python_class_executable_spec = (
      python_execution_binary_utils.deserialize_executable_spec(
          EXECUTABLE_SPEC_FLAG.value))
  logging.info('execution_info = %r\n', execution_info)
  logging.info('python_class_executable_spec = %s\n',
               text_format.MessageToString(python_class_executable_spec))

  # MLMD connection config being set indicates a driver execution instead of
  # an executor execution, as accessing MLMD is not supported for executors.
  if MLMD_CONNECTION_CONFIG_FLAG.value:
    mlmd_connection_config = (
        python_execution_binary_utils.deserialize_mlmd_connection_config(
            MLMD_CONNECTION_CONFIG_FLAG.value))
    run_result = _run_driver(python_class_executable_spec,
                             mlmd_connection_config, execution_info)
  else:
    run_result = _run_executor(python_class_executable_spec, execution_info)

  if run_result:
    with fileio.open(execution_info.execution_output_uri, 'wb') as f:
      f.write(run_result.SerializeToString())
r"""Shared IR serialization logic used by TFleX python executor binary."""
import base64
from tfx.orchestration import metadata
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import execution_invocation_pb2
def deserialize_execution_info(
    execution_info_b64: str) -> data_types.ExecutionInfo:
  """De-serializes an ExecutionInfo from a url-safe base64 encoded string."""
  execution_info_proto = execution_invocation_pb2.ExecutionInvocation.FromString(
      base64.urlsafe_b64decode(execution_info_b64))
  return data_types.ExecutionInfo.from_proto(execution_info_proto)
def deserialize_mlmd_connection_config(
    mlmd_connection_config_b64: str) -> metadata.ConnectionConfigType:
  """De-serializes an MLMD connection config from a base64 flag."""
  serialized = base64.b64decode(mlmd_connection_config_b64)
  wrapper = execution_invocation_pb2.MLMDConnectionConfig.FromString(serialized)
  # The wrapper holds a oneof; return whichever concrete config is set.
  set_field = wrapper.WhichOneof('connection_config')
  return getattr(wrapper, set_field)
def deserialize_executable_spec(
    executable_spec_b64: str) -> executable_spec_pb2.PythonClassExecutableSpec:
  """De-serializes an executable spec from a base64 flag."""
  serialized = base64.b64decode(executable_spec_b64)
  return executable_spec_pb2.PythonClassExecutableSpec.FromString(serialized)
def serialize_mlmd_connection_config(
    connection_config: metadata.ConnectionConfigType) -> str:
  """Serializes an MLMD connection config into a base64 flag of its wrapper."""
  mlmd_wrapper = execution_invocation_pb2.MLMDConnectionConfig()
  # Find the oneof wrapper field whose message type matches the concrete
  # config, and copy the config into it.
  for name, descriptor in (execution_invocation_pb2.MLMDConnectionConfig
                           .DESCRIPTOR.fields_by_name.items()):
    if descriptor.message_type.full_name == connection_config.DESCRIPTOR.full_name:
      getattr(mlmd_wrapper, name).CopyFrom(connection_config)
      break
  return base64.b64encode(mlmd_wrapper.SerializeToString()).decode('ascii')
def serialize_executable_spec(
    executable_spec: executable_spec_pb2.PythonClassExecutableSpec) -> str:
  """Serializes an executable spec into a base64 flag."""
  return base64.b64encode(executable_spec.SerializeToString()).decode('ascii')
def serialize_execution_info(execution_info: data_types.ExecutionInfo) -> str:
  """Serializes an ExecutionInfo into a base64-encoded string."""
  execution_info_proto = execution_info.to_proto()
  return base64.b64encode(
      execution_info_proto.SerializeToString()).decode('ascii')
"""Definition of Airflow TFX runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from typing import Any, Dict, Optional, Text, Union
import absl
from airflow import models
from tfx.orchestration import pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.airflow import airflow_component
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
class AirflowPipelineConfig(pipeline_config.PipelineConfig):
  """Pipeline config for AirflowDagRunner."""

  def __init__(self, airflow_dag_config: Dict[Text, Any] = None, **kwargs):
    """Creates an instance of AirflowPipelineConfig.

    Args:
      airflow_dag_config: Configs of Airflow DAG model. See
        https://airflow.apache.org/_api/airflow/models/dag/index.html#airflow.models.dag.DAG
        for the full spec. Defaults to an empty dict when not provided.
      **kwargs: keyword args for PipelineConfig.
    """
    super(AirflowPipelineConfig, self).__init__(**kwargs)
    self.airflow_dag_config = airflow_dag_config or {}
class AirflowDagRunner(tfx_runner.TfxRunner):
  """Tfx runner on Airflow."""

  def __init__(self,
               config: Optional[Union[Dict[Text, Any],
                                      AirflowPipelineConfig]] = None):
    """Creates an instance of AirflowDagRunner.

    Args:
      config: Optional Airflow pipeline config for customizing the launching of
        each component.
    """
    if config and not isinstance(config, AirflowPipelineConfig):
      # Passing a plain dict is deprecated; wrap it into the typed config.
      # Note: absl.logging.warning treats extra positional args as
      # printf-style format arguments, so a warning category must not be
      # passed to the logging call (it would raise a formatting error).
      absl.logging.warning(
          'Passing config as a dict type is deprecated. Use '
          'AirflowPipelineConfig type instead.')
      config = AirflowPipelineConfig(airflow_dag_config=config)
    super(AirflowDagRunner, self).__init__(config)

  def run(self, tfx_pipeline: pipeline.Pipeline):
    """Deploys given logical pipeline on Airflow.

    Args:
      tfx_pipeline: Logical pipeline containing pipeline args and components.

    Returns:
      An Airflow DAG.
    """
    # Merge airflow-specific configs with pipeline args.
    airflow_dag = models.DAG(
        dag_id=tfx_pipeline.pipeline_info.pipeline_name,
        **self._config.airflow_dag_config)
    if 'tmp_dir' not in tfx_pipeline.additional_pipeline_args:
      tmp_dir = os.path.join(tfx_pipeline.pipeline_info.pipeline_root, '.temp',
                             '')
      tfx_pipeline.additional_pipeline_args['tmp_dir'] = tmp_dir

    component_impl_map = {}
    # tfx_pipeline.components is expected to be in topological order, so every
    # upstream node has been wrapped by the time it is referenced below.
    for tfx_component in tfx_pipeline.components:
      (component_launcher_class,
       component_config) = config_utils.find_component_launch_info(
           self._config, tfx_component)
      current_airflow_component = airflow_component.AirflowComponent(
          airflow_dag,
          component=tfx_component,
          component_launcher_class=component_launcher_class,
          pipeline_info=tfx_pipeline.pipeline_info,
          enable_cache=tfx_pipeline.enable_cache,
          metadata_connection_config=tfx_pipeline.metadata_connection_config,
          beam_pipeline_args=tfx_pipeline.beam_pipeline_args,
          additional_pipeline_args=tfx_pipeline.additional_pipeline_args,
          component_config=component_config)
      component_impl_map[tfx_component] = current_airflow_component
      for upstream_node in tfx_component.upstream_nodes:
        assert upstream_node in component_impl_map, ('Components is not in '
                                                     'topological order')
        current_airflow_component.set_upstream(
            component_impl_map[upstream_node])

    return airflow_dag
"""Definition for Airflow component for TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from typing import Any, Dict, List, Text, Type
from airflow import models
from airflow.operators import python_operator
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration.config import base_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.utils import telemetry_utils
from ml_metadata.proto import metadata_store_pb2
def _airflow_component_launcher(
    component: base_node.BaseNode, component_launcher_class: Type[
        base_component_launcher.BaseComponentLauncher],
    pipeline_info: data_types.PipelineInfo, driver_args: data_types.DriverArgs,
    metadata_connection_config: metadata_store_pb2.ConnectionConfig,
    beam_pipeline_args: List[Text], additional_pipeline_args: Dict[Text, Any],
    component_config: base_component_config.BaseComponentConfig,
    **kwargs) -> None:
  """Helper function to launch TFX component execution.

  This helper function will be called with Airflow env objects which contains
  run_id that we need to pass into TFX ComponentLauncher.

  Args:
    component: TFX BaseComponent instance. This instance holds all inputs and
      outputs placeholders as well as component properties.
    component_launcher_class: The class of the launcher to launch the
      component.
    pipeline_info: A data_types.PipelineInfo instance that holds pipeline
      properties.
    driver_args: Component specific args for driver.
    metadata_connection_config: Configuration for how to connect to metadata.
    beam_pipeline_args: Pipeline arguments for Beam powered Components.
    additional_pipeline_args: A dict of additional pipeline args.
    component_config: Component config to launch the component.
    **kwargs: Context arguments that will be passed in by Airflow, including:
      - ti: TaskInstance object from which we can get run_id of the running
        pipeline.
      For more details, please refer to the code:
      https://github.com/apache/airflow/blob/master/airflow/operators/python_operator.py
  """
  # Populate run id from Airflow task instance. This must happen before the
  # launcher is created so the launcher sees the concrete run id.
  pipeline_info.run_id = kwargs['ti'].get_dagrun().run_id
  # The launcher owns driver/executor/publishing for this single component
  # execution; a fresh metadata connection is built per task run.
  launcher = component_launcher_class.create(
      component=component,
      pipeline_info=pipeline_info,
      driver_args=driver_args,
      metadata_connection=metadata.Metadata(metadata_connection_config),
      beam_pipeline_args=beam_pipeline_args,
      additional_pipeline_args=additional_pipeline_args,
      component_config=component_config)
  # Label the execution so usage telemetry attributes it to the Airflow runner.
  with telemetry_utils.scoped_labels(
      {telemetry_utils.LABEL_TFX_RUNNER: 'airflow'}):
    launcher.launch()
class AirflowComponent(python_operator.PythonOperator):
  """Airflow-specific TFX Component.

  This class wraps a component run into its own PythonOperator in Airflow.
  """

  def __init__(self, parent_dag: models.DAG, component: base_node.BaseNode,
               component_launcher_class: Type[
                   base_component_launcher.BaseComponentLauncher],
               pipeline_info: data_types.PipelineInfo, enable_cache: bool,
               metadata_connection_config: metadata_store_pb2.ConnectionConfig,
               beam_pipeline_args: List[Text],
               additional_pipeline_args: Dict[Text, Any],
               component_config: base_component_config.BaseComponentConfig):
    """Constructs an Airflow implementation of TFX component.

    Args:
      parent_dag: An AirflowPipeline instance as the pipeline DAG.
      component: An instance of base_node.BaseNode that holds all properties
        of a logical component.
      component_launcher_class: The class of the launcher to launch the
        component.
      pipeline_info: An instance of data_types.PipelineInfo that holds pipeline
        properties.
      enable_cache: Whether or not cache is enabled for this component run.
      metadata_connection_config: A config proto for metadata connection.
      beam_pipeline_args: Pipeline arguments for Beam powered Components.
      additional_pipeline_args: Additional pipeline args.
      component_config: Component config to launch the component.
    """
    # Prepare parameters to create TFX worker.
    driver_args = data_types.DriverArgs(enable_cache=enable_cache)
    # provide_context=True makes Airflow pass the template context (including
    # `ti`) into the callable, which the launcher uses to read the DAG run_id.
    super(AirflowComponent, self).__init__(
        task_id=component.id,
        provide_context=True,
        python_callable=functools.partial(
            _airflow_component_launcher,
            component=component,
            component_launcher_class=component_launcher_class,
            pipeline_info=pipeline_info,
            driver_args=driver_args,
            metadata_connection_config=metadata_connection_config,
            beam_pipeline_args=beam_pipeline_args,
            additional_pipeline_args=additional_pipeline_args,
            component_config=component_config),
        dag=parent_dag)
"""Definition of Beam TFX runner."""
import datetime
import os
from typing import Any, Iterable, List, Optional, Text, Union
from absl import logging
import apache_beam as beam
from tfx.dsl.compiler import compiler
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration import pipeline as pipeline_py
from tfx.orchestration.beam.legacy import beam_dag_runner as legacy_beam_dag_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.local import runner_utils
from tfx.orchestration.portable import launcher
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.orchestration.portable import tfx_runner
from tfx.proto.orchestration import local_deployment_config_pb2
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import telemetry_utils
from google.protobuf import any_pb2
from google.protobuf import message
# TODO(jyzhao): confirm it's re-executable, add test case.
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(Any)
class PipelineNodeAsDoFn(beam.DoFn):
  """Wrap node as beam DoFn."""

  def __init__(self, pipeline_node: pipeline_pb2.PipelineNode,
               mlmd_connection_config: metadata.ConnectionConfigType,
               pipeline_info: pipeline_pb2.PipelineInfo,
               pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec,
               executor_spec: Optional[message.Message],
               custom_driver_spec: Optional[message.Message],
               deployment_config: Optional[message.Message]):
    """Initializes the PipelineNodeAsDoFn.

    Args:
      pipeline_node: The specification of the node that this launcher launches.
      mlmd_connection_config: ML metadata connection config.
      pipeline_info: The information of the pipeline that this node runs in.
      pipeline_runtime_spec: The runtime information of the pipeline that this
        node runs in.
      executor_spec: Specification for the executor of the node. This is
        expected for all nodes. This will be used to determine the specific
        ExecutorOperator class to be used to execute and will be passed into
        ExecutorOperator.
      custom_driver_spec: Specification for custom driver. This is expected
        only for advanced use cases.
      deployment_config: Deployment Config for the pipeline.
    """
    self._pipeline_node = pipeline_node
    self._mlmd_connection_config = mlmd_connection_config
    self._pipeline_info = pipeline_info
    self._pipeline_runtime_spec = pipeline_runtime_spec
    self._executor_spec = executor_spec
    self._custom_driver_spec = custom_driver_spec
    self._node_id = pipeline_node.node_info.id
    self._deployment_config = deployment_config

  def process(self, element: Any, *signals: Iterable[Any]) -> None:
    """Executes node based on signals.

    Args:
      element: a signal element to trigger the node.
      *signals: side input signals indicate completeness of upstream nodes.
    """
    # Upstream signals are empty PCollections; their presence (not their
    # content) encodes completion of the upstream nodes.
    for signal in signals:
      assert not list(signal), 'Signal PCollection should be empty.'

    logging.info('node %s is running.', self._node_id)
    self._run_node()
    logging.info('node %s is finished.', self._node_id)

  def _run_node(self) -> None:
    # Builds and fires a Launcher for this node; the launcher blocks until the
    # node execution completes.
    platform_config = self._extract_platform_config(self._deployment_config,
                                                    self._node_id)
    launcher.Launcher(
        pipeline_node=self._pipeline_node,
        mlmd_connection=metadata.Metadata(self._mlmd_connection_config),
        pipeline_info=self._pipeline_info,
        pipeline_runtime_spec=self._pipeline_runtime_spec,
        executor_spec=self._executor_spec,
        platform_config=platform_config,
        custom_driver_spec=self._custom_driver_spec).launch()

  def _extract_platform_config(
      self,
      deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
      node_id: str) -> Optional[message.Message]:
    """Returns the node-level platform config, or None if not configured."""
    platform_config = deployment_config.node_level_platform_configs.get(node_id)
    return (getattr(platform_config, platform_config.WhichOneof('config'))
            if platform_config else None)
class BeamDagRunner(tfx_runner.TfxRunner):
  """Tfx runner on Beam."""

  # DoFn class that executes a single pipeline node; subclasses may override.
  _PIPELINE_NODE_DO_FN_CLS = PipelineNodeAsDoFn

  def __new__(
      cls,
      beam_orchestrator_args: Optional[List[Text]] = None,
      config: Optional[pipeline_config.PipelineConfig] = None):
    """Initializes BeamDagRunner as a TFX orchestrator.

    Create the legacy BeamDagRunner object if any of the legacy
    `beam_orchestrator_args` or `config` arguments are passed. A migration
    guide will be provided in a future TFX version for users of these
    arguments.

    Args:
      beam_orchestrator_args: Deprecated beam args for the beam orchestrator.
        Note that this is different from the beam_pipeline_args within
        additional_pipeline_args, which is for beam pipelines in components.
        If this option is used, the legacy non-IR-based BeamDagRunner will be
        constructed.
      config: Deprecated optional pipeline config for customizing the
        launching of each component. Defaults to pipeline config that supports
        InProcessComponentLauncher and DockerComponentLauncher. If this option
        is used, the legacy non-IR-based BeamDagRunner will be constructed.

    Returns:
      Legacy or IR-based BeamDagRunner object.
    """
    if beam_orchestrator_args or config:
      logging.info(
          'Using the legacy BeamDagRunner since `beam_orchestrator_args` or '
          '`config` argument was passed.')
      return legacy_beam_dag_runner.BeamDagRunner(
          beam_orchestrator_args=beam_orchestrator_args, config=config)
    else:
      return super(BeamDagRunner, cls).__new__(cls)

  def _extract_platform_config(
      self,
      deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
      node_id: str) -> Optional[message.Message]:
    """Returns the node-level platform config, or None if not configured."""
    platform_config = deployment_config.node_level_platform_configs.get(node_id)
    return (getattr(platform_config, platform_config.WhichOneof('config'))
            if platform_config else None)

  def _build_local_platform_config(
      self, node_id: str,
      spec: any_pb2.Any) -> local_deployment_config_pb2.LocalPlatformConfig:
    """Builds LocalPlatformConfig given the any proto from IntermediateDeploymentConfig."""
    result = local_deployment_config_pb2.LocalPlatformConfig()
    if spec.Is(result.docker_platform_config.DESCRIPTOR):
      spec.Unpack(result.docker_platform_config)
    else:
      raise ValueError(
          'Platform config of {} is expected to be of one of the '
          'types of tfx.orchestration.deployment_config.LocalPlatformConfig.config '
          'but got type {}'.format(node_id, spec.type_url))
    return result

  def _extract_deployment_config(
      self, pipeline: pipeline_pb2.Pipeline
  ) -> local_deployment_config_pb2.LocalDeploymentConfig:
    """Extracts the proto.Any pipeline.deployment_config to LocalDeploymentConfig."""
    return runner_utils.extract_local_deployment_config(pipeline)

  def _extract_executor_spec(
      self,
      deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
      node_id: str) -> Optional[message.Message]:
    """Returns the executor spec for the given node id, or None."""
    return runner_utils.extract_executor_spec(deployment_config, node_id)

  def _extract_custom_driver_spec(
      self,
      deployment_config: local_deployment_config_pb2.LocalDeploymentConfig,
      node_id: str
  ) -> Optional[message.Message]:
    """Returns the custom driver spec for the given node id, or None."""
    return runner_utils.extract_custom_driver_spec(deployment_config, node_id)

  def _connection_config_from_deployment_config(self,
                                                deployment_config: Any) -> Any:
    """Returns the MLMD connection config held by the deployment config."""
    return deployment_config.metadata_connection_config

  def run(self, pipeline: Union[pipeline_pb2.Pipeline,
                                pipeline_py.Pipeline]) -> None:
    """Deploys given logical pipeline on Beam.

    Args:
      pipeline: Logical pipeline in IR format.
    """
    # For CLI, while creating or updating pipeline, pipeline_args are extracted
    # and hence we avoid deploying the pipeline.
    if 'TFX_JSON_EXPORT_PIPELINE_ARGS_PATH' in os.environ:
      return

    # Accept the DSL pipeline object and compile it down to IR on the fly.
    if isinstance(pipeline, pipeline_py.Pipeline):
      c = compiler.Compiler()
      pipeline = c.compile(pipeline)

    run_id = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')
    # Substitute the runtime parameter with a concrete run_id.
    runtime_parameter_utils.substitute_runtime_parameter(
        pipeline, {
            constants.PIPELINE_RUN_ID_PARAMETER_NAME: run_id,
        })

    deployment_config = self._extract_deployment_config(pipeline)
    connection_config = self._connection_config_from_deployment_config(
        deployment_config)

    logging.info('Running pipeline:\n %s', pipeline)
    logging.info('Using deployment config:\n %s', deployment_config)
    logging.info('Using connection config:\n %s', connection_config)

    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):
      with beam.Pipeline() as p:
        # Uses for triggering the node DoFns.
        root = p | 'CreateRoot' >> beam.Create([None])

        # Stores mapping of node to its signal.
        signal_map = {}
        # pipeline.nodes are in topological order.
        for node in pipeline.nodes:
          # TODO(b/160882349): Support subpipeline
          pipeline_node = node.pipeline_node
          node_id = pipeline_node.node_info.id
          executor_spec = self._extract_executor_spec(deployment_config,
                                                      node_id)
          custom_driver_spec = self._extract_custom_driver_spec(
              deployment_config, node_id)

          # Signals from upstream nodes.
          signals_to_wait = []
          for upstream_node in pipeline_node.upstream_nodes:
            assert upstream_node in signal_map, ('Nodes are not in '
                                                 'topological order')
            signals_to_wait.append(signal_map[upstream_node])
          logging.info('Node %s depends on %s.', node_id,
                       [s.producer.full_label for s in signals_to_wait])

          # Each signal is an empty PCollection. AsIter ensures a node will
          # be triggered after upstream nodes are finished.
          signal_map[node_id] = (
              root
              | 'Run[%s]' % node_id >> beam.ParDo(
                  self._PIPELINE_NODE_DO_FN_CLS(
                      pipeline_node=pipeline_node,
                      mlmd_connection_config=connection_config,
                      pipeline_info=pipeline.pipeline_info,
                      pipeline_runtime_spec=pipeline.runtime_spec,
                      executor_spec=executor_spec,
                      custom_driver_spec=custom_driver_spec,
                      deployment_config=deployment_config),
                  *[beam.pvalue.AsIter(s) for s in signals_to_wait]))
          logging.info('Node %s is scheduled.', node_id)
"""Definition of Beam TFX runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import Any, Iterable, List, Optional, Text, Type
import absl
import apache_beam as beam
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import docker_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.utils import telemetry_utils
# TODO(jyzhao): confirm it's re-executable, add test case.
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(Any)
class _ComponentAsDoFn(beam.DoFn):
  """Wrap component as beam DoFn."""

  def __init__(self, component: base_node.BaseNode,
               component_launcher_class: Type[
                   base_component_launcher.BaseComponentLauncher],
               component_config: base_component_config.BaseComponentConfig,
               tfx_pipeline: pipeline.Pipeline):
    """Initialize the _ComponentAsDoFn.

    Args:
      component: Component that to be executed.
      component_launcher_class: The class of the launcher to launch the
        component.
      component_config: component config to launch the component.
      tfx_pipeline: Logical pipeline that contains pipeline related
        information.
    """
    driver_args = data_types.DriverArgs(enable_cache=tfx_pipeline.enable_cache)
    metadata_connection = metadata.Metadata(
        tfx_pipeline.metadata_connection_config)
    # The launcher is fully constructed up front; process() only triggers it.
    self._component_launcher = component_launcher_class.create(
        component=component,
        pipeline_info=tfx_pipeline.pipeline_info,
        driver_args=driver_args,
        metadata_connection=metadata_connection,
        beam_pipeline_args=tfx_pipeline.beam_pipeline_args,
        additional_pipeline_args=tfx_pipeline.additional_pipeline_args,
        component_config=component_config)
    self._component_id = component.id

  def process(self, element: Any, *signals: Iterable[Any]) -> None:
    """Executes component based on signals.

    Args:
      element: a signal element to trigger the component.
      *signals: side input signals indicate completeness of upstream
        components.
    """
    # Upstream signals are empty PCollections; their presence (not their
    # content) encodes completion of the upstream components.
    for signal in signals:
      assert not list(signal), 'Signal PCollection should be empty.'
    self._run_component()

  def _run_component(self) -> None:
    # Runs the pre-built launcher for this component, logging start/finish.
    absl.logging.info('Component %s is running.', self._component_id)
    self._component_launcher.launch()
    absl.logging.info('Component %s is finished.', self._component_id)
class BeamDagRunner(tfx_runner.TfxRunner):
  """Tfx runner on Beam."""

  def __init__(self,
               beam_orchestrator_args: Optional[List[Text]] = None,
               config: Optional[pipeline_config.PipelineConfig] = None):
    """Initializes BeamDagRunner as a TFX orchestrator.

    Args:
      beam_orchestrator_args: beam args for the beam orchestrator. Note that
        this is different from the beam_pipeline_args within
        additional_pipeline_args, which is for beam pipelines in components.
      config: Optional pipeline config for customizing the launching of each
        component. Defaults to pipeline config that supports
        InProcessComponentLauncher and DockerComponentLauncher.
    """
    if config is None:
      config = pipeline_config.PipelineConfig(
          supported_launcher_classes=[
              in_process_component_launcher.InProcessComponentLauncher,
              docker_component_launcher.DockerComponentLauncher,
          ],
      )
    super(BeamDagRunner, self).__init__(config)
    self._beam_orchestrator_args = beam_orchestrator_args

  def run(self, tfx_pipeline: pipeline.Pipeline) -> None:
    """Deploys given logical pipeline on Beam.

    Args:
      tfx_pipeline: Logical pipeline containing pipeline args and components.
    """
    # For CLI, while creating or updating pipeline, pipeline_args are extracted
    # and hence we avoid executing the pipeline.
    if 'TFX_JSON_EXPORT_PIPELINE_ARGS_PATH' in os.environ:
      return

    tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()

    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):
      with beam.Pipeline(argv=self._beam_orchestrator_args) as p:
        # Uses for triggering the component DoFns.
        root = p | 'CreateRoot' >> beam.Create([None])

        # Stores mapping of component to its signal.
        signal_map = {}
        # pipeline.components are in topological order.
        for component in tfx_pipeline.components:
          component_id = component.id

          # Signals from upstream components.
          signals_to_wait = []
          if component.upstream_nodes:
            for upstream_node in component.upstream_nodes:
              assert upstream_node in signal_map, ('Components is not in '
                                                   'topological order')
              signals_to_wait.append(signal_map[upstream_node])
          absl.logging.info('Component %s depends on %s.', component_id,
                            [s.producer.full_label for s in signals_to_wait])

          (component_launcher_class,
           component_config) = config_utils.find_component_launch_info(
               self._config, component)

          # Each signal is an empty PCollection. AsIter ensures a component
          # will be triggered after upstream components are finished.
          signal_map[component] = (
              root
              | 'Run[%s]' % component_id >> beam.ParDo(
                  _ComponentAsDoFn(component, component_launcher_class,
                                   component_config, tfx_pipeline),
                  *[beam.pvalue.AsIter(s) for s in signals_to_wait]))
          absl.logging.info('Component %s is scheduled.', component_id)
"""V2 Kubeflow DAG Runner."""
import datetime
import json
import os
from typing import Any, Dict, List, Optional, Text
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow.v2 import pipeline_builder
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.utils import deprecation_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
from google.protobuf import json_format
# Command used to invoke the TFX executor entrypoint inside the container.
_KUBEFLOW_TFX_CMD = (
    'python', '-m',
    'tfx.orchestration.kubeflow.v2.container.kubeflow_v2_run_executor')

# Current schema version for the API proto.
_SCHEMA_VERSION = '1.0.0'

# Default TFX container image/commands to use in KubeflowV2DagRunner.
_KUBEFLOW_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
    version_utils.get_image_version())
def _get_current_time():
"""Gets the current timestamp."""
return datetime.datetime.now()
class KubeflowV2DagRunnerConfig(pipeline_config.PipelineConfig):
  """Runtime configuration specific to execution on Kubeflow pipelines."""

  def __init__(self,
               project_id: Text,
               display_name: Optional[Text] = None,
               default_image: Optional[Text] = None,
               default_commands: Optional[List[Text]] = None,
               **kwargs):
    """Constructs a Kubeflow V2 runner config.

    Args:
      project_id: GCP project ID to be used.
      display_name: Optional human-readable pipeline name. Defaults to the
        pipeline name passed into `KubeflowV2DagRunner.run()`.
      default_image: The default TFX image to be used if not overriden by per
        component specification.
      default_commands: Optionally specifies the commands of the provided
        container image. When not provided, the default `ENTRYPOINT` specified
        in the docker image is used. Note: the commands here refers to the K8S
        container command, which maps to Docker entrypoint field. If one
        supplies command but no args are provided for the container, the
        container will be invoked with the provided command, ignoring the
        `ENTRYPOINT` and `CMD` defined in the Dockerfile. One can find more
        details regarding the difference between K8S and Docker conventions at
        https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
      **kwargs: Additional args passed to base PipelineConfig.
    """
    super(KubeflowV2DagRunnerConfig, self).__init__(**kwargs)
    self.project_id = project_id
    self.display_name = display_name
    # Fall back to the officially released TFX image for this version.
    self.default_image = default_image or _KUBEFLOW_TFX_IMAGE
    if default_commands is None:
      self.default_commands = _KUBEFLOW_TFX_CMD
    else:
      self.default_commands = default_commands
class KubeflowV2DagRunner(tfx_runner.TfxRunner):
  """Kubeflow V2 pipeline runner.

  Builds a pipeline job spec in json format based on TFX pipeline DSL object.
  """

  def __init__(self,
               config: KubeflowV2DagRunnerConfig,
               output_dir: Optional[Text] = None,
               output_filename: Optional[Text] = None):
    """Constructs a KubeflowV2DagRunner for compiling pipelines.

    Args:
      config: An KubeflowV2DagRunnerConfig object to specify runtime
        configuration when running the pipeline in Kubeflow.
      output_dir: An optional output directory into which to output the
        pipeline definition files. Defaults to the current working directory.
      output_filename: An optional output file name for the pipeline
        definition file. The file output format will be a JSON-serialized
        PipelineJob pb message. Defaults to 'pipeline.json'.
    """
    if not isinstance(config, KubeflowV2DagRunnerConfig):
      raise TypeError('config must be type of KubeflowV2DagRunnerConfig.')
    super(KubeflowV2DagRunner, self).__init__()
    self._config = config
    self._output_dir = output_dir or os.getcwd()
    self._output_filename = output_filename or 'pipeline.json'

  def run(self,
          pipeline: tfx_pipeline.Pipeline,
          parameter_values: Optional[Dict[Text, Any]] = None,
          write_out: Optional[bool] = True) -> Dict[Text, Any]:
    """Compiles a pipeline DSL object into pipeline file.

    Args:
      pipeline: TFX pipeline object.
      parameter_values: mapping from runtime parameter names to its values.
      write_out: set to True to actually write out the file to the place
        designated by output_dir and output_filename. Otherwise return the
        JSON-serialized pipeline job spec.

    Returns:
      Returns the JSON pipeline job spec.

    Raises:
      RuntimeError: if trying to write out to a place occupied by an existing
        file.
    """
    # TODO(b/166343606): Support user-provided labels.
    # TODO(b/169095387): Deprecate .run() method in favor of the unified API
    # client.
    display_name = (
        self._config.display_name or pipeline.pipeline_info.pipeline_name)
    pipeline_spec = pipeline_builder.PipelineBuilder(
        tfx_pipeline=pipeline,
        default_image=self._config.default_image,
        default_commands=self._config.default_commands).build()
    pipeline_spec.sdk_version = 'tfx-{}'.format(version.__version__)
    pipeline_spec.schema_version = _SCHEMA_VERSION
    runtime_config = pipeline_builder.RuntimeConfigBuilder(
        pipeline_info=pipeline.pipeline_info,
        parameter_values=parameter_values).build()
    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_RUNNER: 'kubeflow_v2'}):
      result = pipeline_pb2.PipelineJob(
          display_name=display_name or pipeline.pipeline_info.pipeline_name,
          labels=telemetry_utils.get_labels_dict(),
          runtime_config=runtime_config)
    result.pipeline_spec.update(json_format.MessageToDict(pipeline_spec))
    pipeline_json_dict = json_format.MessageToDict(result)
    if write_out:
      if fileio.exists(self._output_dir) and not fileio.isdir(self._output_dir):
        raise RuntimeError('Output path: %s is pointed to a file.' %
                           self._output_dir)
      if not fileio.exists(self._output_dir):
        fileio.makedirs(self._output_dir)
      # NOTE(review): the file is opened in binary mode while json.dumps
      # returns str; this assumes the fileio plugin accepts str writes in
      # 'wb' mode — confirm against the configured filesystem plugin.
      with fileio.open(
          os.path.join(self._output_dir, self._output_filename), 'wb') as f:
        f.write(json.dumps(pipeline_json_dict, sort_keys=True))

    return pipeline_json_dict

  # Deprecated alias kept for backward compatibility with older callers.
  compile = deprecation_utils.deprecated_alias(
      deprecated_name='compile', name='run', func_or_class=run)
"""Utility methods for Kubeflow V2 pipeline compilation."""
# TODO(b/172080784): Add more tests for this module.
import json
import os
from typing import Any, Dict, List, Mapping, Optional, Text, Type, Union
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.orchestration.kubeflow.v2 import parameter_utils
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.types import artifact
from tfx.types import channel
from tfx.types import standard_artifacts
from tfx.types.experimental import simple_artifacts
from tfx.utils import json_utils
import yaml
from google.protobuf import json_format
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
# Key of TFX type path and name in artifact custom properties.
TFX_TYPE_KEY = 'tfx_type'
TYPE_NAME_KEY = 'type_name'

# TFX artifact classes that have a known Kubeflow V2 schema mapping.
_SUPPORTED_STANDARD_ARTIFACT_TYPES = frozenset(
    (standard_artifacts.ExampleAnomalies, standard_artifacts.ExampleStatistics,
     standard_artifacts.Examples, standard_artifacts.HyperParameters,
     standard_artifacts.InferenceResult, standard_artifacts.InfraBlessing,
     standard_artifacts.Model, standard_artifacts.ModelBlessing,
     standard_artifacts.ModelEvaluation, standard_artifacts.ModelRun,
     standard_artifacts.PushedModel, standard_artifacts.Schema,
     standard_artifacts.TransformGraph, standard_artifacts.TransformCache,
     standard_artifacts.Float, standard_artifacts.Integer,
     standard_artifacts.String, simple_artifacts.Metrics,
     simple_artifacts.Statistics, simple_artifacts.Dataset,
     simple_artifacts.File))

# Maps schema titles (e.g. 'tfx.Examples') to the fully-qualified Python
# import path of the corresponding artifact class.
# TODO(b/156746891): Use IFTTT to sync import path with the definition in
# tfx.types.
TITLE_TO_CLASS_PATH = {
    'tfx.ExampleAnomalies': 'tfx.types.standard_artifacts.ExampleAnomalies',
    'tfx.ExampleStatistics': 'tfx.types.standard_artifacts.ExampleStatistics',
    'tfx.Examples': 'tfx.types.standard_artifacts.Examples',
    'tfx.HyperParameters': 'tfx.types.standard_artifacts.HyperParameters',
    'tfx.InferenceResult': 'tfx.types.standard_artifacts.InferenceResult',
    'tfx.InfraBlessing': 'tfx.types.standard_artifacts.InfraBlessing',
    'tfx.Model': 'tfx.types.standard_artifacts.Model',
    'tfx.ModelBlessing': 'tfx.types.standard_artifacts.ModelBlessing',
    'tfx.ModelEvaluation': 'tfx.types.standard_artifacts.ModelEvaluation',
    'tfx.ModelRun': 'tfx.types.standard_artifacts.ModelRun',
    'tfx.PushedModel': 'tfx.types.standard_artifacts.PushedModel',
    'tfx.Schema': 'tfx.types.standard_artifacts.Schema',
    'tfx.TransformGraph': 'tfx.types.standard_artifacts.TransformGraph',
    'tfx.TransformCache': 'tfx.types.standard_artifacts.TransformCache',
    'tfx.Float': 'tfx.types.standard_artifacts.Float',
    'tfx.Integer': 'tfx.types.standard_artifacts.Integer',
    'tfx.String': 'tfx.types.standard_artifacts.String',
    'tfx.Metrics': 'tfx.types.experimental.simple_artifacts.Metrics',
    'tfx.Statistics': 'tfx.types.experimental.simple_artifacts.Statistics',
    'tfx.Dataset': 'tfx.types.experimental.simple_artifacts.Dataset',
    'tfx.File': 'tfx.types.experimental.simple_artifacts.File'
}

# Keywords used in artifact type YAML specs.
_YAML_INT_TYPE = 'int'
_YAML_STRING_TYPE = 'string'
_YAML_DOUBLE_TYPE = 'double'
def build_runtime_parameter_spec(
    parameters: List[data_types.RuntimeParameter]
) -> Dict[str, pipeline_pb2.PipelineSpec.RuntimeParameter]:
  """Converts RuntimeParameters to a mapping from names to proto messages."""

  def to_message(parameter: data_types.RuntimeParameter):
    """Converts a single RuntimeParameter to its proto message form."""
    result = pipeline_pb2.PipelineSpec.RuntimeParameter()
    # 1. Map the RuntimeParameter type to an enum in the proto definition.
    # bool is intentionally modeled as INT, mirroring the proto primitives.
    if parameter.ptype in (int, bool):
      result.type = pipeline_pb2.PrimitiveType.INT
    elif parameter.ptype == float:
      result.type = pipeline_pb2.PrimitiveType.DOUBLE
    elif parameter.ptype == Text:
      result.type = pipeline_pb2.PrimitiveType.STRING
    else:
      raise TypeError(
          'Unknown parameter type: {} found in parameter: {}'.format(
              parameter.ptype, parameter.name))
    # 2. Convert its default value, if one was supplied.
    default = value_converter(parameter.default)
    if default is not None:
      result.default_value.CopyFrom(default.constant_value)
    return result

  return {param.name: to_message(param) for param in parameters}
def build_input_parameter_spec(
    dict_data: Dict[str, Any]
) -> Dict[str, pipeline_pb2.TaskInputsSpec.InputParameterSpec]:
  """Converts a dict into the Kubeflow pipeline input parameter section.

  Entries whose value is None are dropped from the result.
  """
  return {
      key: pipeline_pb2.TaskInputsSpec.InputParameterSpec(
          runtime_value=value_converter(value))
      for key, value in dict_data.items()
      if value is not None
  }
def _validate_properties_schema(
    instance_schema: str,
    properties: Optional[Mapping[str, artifact.PropertyType]] = None):
  """Validates the declared property types are consistent with the schema.

  Args:
    instance_schema: YAML string of the artifact property schema.
    properties: The actual property schema of an Artifact Python class.

  Raises:
    KeyError: When the actual properties include a property that is absent
      from the YAML schema.
    TypeError: When the same property is declared with different types in the
      YAML schema and the Artifact Python class.
  """
  schema = yaml.safe_load(instance_schema)['properties'] or {}
  properties = properties or {}
  # Expected artifact property type for each primitive YAML type keyword.
  # It's okay that we only validate the constant_value case, since
  # RuntimeParameter's ptype should be validated during component
  # instantiation. Only primitive-typed properties are validated for now
  # because other types can have nested schema in the YAML spec as well.
  expected_types = {
      _YAML_INT_TYPE: artifact.PropertyType.INT,
      _YAML_STRING_TYPE: artifact.PropertyType.STRING,
      _YAML_DOUBLE_TYPE: artifact.PropertyType.FLOAT,
  }
  for name, declared in properties.items():
    if name not in schema:
      raise KeyError('Actual property: {} not expected in artifact type schema:'
                     ' {}'.format(name, schema))
    expected = expected_types.get(schema[name]['type'])
    if expected is not None and declared.type != expected:
      raise TypeError('Property type mismatched at {} for schema: {}. '
                      'Expected {} but got {}'.format(
                          name, schema, schema[name]['type'], declared.type))
def build_output_artifact_spec(
    channel_spec: channel.Channel
) -> pipeline_pb2.TaskOutputsSpec.OutputArtifactSpec:
  """Builds the Kubeflow pipeline output artifact spec from TFX channel spec.

  Args:
    channel_spec: the TFX output channel whose artifact type, properties and
      custom properties are converted.

  Returns:
    An OutputArtifactSpec carrying the artifact type schema plus the
    converted (custom) properties.
  """
  # Instantiate the artifact type to access its schema and its (custom)
  # property values recorded on the underlying MLMD artifact.
  artifact_instance = channel_spec.type()
  result = pipeline_pb2.TaskOutputsSpec.OutputArtifactSpec()
  result.artifact_type.CopyFrom(
      pipeline_pb2.ArtifactTypeSchema(
          instance_schema=get_artifact_schema(artifact_instance)))
  for k, v in convert_from_tfx_properties(
      artifact_instance.mlmd_artifact.properties).items():
    result.properties[k].CopyFrom(v)
  # Fail fast if the Python class's declared property types disagree with the
  # YAML schema that was just embedded in the spec.
  _validate_properties_schema(
      instance_schema=result.artifact_type.instance_schema,
      properties=channel_spec.type.PROPERTIES)
  for k, v in convert_from_tfx_properties(
      artifact_instance.mlmd_artifact.custom_properties).items():
    result.custom_properties[k].CopyFrom(v)
  return result
def value_converter(
    tfx_value: Any) -> Optional[pipeline_pb2.ValueOrRuntimeParameter]:
  """Converts TFX/MLMD values into Kubeflow pipeline ValueOrRuntimeParameter.

  Args:
    tfx_value: the value to convert. Supported: None, primitives, dict/list
      (JSON-encoded), RuntimeParameter, MLMD Value proto, any other proto
      message (JSON-encoded), and as a fallback anything json_utils can
      serialize.

  Returns:
    The converted ValueOrRuntimeParameter message, or None if tfx_value is
    None.
  """
  if tfx_value is None:
    return None
  result = pipeline_pb2.ValueOrRuntimeParameter()
  if isinstance(tfx_value, (int, float, str, Text)):
    result.constant_value.CopyFrom(get_kubeflow_value(tfx_value))
  elif isinstance(tfx_value, (Dict, List)):
    # Containers are stored as a JSON string constant.
    result.constant_value.CopyFrom(
        pipeline_pb2.Value(string_value=json.dumps(tfx_value)))
  elif isinstance(tfx_value, data_types.RuntimeParameter):
    # Attach the runtime parameter to the context.
    parameter_utils.attach_parameter(tfx_value)
    result.runtime_parameter = tfx_value.name
  elif isinstance(tfx_value, metadata_store_pb2.Value):
    # Unwrap the MLMD oneof into the corresponding Kubeflow constant value.
    # NOTE(review): an MLMD Value with no field set yields an empty
    # constant_value here -- confirm that is intended.
    if tfx_value.WhichOneof('value') == 'int_value':
      result.constant_value.CopyFrom(
          pipeline_pb2.Value(int_value=tfx_value.int_value))
    elif tfx_value.WhichOneof('value') == 'double_value':
      result.constant_value.CopyFrom(
          pipeline_pb2.Value(double_value=tfx_value.double_value))
    elif tfx_value.WhichOneof('value') == 'string_value':
      result.constant_value.CopyFrom(
          pipeline_pb2.Value(string_value=tfx_value.string_value))
  elif isinstance(tfx_value, message.Message):
    # Any other proto message is serialized to deterministic (sorted) JSON.
    result.constant_value.CopyFrom(
        pipeline_pb2.Value(
            string_value=json_format.MessageToJson(
                message=tfx_value, sort_keys=True)))
  else:
    # By default will attempt to encode the object using json_utils.dumps.
    result.constant_value.CopyFrom(
        pipeline_pb2.Value(string_value=json_utils.dumps(tfx_value)))
  return result
def get_kubeflow_value(
    tfx_value: Union[int, float, str, Text]) -> Optional[pipeline_pb2.Value]:
  """Converts TFX/MLMD values into a Kubeflow pipeline Value proto message."""
  if tfx_value is None:
    return None
  value = pipeline_pb2.Value()
  # Check int before float/str; note bool is a subclass of int and is thus
  # stored in int_value, matching the original behavior.
  for accepted_types, field_name in ((int, 'int_value'),
                                     (float, 'double_value'),
                                     ((str, Text), 'string_value')):
    if isinstance(tfx_value, accepted_types):
      setattr(value, field_name, tfx_value)
      return value
  raise TypeError('Got unknown type of value: {}'.format(tfx_value))
def get_mlmd_value(
    kubeflow_value: pipeline_pb2.Value) -> metadata_store_pb2.Value:
  """Converts a Kubeflow pipeline Value pb message to an MLMD Value."""
  mlmd_value = metadata_store_pb2.Value()
  # Inspect the oneof once and copy the matching field across.
  set_field = kubeflow_value.WhichOneof('value')
  if set_field == 'int_value':
    mlmd_value.int_value = kubeflow_value.int_value
  elif set_field == 'double_value':
    mlmd_value.double_value = kubeflow_value.double_value
  elif set_field == 'string_value':
    mlmd_value.string_value = kubeflow_value.string_value
  else:
    raise TypeError('Get unknown type of value: {}'.format(kubeflow_value))
  return mlmd_value
def get_artifact_schema(artifact_instance: artifact.Artifact) -> Text:
  """Gets the YAML schema string associated with the artifact type.

  Args:
    artifact_instance: an instance of the artifact type whose schema is
      requested.

  Returns:
    The YAML schema for the artifact type.
    NOTE(review): the first branch returns the raw bytes read from the
    packaged yaml file while the second returns a str from yaml.dump --
    confirm callers accept both, or decode the first branch.
  """
  if isinstance(artifact_instance, tuple(_SUPPORTED_STANDARD_ARTIFACT_TYPES)):
    # For supported first-party artifact types, get the built-in schema yaml per
    # its type name.
    schema_path = os.path.join(
        os.path.dirname(__file__), 'artifact_types',
        '{}.yaml'.format(artifact_instance.type_name))
    return fileio.open(schema_path, 'rb').read()
  else:
    # Otherwise, fall back to the generic `Artifact` type schema.
    # To recover the Python type object at runtime, the class import path will
    # be encoded as the schema title.
    # Read the generic artifact schema template.
    schema_path = os.path.join(
        os.path.dirname(__file__), 'artifact_types', 'Artifact.yaml')
    data = yaml.safe_load(fileio.open(schema_path, 'rb').read())
    # Encode class import path.
    data['title'] = '%s.%s' % (artifact_instance.__class__.__module__,
                               artifact_instance.__class__.__name__)
    return yaml.dump(data, sort_keys=False)
def get_artifact_title(artifact_type: Type[artifact.Artifact]) -> Text:
  """Gets the schema title from the artifact python class."""
  if artifact_type not in _SUPPORTED_STANDARD_ARTIFACT_TYPES:
    # User-defined / unsupported types fall back to the generic title.
    return 'tfx.Artifact'
  return 'tfx.{}'.format(artifact_type.__name__)
def convert_from_tfx_properties(
    tfx_properties) -> Dict[Any, pipeline_pb2.ValueOrRuntimeParameter]:
  """Converts (custom) properties to a mapping to ValueOrRuntimeParameter pb.

  Args:
    tfx_properties: a mapping field in a proto message, from string to
      pipeline.Value.

  Returns:
    A mapping from string to pipeline_spec.ValueOrRuntimeParameter containing
    the same information.
  """
  return {k: value_converter(v) for k, v in tfx_properties.items()}
"""Builder for Kubeflow pipelines level proto spec."""
import re
from typing import Any, Dict, List, Optional, Text
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.orchestration.kubeflow.v2 import compiler_utils
from tfx.orchestration.kubeflow.v2 import parameter_utils
from tfx.orchestration.kubeflow.v2 import step_builder
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from google.protobuf import json_format
_LEGAL_NAME_PATTERN = re.compile(r'[README.ml-pipelines-sdk.md-z0-9][README.ml-pipelines-sdk.md-z0-9-]{0,127}')
def _check_name(name: Text) -> None:
"""Checks the user-provided pipeline name."""
if not _LEGAL_NAME_PATTERN.fullmatch(name):
raise ValueError('User provided pipeline name % is illegal, please follow '
'the pattern of [README.ml-pipelines-sdk.md-z0-9][README.ml-pipelines-sdk.md-z0-9-]{0,127}.')
class RuntimeConfigBuilder(object):
  """Kubeflow pipelines RuntimeConfig builder."""

  def __init__(self, pipeline_info: data_types.PipelineInfo,
               parameter_values: Dict[Text, Any]):
    """Creates a RuntimeConfigBuilder object.

    Args:
      pipeline_info: a TFX pipeline info object, containing pipeline root info.
      parameter_values: mapping from runtime parameter names to their values.
    """
    self._pipeline_root = pipeline_info.pipeline_root
    self._parameter_values = parameter_values or {}

  def build(self) -> pipeline_pb2.PipelineJob.RuntimeConfig:
    """Builds a RuntimeConfig proto from the pipeline root and parameters."""
    return pipeline_pb2.PipelineJob.RuntimeConfig(
        gcs_output_directory=self._pipeline_root,
        parameters={
            k: compiler_utils.get_kubeflow_value(v)
            for k, v in self._parameter_values.items()
        })
class PipelineBuilder(object):
  """Kubeflow pipelines spec builder.

  Constructs a pipeline spec based on the TFX pipeline object.
  """

  def __init__(self,
               tfx_pipeline: pipeline.Pipeline,
               default_image: Text,
               default_commands: Optional[List[Text]] = None):
    """Creates a PipelineBuilder object.

    A PipelineBuilder takes in a TFX pipeline object. Then
    PipelineBuilder.build() outputs a Kubeflow PipelineSpec proto.

    Args:
      tfx_pipeline: A TFX pipeline object.
      default_image: Specifies the TFX container image used in CMLE container
        tasks. Can be overridden by per component specification.
      default_commands: Optionally specifies the commands of the provided
        container image. When not provided, the default `ENTRYPOINT` specified
        in the docker image is used. Note: the commands here refers to the K8S
        container command, which maps to Docker entrypoint field. If one
        supplies command but no args are provided for the container, the
        container will be invoked with the provided command, ignoring the
        `ENTRYPOINT` and `CMD` defined in the Dockerfile. One can find more
        details regarding the difference between K8S and Docker conventions at
        https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
    """
    self._pipeline_info = tfx_pipeline.pipeline_info
    self._pipeline = tfx_pipeline
    self._default_image = default_image
    self._default_commands = default_commands

  def build(self) -> pipeline_pb2.PipelineSpec:
    """Builds a pipeline PipelineSpec."""
    _check_name(self._pipeline_info.pipeline_name)
    deployment_config = pipeline_pb2.PipelineDeploymentConfig()
    pipeline_info = pipeline_pb2.PipelineInfo(
        name=self._pipeline_info.pipeline_name)
    tasks = []
    # Map from (producer component id, output key) to (new producer component
    # id, output key).
    channel_redirect_map = {}
    with parameter_utils.ParameterContext() as pc:
      for component in self._pipeline.components:
        # Here the topological order of components is required.
        # If a channel redirection is needed, redirect mapping is expected to
        # be available because the upstream node (which is the cause for
        # redirecting) is processed before the downstream consumer nodes.
        built_tasks = step_builder.StepBuilder(
            node=component,
            deployment_config=deployment_config,
            image=self._default_image,
            image_cmds=self._default_commands,
            beam_pipeline_args=self._pipeline.beam_pipeline_args,
            enable_cache=self._pipeline.enable_cache,
            pipeline_info=self._pipeline_info,
            channel_redirect_map=channel_redirect_map).build()
        tasks.extend(built_tasks)
    result = pipeline_pb2.PipelineSpec(
        pipeline_info=pipeline_info,
        tasks=tasks,
        runtime_parameters=compiler_utils.build_runtime_parameter_spec(
            pc.parameters))
    result.deployment_spec.update(json_format.MessageToDict(deployment_config))
    return result
"""Driver for file-based ExampleGen components in Kubeflow V2 runner."""
import argparse
import os
from typing import Any, Dict, List, Optional
from absl import logging
from tfx.components.example_gen import driver
from tfx.components.example_gen import utils
from tfx.dsl.io import fileio
from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.proto import example_gen_pb2
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.utils import proto_utils
from google.protobuf import json_format
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
def _run_driver(exec_properties: Dict[str, Any],
                outputs_dict: Dict[str, List[artifact.Artifact]],
                output_metadata_uri: str,
                name_from_id: Optional[Dict[int, str]] = None) -> None:
  """Runs the driver, writing its output as an ExecutorOutput proto.

  The main goal of this driver is to calculate the span and fingerprint of input
  data, allowing for the executor invocation to be skipped if the ExampleGen
  component has been previously run on the same data with the same
  configuration. This span and fingerprint are added as new custom execution
  properties to an ExecutorOutput proto and written to a GCS path. The CAIP
  pipelines system reads this file and updates MLMD with the new execution
  properties.

  Args:
    exec_properties:
      These are required to contain the following properties:
      'input_base_uri': A path from which files will be read and their
        span/fingerprint calculated.
      'input_config': A json-serialized tfx.proto.example_gen_pb2.InputConfig
        proto message.
        See https://www.tensorflow.org/tfx/guide/examplegen for more details.
      'output_config': A json-serialized tfx.proto.example_gen_pb2.OutputConfig
        proto message.
        See https://www.tensorflow.org/tfx/guide/examplegen for more details.
    outputs_dict: The mapping of the output artifacts.
    output_metadata_uri: A path at which an ExecutorOutput message will be
      written with updated execution properties and output artifacts. The CAIP
      Pipelines service will update the task's properties and artifacts prior to
      running the executor.
    name_from_id: Optional. Mapping from the converted int-typed id to str-typed
      runtime artifact name, which should be unique.

  Raises:
    ValueError: when the 'examples' output artifact is missing in outputs_dict.
  """
  if name_from_id is None:
    name_from_id = {}
  logging.set_verbosity(logging.INFO)
  logging.info('exec_properties = %s\noutput_metadata_uri = %s',
               exec_properties, output_metadata_uri)
  input_base_uri = exec_properties[utils.INPUT_BASE_KEY]
  input_config = example_gen_pb2.Input()
  proto_utils.json_to_proto(exec_properties[utils.INPUT_CONFIG_KEY],
                            input_config)
  # TODO(b/161734559): Support range config.
  fingerprint, select_span, version = utils.calculate_splits_fingerprint_span_and_version(
      input_base_uri, input_config.splits)
  logging.info('Calculated span: %s', select_span)
  logging.info('Calculated fingerprint: %s', fingerprint)
  # Attach the computed span/fingerprint/version so a later run over identical
  # input data can be detected and the executor skipped.
  exec_properties[utils.SPAN_PROPERTY_NAME] = select_span
  exec_properties[utils.FINGERPRINT_PROPERTY_NAME] = fingerprint
  exec_properties[utils.VERSION_PROPERTY_NAME] = version
  if utils.EXAMPLES_KEY not in outputs_dict:
    raise ValueError('Example artifact was missing in the ExampleGen outputs.')
  example_artifact = artifact_utils.get_single_instance(
      outputs_dict[utils.EXAMPLES_KEY])
  driver.update_output_artifact(
      exec_properties=exec_properties,
      output_artifact=example_artifact.mlmd_artifact)
  # Write the output metadata (new execution properties plus the updated
  # Examples artifact) for the CAIP Pipelines service to pick up.
  output_metadata = pipeline_pb2.ExecutorOutput()
  output_metadata.parameters[
      utils.FINGERPRINT_PROPERTY_NAME].string_value = fingerprint
  output_metadata.parameters[utils.SPAN_PROPERTY_NAME].string_value = str(
      select_span)
  output_metadata.parameters[
      utils.INPUT_CONFIG_KEY].string_value = json_format.MessageToJson(
          input_config)
  output_metadata.artifacts[utils.EXAMPLES_KEY].artifacts.add().CopyFrom(
      kubeflow_v2_entrypoint_utils.to_runtime_artifact(example_artifact,
                                                       name_from_id))
  fileio.makedirs(os.path.dirname(output_metadata_uri))
  with fileio.open(output_metadata_uri, 'wb') as f:
    f.write(json_format.MessageToJson(output_metadata, sort_keys=True))
def main(argv):
  """Parses the serialized invocation args and runs the ExampleGen driver."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--json_serialized_invocation_args',
      type=str,
      required=True,
      help='JSON-serialized metadata for this execution.')
  parsed_args, _ = arg_parser.parse_known_args(argv)
  executor_input = pipeline_pb2.ExecutorInput()
  json_format.Parse(
      parsed_args.json_serialized_invocation_args,
      executor_input,
      ignore_unknown_fields=True)
  # Shared id -> name mapping: populated while parsing output artifacts and
  # consumed again by the driver when writing its results.
  id_to_name = {}
  properties = kubeflow_v2_entrypoint_utils.parse_execution_properties(
      executor_input.inputs.parameters)
  output_artifacts = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict(
      executor_input.outputs.artifacts, id_to_name)
  _run_driver(properties, output_artifacts,
              executor_input.outputs.output_file, id_to_name)
if __name__ == '__main__':
  # Entry point when invoked as a container command.
  app.run(main=main)
"""Entrypoint for invoking TFX components in Kubeflow V2 runner."""
import argparse
import os
from typing import List
from absl import logging
from tfx.components.evaluator import executor as evaluator_executor
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.orchestration.kubeflow.v2.container import kubeflow_v2_entrypoint_utils
from tfx.orchestration.kubeflow.v2.proto import pipeline_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.utils import import_utils
from google.protobuf import json_format
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
# TODO(b/166202742): Consolidate container entrypoint with Kubeflow runner.
# TODO(b/154046602): Consider put this function into tfx/orchestration, and
# unify the code paths to call into component executors.
def _run_executor(args: argparse.Namespace, beam_args: List[str]) -> None:
  """Selects a particular executor and runs it based on name.

  Args:
    args:
      --executor_class_path: The import path of the executor class.
      --json_serialized_invocation_args: Full JSON-serialized parameters for
        this execution.
    beam_args: Optional parameter that maps to the optional_pipeline_args
      parameter in the pipeline, which provides additional configuration options
      for apache-beam and tensorflow.logging.
      For more about the beam arguments please refer to:
      https://cloud.google.com/dataflow/docs/guides/specifying-exec-params
  """
  logging.set_verbosity(logging.INFO)
  # Rehydrate inputs/outputs/exec_properties from the serialized metadata.
  executor_input = pipeline_pb2.ExecutorInput()
  json_format.Parse(
      args.json_serialized_invocation_args,
      executor_input,
      ignore_unknown_fields=True)
  inputs_dict = executor_input.inputs.artifacts
  outputs_dict = executor_input.outputs.artifacts
  inputs_parameter = executor_input.inputs.parameters
  # name_from_id accumulates the int id -> artifact name mapping while parsing
  # artifacts, and is reused below when translating outputs back.
  name_from_id = {}
  inputs = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict(
      inputs_dict, name_from_id)
  outputs = kubeflow_v2_entrypoint_utils.parse_raw_artifact_dict(
      outputs_dict, name_from_id)
  exec_properties = kubeflow_v2_entrypoint_utils.parse_execution_properties(
      inputs_parameter)
  logging.info('Executor %s do: inputs: %s, outputs: %s, exec_properties: %s',
               args.executor_class_path, inputs, outputs, exec_properties)
  # Import and instantiate the requested executor class, then run it.
  executor_cls = import_utils.import_class_by_path(args.executor_class_path)
  executor_context = base_executor.BaseExecutor.Context(
      beam_pipeline_args=beam_args, unique_id='')
  executor = executor_cls(executor_context)
  logging.info('Starting executor')
  executor.Do(inputs, outputs, exec_properties)
  # TODO(b/169583143): Remove this workaround when TFX migrates to use str-typed
  # id/name to identify artifacts.
  # Convert ModelBlessing artifact to use managed MLMD resource name.
  if (issubclass(executor_cls, evaluator_executor.Executor) and
      BLESSING_KEY in outputs):
    # Parse the parent prefix for managed MLMD resource name.
    kubeflow_v2_entrypoint_utils.refactor_model_blessing(
        artifact_utils.get_single_instance(outputs[BLESSING_KEY]),
        name_from_id)
  # Log the output metadata to a file, so that it can be picked up by MP.
  metadata_uri = executor_input.outputs.output_file
  executor_output = pipeline_pb2.ExecutorOutput()
  for k, v in kubeflow_v2_entrypoint_utils.translate_executor_output(
      outputs, name_from_id).items():
    executor_output.artifacts[k].CopyFrom(v)
  fileio.makedirs(os.path.dirname(metadata_uri))
  with fileio.open(metadata_uri, 'wb') as f:
    f.write(json_format.MessageToJson(executor_output))
def main(argv):
  """Parses the arguments for _run_executor() then invokes it.

  Args:
    argv: Unparsed arguments for run_executor.py. Known argument names include
      --executor_class_path: Python class of executor in format of
        <module>.<class>.
      --json_serialized_invocation_args: Full JSON-serialized parameters for
        this execution. The remaining part of the arguments will be parsed as
        the beam args used by each component executors. Some commonly used beam
        args are as follows:
        --runner: The beam pipeline runner environment. Can be DirectRunner (for
          running locally) or DataflowRunner (for running on GCP Dataflow
          service).
        --project: The GCP project ID. Needed when runner==DataflowRunner.
        --direct_num_workers: Number of threads or subprocesses executing the
          work load.
      For more about the beam arguments please refer to:
      https://cloud.google.com/dataflow/docs/guides/specifying-exec-params

  Returns:
    None
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--executor_class_path',
      type=str,
      required=True,
      help='Python class of executor in format of <module>.<class>.')
  parser.add_argument(
      '--json_serialized_invocation_args',
      type=str,
      required=True,
      help='JSON-serialized metadata for this execution.')
  # Unknown args are forwarded to the executor as Beam pipeline args.
  args, beam_args = parser.parse_known_args(argv)
  _run_executor(args, beam_args)
if __name__ == '__main__':
  # Entry point when invoked as a container command.
  app.run(main=main)
"""Component that launches CAIP custom training job with flexible interface."""
from typing import Any, Dict, List, Optional, Text
from tfx.dsl.component.experimental import component_utils
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.kubeflow.v2.components.experimental import ai_platform_training_executor
from tfx.types import channel_utils
from tfx.types import component_spec
from tfx.utils import json_utils
def create_ai_platform_training(
    name: Text,
    project_id: Text,
    region: Optional[Text] = None,
    job_id: Optional[Text] = None,
    image_uri: Optional[Text] = None,
    args: Optional[List[placeholders.CommandlineArgumentType]] = None,
    # TODO(jxzheng): support Python training spec
    scale_tier: Optional[Text] = None,
    training_input: Optional[Dict[Text, Any]] = None,
    labels: Optional[Dict[Text, Text]] = None,
    inputs: Dict[Text, Any] = None,
    outputs: Dict[Text, Any] = None,
    parameters: Dict[Text, Any] = None,
) -> base_component.BaseComponent:
  """Creates a pipeline step that launches an AIP training job.

  The generated TFX component will have a component spec specified dynamically,
  through inputs/outputs/parameters in the following format:
  - inputs: A mapping from input name to the upstream channel connected. The
      artifact type of the channel will be automatically inferred.
  - outputs: A mapping from output name to the associated artifact type.
  - parameters: A mapping from execution property names to its associated value.
      Only primitive typed values are supported. Note that RuntimeParameter is
      not supported yet.

  For example:

  ```
  create_ai_platform_training(
      ...
      inputs: {
          # Assuming there is an upstream node example_gen, with an output
          # 'examples' of the type Examples.
          'examples': example_gen.outputs['examples'],
      },
      outputs: {
          'model': standard_artifacts.Model,
      },
      parameters: {
          'n_steps': 100,
          'optimizer': 'sgd',
      }
      ...
  )
  ```

  will generate a component instance with a component spec equivalent to:

  ```
  class MyComponentSpec(ComponentSpec):
    INPUTS = {
        'examples': ChannelParameter(type=standard_artifacts.Examples)
    }
    OUTPUTS = {
        'model': ChannelParameter(type=standard_artifacts.Model)
    }
    PARAMETERS = {
        'n_steps': ExecutionParameter(type=int),
        'optimizer': ExecutionParameter(type=str)
    }
  ```

  with its input 'examples' connected to the example_gen output, and
  execution properties specified as 100 and 'sgd' respectively.

  Example usage of the component:

  ```
  # A single node training job.
  my_train = create_ai_platform_training(
      name='my_training_step',
      project_id='my-project',
      region='us-central1',
      image_uri='gcr.io/my-project/caip-training-test:latest',
      'args': [
          '--examples',
          placeholders.InputUriPlaceholder('examples'),
          '--n-steps',
          placeholders.InputValuePlaceholder('n_step'),
          '--output-location',
          placeholders.OutputUriPlaceholder('model')
      ]
      scale_tier='BASIC_GPU',
      inputs={'examples': example_gen.outputs['examples']},
      outputs={
          'model': standard_artifacts.Model
      },
      parameters={'n_step': 100}
  )

  # More complex setting can be expressed by providing training_input
  # directly.
  my_distributed_train = create_ai_platform_training(
      name='my_training_step',
      project_id='my-project',
      training_input={
          'scaleTier':
              'CUSTOM',
          'region':
              'us-central1',
          'masterType': 'n1-standard-8',
          'masterConfig': {
              'imageUri': 'gcr.io/my-project/my-dist-training:latest'
          },
          'workerType': 'n1-standard-8',
          'workerCount': 8,
          'workerConfig': {
              'imageUri': 'gcr.io/my-project/my-dist-training:latest'
          },
          'args': [
              '--examples',
              placeholders.InputUriPlaceholder('examples'),
              '--n-steps',
              placeholders.InputValuePlaceholder('n_step'),
              '--output-location',
              placeholders.OutputUriPlaceholder('model')
          ]
      },
      inputs={'examples': example_gen.outputs['examples']},
      outputs={'model': Model},
      parameters={'n_step': 100}
  )
  ```

  Args:
    name: name of the component. This is needed to construct the component spec
      and component class dynamically as well.
    project_id: the GCP project under which the AIP training job will be
      running.
    region: GCE region where the AIP training job will be running.
    job_id: the unique ID of the job. Default to 'tfx_%Y%m%d%H%M%S'
    image_uri: the GCR location of the container image, which will be used to
      execute the training program. If the same field is specified in
      training_input, the latter overrides image_uri.
    args: command line arguments that will be passed into the training program.
      Users can use placeholder semantics as in
      tfx.dsl.component.experimental.container_component to wire the args with
      component inputs/outputs/parameters.
    scale_tier: Cloud ML resource requested by the job. See
      https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#ScaleTier
    training_input: full training job spec. This field overrides other
      specifications if applicable. This field follows the
      [TrainingInput](https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#traininginput)
      schema.
    labels: user-specified label attached to the job.
    inputs: the dict of component inputs.
    outputs: the dict of component outputs.
    parameters: the dict of component parameters, aka, execution properties.

  Returns:
    A component instance that represents the AIP job in the DSL.

  Raises:
    ValueError: when image_uri is missing and masterConfig is not specified in
      training_input, or when region is missing and training_input
      does not provide region either.
    TypeError: when non-primitive parameters are specified.
  """
  training_input = training_input or {}
  # BUG FIX: this previously looked up the snake_case key 'scale_tier', which
  # is never present (TrainingInput uses camelCase keys), so a scaleTier set
  # explicitly in training_input was silently overwritten -- contradicting the
  # documented precedence of training_input.
  if scale_tier and not training_input.get('scaleTier'):
    training_input['scaleTier'] = scale_tier
  if not training_input.get('masterConfig'):
    # If no replica config is specified, create a default one.
    if not image_uri:
      raise ValueError('image_uri is required when masterConfig is not '
                       'explicitly specified in training_input.')
    training_input['masterConfig'] = {'imageUri': image_uri}
    # Note: A custom entrypoint can be set to training_input['masterConfig']
    # through key 'container_command'.
  training_input['args'] = args
  if not training_input.get('region'):
    if not region:
      raise ValueError('region is required when it is not set in '
                       'training_input.')
    training_input['region'] = region
  # Squash training_input, project, job_id, and labels into an exec property
  # namely 'aip_training_config'.
  aip_training_config = {
      ai_platform_training_executor.PROJECT_CONFIG_KEY: project_id,
      ai_platform_training_executor.TRAINING_INPUT_CONFIG_KEY: training_input,
      ai_platform_training_executor.JOB_ID_CONFIG_KEY: job_id,
      ai_platform_training_executor.LABELS_CONFIG_KEY: labels,
  }
  aip_training_config_str = json_utils.dumps(aip_training_config)
  # Construct the component spec.
  if inputs is None:
    inputs = {}
  if outputs is None:
    outputs = {}
  if parameters is None:
    parameters = {}
  input_channel_parameters = {}
  output_channel_parameters = {}
  output_channels = {}
  execution_parameters = {
      ai_platform_training_executor.CONFIG_KEY:
          component_spec.ExecutionParameter(type=(str, Text))
  }
  for input_name, single_channel in inputs.items():
    # Infer the type of input channels based on the channels passed in.
    # TODO(b/155804245) Sanitize the names so that they're valid python names
    input_channel_parameters[input_name] = (
        component_spec.ChannelParameter(type=single_channel.type))
  for output_name, channel_type in outputs.items():
    # TODO(b/155804245) Sanitize the names so that they're valid python names
    output_channel_parameters[output_name] = (
        component_spec.ChannelParameter(type=channel_type))
    artifact = channel_type()
    channel = channel_utils.as_channel([artifact])
    output_channels[output_name] = channel
  # TODO(jxzheng): Support RuntimeParameter as parameters.
  for param_name, single_parameter in parameters.items():
    # Infer the type of parameters based on the parameters passed in.
    # TODO(b/155804245) Sanitize the names so that they're valid python names
    if not isinstance(single_parameter, (int, float, Text, bytes)):
      raise TypeError(
          'Parameter can only be int/float/str/bytes, got {}'.format(
              type(single_parameter)))
    execution_parameters[param_name] = (
        component_spec.ExecutionParameter(type=type(single_parameter)))
  default_init_args = {
      **inputs,
      **output_channels,
      **parameters, ai_platform_training_executor.CONFIG_KEY:
          aip_training_config_str
  }
  tfx_component_class = component_utils.create_tfx_component_class(
      name=name,
      tfx_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_training_executor.AiPlatformTrainingExecutor),
      input_channel_parameters=input_channel_parameters,
      output_channel_parameters=output_channel_parameters,
      execution_parameters=execution_parameters,
      default_init_args=default_init_args)
  return tfx_component_class()
# %%
import pandas as pd
import numpy as np
import plotly.express as px
# %%
def rfm_score_generator(data,totalPaid, day_bought,customerID, invoiceNo = "", format_ = '%d.%m.%Y', R_w=0.15, F_w=0.28, M_w =0.57):
    """Compute RFM (Recency, Frequency, Monetary) scores and customer segments.

    Parameters
    ----------
    data : pandas.DataFrame
        Transactional data, one row per purchase. NOTE: mutated in place —
        ``data[day_bought]`` is converted to datetime and a ``Frequency``
        column is added.
    totalPaid : str
        Column with the amount paid per transaction.
    day_bought : str
        Column with the purchase date (parsed with ``format_``).
    customerID : str
        Column identifying the customer.
    invoiceNo : str
        Column identifying an invoice; defaults to ``day_bought`` so that
        frequency counts distinct purchase days.
    format_ : str
        ``strftime`` format used to parse ``day_bought``.
    R_w, F_w, M_w : float
        Weights of the recency/frequency/monetary components (should sum to 1).

    Returns
    -------
    pandas.DataFrame
        One row per customer with rank columns, ``RFM_Score`` (0-5 scale,
        5 is best) and a ``Customer_segment`` label.
    """
    if invoiceNo == "":
        invoiceNo = day_bought
    data[day_bought] = pd.to_datetime(data[day_bought], format=format_)
    # Frequency = number of distinct invoices (or purchase days) per customer.
    data["Frequency"] = data.groupby(customerID)[invoiceNo].transform('nunique')
    RFM = data.groupby(customerID).agg(
        {day_bought: "max", "Frequency": "max", totalPaid: "sum"}).reset_index()
    # Recency = days since the customer's last purchase, measured from the
    # most recent purchase in the whole dataset.
    recent_date = RFM[day_bought].max()
    RFM['Recency'] = RFM[day_bought].apply(lambda x: (recent_date - x).days)
    RFM.drop(columns=day_bought, inplace=True)
    # Rank so that a higher rank is always better (recent, frequent, high-spend).
    RFM['R_rank'] = RFM['Recency'].rank(ascending=False)
    RFM['F_rank'] = RFM['Frequency'].rank(ascending=True)
    RFM['M_rank'] = RFM[totalPaid].rank(ascending=True)
    # Normalise ranks to a 0-100 scale.
    RFM['R_rank_norm'] = (RFM['R_rank'] / RFM['R_rank'].max()) * 100
    RFM['F_rank_norm'] = (RFM['F_rank'] / RFM['F_rank'].max()) * 100
    # BUG FIX: previously used F_rank here, which made the monetary component
    # a duplicate of the frequency component instead of using M_rank.
    RFM['M_rank_norm'] = (RFM['M_rank'] / RFM['M_rank'].max()) * 100
    RFM['RFM_Score'] = (R_w * RFM['R_rank_norm']
                        + F_w * RFM['F_rank_norm']
                        + M_w * RFM['M_rank_norm'])
    RFM['RFM_Score'] *= 0.05  # rescale 0-100 weighted score to 0-5; 5 is the top
    RFM = RFM.round(2)
    # Segment thresholds on the 0-5 score.
    RFM["Customer_segment"] = np.where(
        RFM['RFM_Score'] > 4.5, "Top",
        np.where(RFM['RFM_Score'] > 4, "High value",
                 np.where(RFM['RFM_Score'] > 3, "Medium Value",
                          np.where(RFM['RFM_Score'] > 1.6,
                                   'Low Value', 'Lost'))))
    return RFM
# %%
def rfm_tree_map(data,totalPaid, day_bought,customerID, invoiceNo = "", format_ = '%d.%m.%Y', R_w=0.15, F_w=0.28, M_w =0.57):
    """Compute RFM customer segments and display them as a plotly treemap.

    Parameters
    ----------
    data : pandas.DataFrame
        Transactional data, one row per purchase. NOTE: mutated in place
        (the date column is parsed and a ``Frequency`` column is added).
    totalPaid : str
        Column with the amount paid per transaction.
    day_bought : str
        Column with the purchase date (parsed with ``format_``).
    customerID : str
        Column identifying the customer.
    invoiceNo : str
        Column identifying an invoice; defaults to ``day_bought``.
    format_ : str
        ``strftime`` format used to parse ``day_bought``.
    R_w, F_w, M_w : float
        Weights of the recency/frequency/monetary components.

    Returns
    -------
    None
        The treemap of segment sizes is rendered via ``fig1.show()``.
    """
    if invoiceNo == "":
        invoiceNo = day_bought
    data[day_bought] = pd.to_datetime(data[day_bought], format=format_)
    # Frequency = number of distinct invoices (or purchase days) per customer.
    data["Frequency"] = data.groupby(customerID)[invoiceNo].transform('nunique')
    RFM = data.groupby(customerID).agg(
        {day_bought: "max", "Frequency": "max", totalPaid: "sum"}).reset_index()
    recent_date = RFM[day_bought].max()
    RFM['Recency'] = RFM[day_bought].apply(lambda x: (recent_date - x).days)
    RFM.drop(columns=day_bought, inplace=True)
    # Rank so that a higher rank is always better.
    RFM['R_rank'] = RFM['Recency'].rank(ascending=False)
    RFM['F_rank'] = RFM['Frequency'].rank(ascending=True)
    RFM['M_rank'] = RFM[totalPaid].rank(ascending=True)
    RFM['R_rank_norm'] = (RFM['R_rank'] / RFM['R_rank'].max()) * 100
    RFM['F_rank_norm'] = (RFM['F_rank'] / RFM['F_rank'].max()) * 100
    # BUG FIX: previously normalised F_rank here, duplicating the frequency
    # component instead of using the monetary rank.
    RFM['M_rank_norm'] = (RFM['M_rank'] / RFM['M_rank'].max()) * 100
    RFM['RFM_Score'] = (R_w * RFM['R_rank_norm']
                        + F_w * RFM['F_rank_norm']
                        + M_w * RFM['M_rank_norm'])
    RFM['RFM_Score'] *= 0.05  # rescale to 0-5; 5 is the top
    RFM = RFM.round(2)
    RFM["Customer_segment"] = np.where(
        RFM['RFM_Score'] > 4.5, "Top",
        np.where(RFM['RFM_Score'] > 4, "High value",
                 np.where(RFM['RFM_Score'] > 3, "Medium Value",
                          np.where(RFM['RFM_Score'] > 1.6,
                                   'Low Value', 'Lost'))))
    # Count customers per segment; the counts become the treemap tile sizes.
    RFM_1 = RFM.groupby("Customer_segment")[[customerID]].count().reset_index()
    RFM_1.rename(columns={customerID: "Size Segment"}, inplace=True)
    fig1 = px.treemap(
        RFM_1,
        path=[px.Constant("<br>"), 'Customer_segment'],
        values='Size Segment',
        color='Size Segment',
        color_continuous_scale=px.colors.sequential.matter,
        custom_data=RFM_1[['Customer_segment', 'Size Segment']],
    )
    fig1.show()
    return
# %%
def rfm_pie_chart(data,totalPaid, day_bought,customerID, invoiceNo = "", format_ = '%d.%m.%Y', R_w=0.15, F_w=0.28, M_w =0.57):
    """Compute RFM customer segments and display them as a plotly pie chart.

    Parameters
    ----------
    data : pandas.DataFrame
        Transactional data, one row per purchase. NOTE: mutated in place
        (the date column is parsed and a ``Frequency`` column is added).
    totalPaid : str
        Column with the amount paid per transaction.
    day_bought : str
        Column with the purchase date (parsed with ``format_``).
    customerID : str
        Column identifying the customer.
    invoiceNo : str
        Column identifying an invoice; defaults to ``day_bought``.
    format_ : str
        ``strftime`` format used to parse ``day_bought``.
    R_w, F_w, M_w : float
        Weights of the recency/frequency/monetary components.

    Returns
    -------
    None
        The pie chart of segment sizes is rendered via ``fig.show()``.
    """
    if invoiceNo == "":
        invoiceNo = day_bought
    data[day_bought] = pd.to_datetime(data[day_bought], format=format_)
    # Frequency = number of distinct invoices (or purchase days) per customer.
    data["Frequency"] = data.groupby(customerID)[invoiceNo].transform('nunique')
    RFM = data.groupby(customerID).agg(
        {day_bought: "max", "Frequency": "max", totalPaid: "sum"}).reset_index()
    recent_date = RFM[day_bought].max()
    RFM['Recency'] = RFM[day_bought].apply(lambda x: (recent_date - x).days)
    RFM.drop(columns=day_bought, inplace=True)
    # Rank so that a higher rank is always better.
    RFM['R_rank'] = RFM['Recency'].rank(ascending=False)
    RFM['F_rank'] = RFM['Frequency'].rank(ascending=True)
    RFM['M_rank'] = RFM[totalPaid].rank(ascending=True)
    RFM['R_rank_norm'] = (RFM['R_rank'] / RFM['R_rank'].max()) * 100
    RFM['F_rank_norm'] = (RFM['F_rank'] / RFM['F_rank'].max()) * 100
    # BUG FIX: previously normalised F_rank here, duplicating the frequency
    # component instead of using the monetary rank.
    RFM['M_rank_norm'] = (RFM['M_rank'] / RFM['M_rank'].max()) * 100
    RFM['RFM_Score'] = (R_w * RFM['R_rank_norm']
                        + F_w * RFM['F_rank_norm']
                        + M_w * RFM['M_rank_norm'])
    RFM['RFM_Score'] *= 0.05  # rescale to 0-5; 5 is the top
    RFM = RFM.round(2)
    RFM["Customer_segment"] = np.where(
        RFM['RFM_Score'] > 4.5, "Top",
        np.where(RFM['RFM_Score'] > 4, "High value",
                 np.where(RFM['RFM_Score'] > 3, "Medium Value",
                          np.where(RFM['RFM_Score'] > 1.6,
                                   'Low Value', 'Lost'))))
    # Count customers per segment; the counts become the pie slice sizes.
    RFM_1 = RFM.groupby("Customer_segment")[[customerID]].count().reset_index()
    RFM_1.rename(columns={customerID: "Size Segment"}, inplace=True)
    fig = px.pie(RFM_1, values='Size Segment', names='Customer_segment', title='Customer Segment Pie Chart', color_discrete_sequence=px.colors.sequential.matter)
    fig.show()
    return
<br>
<br>
<div align="left">
<img style="width:400px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm-logos_black.png"><br>
</div>
<br>
[](https://pypi.org/project/rfm/)

[](https://pypi.org/project/rfm/)
[](https://github.com/sonwanesuresh95/rfm/blob/main/LICENSE)

# rfm
<b>rfm: Python Package for RFM Analysis and Customer Segmentation</b>
## Info
**rfm** is a Python package that provides **recency, frequency, monetary analysis** results
for a given transactional dataset in a snap. Its flexible structure and multiple automated
functionalities provide an easy and intuitive approach to RFM Analysis.
It aims to be a ready-made Python package for high-level and quick prototyping.
On the practical side, **real-world data** is easily accommodated by the package.
Additionally, it can produce colorful, intuitive graphs using a matplotlib backend without
breaking a sweat.
## Installation
### Dependencies
<ul>
<li>Python (>=3.7)</li>
<li>Pandas (>=1.2.4)</li>
<li>NumPy (>=1.20.1)</li>
<li>matplotlib (>=3.3.4)</li>
</ul>
To install the current release (Ubuntu and Windows):
```
$ pip install rfm
```
## Usage
```
# predefine a transaction dataset as df
>>> from rfm import RFM
>>> r = RFM(df, customer_id='CustomerID', transaction_date='InvoiceDate', amount='Amount')
>>> r.plot_segment_distribution()
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_seg_dist.png"><br>
</div>
# License
[MIT](https://github.com/sonwanesuresh95/rfm/blob/main/LICENSE)
# Documentation
<!-- Temporarily Hosted Here -->
## Initialization
Read required dataframe
```
>>> df = pd.read_csv('~./data.csv')
```
Import RFM package and start rfm analysis automatically:
```
>>> from rfm import RFM
>>> r = RFM(df, customer_id='CustomerID', transaction_date='InvoiceDate', amount='Amount')
>>> r.rfm_table
```
If you want to do rfm analysis manually:
```
>>> r = RFM(df, customer_id='CustomerID', transaction_date='InvoiceDate', amount='Amount', automated=False)
```
## Attributes
### RFM.rfm_table
returns resultant rfm table df generated with recency, frequency & monetary values and scores along with segments
```
>>> r.rfm_table
```
<div align="left">
<img style="width:500px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_table.png"><br>
</div>
### RFM.segment_table
returns segment table df with 10 unique categories i.e. Champions, Loyal Accounts etc.
```
>>> r.segment_table
```
<div align="left">
<img style="height:250px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_segment_table.png"><br>
</div>
## Methods
### RFM.plot_rfm_histograms()
Plots recency, frequency and monetary histograms in a single row
```
>>> r.plot_rfm_histograms()
```
<div align="left">
<img style="width:700px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_histograms.png"><br>
</div>
### RFM.plot_rfm_order_distribution()
Plots orders by customer number
```
>>> r.plot_rfm_order_distribution()
```
<div align="left">
<img style="width:700px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_order_dist.png"><br>
</div>
### RFM.plot_versace_plot(column1, column2)
Plots scatterplot of two input columns
```
>>> r.plot_versace_plot(column1='recency',column2='monetary_value')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_rm.png"><br>
</div>
```
>>> r.plot_versace_plot(column1='recency',column2='frequency')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_rf.png"><br>
</div>
```
>>> r.plot_versace_plot(column1='frequency',column2='monetary_value')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_fm.png"><br>
</div>
### RFM.plot_distribution_by_segment(column, take)
Plots Distribution of input column by segment
```
>>> r.plot_distribution_by_segment(column='recency',take='median')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_mrdian_rec.png"><br>
</div>
```
>>> r.plot_distribution_by_segment(column='frequency',take='median')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_median_freq.png"><br>
</div>
```
>>> r.plot_distribution_by_segment(column='monetary_value',take='median')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_med_mon.png"><br>
</div>
### RFM.plot_column_distribution(column)
Plots column distribution of input column
```
>>> r.plot_column_distribution(column='recency')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_col_dist_rec.png"><br>
</div>
```
>>> r.plot_column_distribution(column='frequency')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_col_dist_freq.png"><br>
</div>
```
>>> r.plot_column_distribution(column='monetary_value')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_col_dist_mon.png"><br>
</div>
### RFM.plot_segment_distribution()
```
>>> r.plot_segment_distribution()
```
Plots Segment Distribution, i.e. Segments vs no. of customers
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_seg_dist.png"><br>
</div>
### RFM.find_customers(segment)
returns rfm results df with input category
```
>>> r.find_customers('Champions')
```
<div align="left">
<img style="width:550px" src="https://github.com/sonwanesuresh95/rfm/blob/main/example_/rfm_champions.png"><br>
</div>
| /rfm-1.0.9.tar.gz/rfm-1.0.9/README.md | 0.781539 | 0.971456 | README.md | pypi |
import struct
import yaml
import logging
logger = logging.getLogger(__name__)
class NodeDefinition:
    """Describes one RF node: how to decode its payloads and encode commands.

    Channel/command expressions are evaluated with ``eval`` and may only
    reference the name ``x`` (the tuple of unpacked payload values or the
    list of command arguments).

    NOTE(security): ``eval`` executes arbitrary Python, so node definitions
    must only come from a trusted configuration source.
    """

    def __init__(self, name, node_id, payload_format,
                 channels=None, commands=None):
        self.name = name
        self.id = node_id
        # struct format string (without byte-order prefix) for the payload.
        self.payload_format = payload_format
        self.channels = channels if channels is not None else {}
        self.commands = commands if commands is not None else {}

    def parse_values(self, values):
        """Map a tuple of raw payload values to a ``{channel: value}`` dict.

        Raises:
            ValueError: if an expression indexes past the available values.
            RuntimeError: if an expression references a name other than 'x'.
        """
        result = {}
        for k, channel in self.channels.items():
            try:
                logger.debug("eval '%s' with %s", channel['value'], values)
                value = eval(channel['value'], dict(x=values))
            except IndexError:
                raise ValueError("Not enough values")
            except NameError:
                raise RuntimeError("Expression used name other than 'x'")
            else:
                result[k] = value
        return result

    def parse_payload(self, payload):
        """Unpack a little-endian binary payload and evaluate all channels.

        Raises:
            ValueError: if the payload length does not match the format.
        """
        fmt = '<' + self.payload_format
        expected_len = struct.calcsize(fmt)
        if len(payload) != expected_len:
            # BUG FIX: the original message was missing the closing ')'.
            raise ValueError(
                "Bad payload length (expected {} bytes for format '{}', got {})"
                .format(expected_len, fmt, len(payload)))
        data = struct.unpack(fmt, payload)
        logger.debug("Node %d: parsed payload %s", self.id, data)
        # Process into the final values dictionary.
        return self.parse_values(data)

    def encode_command(self, command_name, values):
        """Encode *values* for the named command into a binary payload.

        Raises:
            ValueError: if the expression runs out of values, or the evaluated
                values do not match the command's payload format.
            RuntimeError: if the expression references a name other than 'x'.
        """
        command = self.commands[command_name]
        try:
            logger.debug("eval '%s' with %s", command['values'], values)
            values = eval(command['values'], dict(x=values))
        except IndexError:
            raise ValueError("Not enough values")
        except NameError:
            raise RuntimeError("Expression used name other than 'x'")
        # Pack the transformed values little-endian.
        fmt = '<' + command['payload']
        try:
            payload = struct.pack(fmt, *values)
        except struct.error:
            raise ValueError(("Wrong number of values ({}) for payload format "
                              "'{}' while processing command '{}'")
                             .format(len(values), fmt, command_name))
        return payload

    def __repr__(self):
        return "<NodeDefinition #{} {}>".format(self.id, self.name)

    def __eq__(self, other):
        # Strict type check on purpose: subclasses compare unequal.
        return type(self) == type(other) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
def _ensure_values_are_strings(channels):
    """Coerce each entry's 'value'/'values' expression to ``str``, in place.

    YAML may parse a bare expression such as ``42`` as a number; the eval
    machinery expects strings. Returns the same mapping for chaining.
    """
    for entry in channels.values():
        for key in ('value', 'values'):
            if key in entry:
                entry[key] = str(entry[key])
    return channels
def load_definitions_from_yaml(stream):
    """Parse a YAML stream into a list of :class:`NodeDefinition` objects."""
    definitions = []
    for entry in yaml.safe_load(stream):
        channels = _ensure_values_are_strings(entry.get('channels', {}))
        commands = _ensure_values_are_strings(entry.get('commands', {}))
        definitions.append(
            NodeDefinition(entry['name'], entry['node_id'], entry['payload'],
                           channels, commands))
    return definitions
from .base import Scraper
class ZooplaScraper(Scraper):
    """Scraper for rental listings on zoopla.co.uk.

    After ``set_search_settings`` is called, iterating over an instance
    yields one dict per listing on the first results page (sorted by
    newest listings) with keys: ``id``, ``price`` (GBP per calendar month),
    ``num_beds``, ``num_baths``, ``num_liv_rooms``, ``address``.

    NOTE(review): assumes the base ``Scraper`` fetches the page when
    ``self.url`` is assigned and exposes the parsed HTML as ``self.soup``
    -- confirm against the base class.
    """

    BASE_URL = "https://www.zoopla.co.uk/"

    def __init__(self) -> None:
        super().__init__()

    def set_search_settings(self, city_name: str = None, max_price: int = None):
        """Store the city and maximum monthly price used to build the search URL."""
        super().set_search_settings(city_name=city_name, max_price=max_price)

    def __iter__(self):
        """Yield a dict of details for each listing found on the results page."""
        assert (
            "city_name" in self.search_settings
        ), "make sure to set `search_settings` first"
        assert (
            "max_price" in self.search_settings
        ), "make sure to set `search_settings` first"
        city_name = self.search_settings["city_name"]
        max_price = self.search_settings["max_price"]
        self.url = (
            self.BASE_URL
            + f"to-rent/property/{city_name}/?price_frequency=per_month&q={city_name}&results_sort=newest_listings&search_source=refine&price_max={max_price}&view_type=list"
        )
        print(self._url)
        # Zoopla marks the regular (non-featured) result cards with this id.
        pages = self.soup.find("div", attrs={"data-testid": "regular-listings"})
        for page in pages:
            # Find the property's id: normally the numeric path segment of
            # the card's first link. ('listing_id' avoids shadowing builtin
            # `id`.)
            try:
                listing_id = [
                    str(el)
                    for el in str(page.find(href=True)["href"]).split("/")
                    if el.isdigit()
                ][0]
            except IndexError:  # premium listings use a different link layout
                print("ID could not be found, searching for it...")
                listing_id = [
                    i
                    for i in [
                        el["href"]
                        for el in page.find_all(href=True)
                        if "search_identifier" in el["href"]
                    ][0].split("/")
                    if i.isdigit()
                ][0]
            # Room counts are rendered as "<label> <count>" span pairs; the
            # label is absent when the listing has none of that room type.
            spans = [el.text for el in page.find("div").find_all("span")]
            bed_num = int(spans[spans.index("Bedrooms") + 1]) if "Bedrooms" in spans else 0
            bath_num = int(spans[spans.index("Bathrooms") + 1]) if "Bathrooms" in spans else 0
            chair_num = int(spans[spans.index("Living rooms") + 1]) if "Living rooms" in spans else 0
            # The price is the <p> element containing 'pcm' (per calendar
            # month); strip everything but digits to get an int in GBP.
            all_info = [el.text for el in page.find_all("p")]
            raw_price = [info for info in all_info if "pcm" in info][0]
            price = int("".join(el for el in raw_price if el.isdigit()))
            address = page.find("h3").text
            yield {
                "id": int(listing_id),
                "price": price,
                "num_beds": bed_num,
                "num_baths": bath_num,
                "num_liv_rooms": chair_num,
                "address": address,
            }
from .base import Scraper
class ZooplaScraper(Scraper):
    """Scraper for rental listings on zoopla.co.uk.

    After ``set_search_settings`` is called, iterating over an instance
    yields one dict per listing on the first results page (sorted by
    newest listings) with keys: ``id``, ``price`` (GBP per calendar month),
    ``num_beds``, ``num_baths``, ``num_liv_rooms``, ``address``.

    NOTE(review): assumes the base ``Scraper`` fetches the page when
    ``self.url`` is assigned and exposes the parsed HTML as ``self.soup``
    -- confirm against the base class.
    """

    BASE_URL = "https://www.zoopla.co.uk/"

    def __init__(self) -> None:
        super().__init__()

    def set_search_settings(self, city_name: str = None, max_price: int = None):
        """Store the city and maximum monthly price used to build the search URL."""
        super().set_search_settings(city_name=city_name, max_price=max_price)

    def __iter__(self):
        """Yield a dict of details for each listing found on the results page."""
        assert (
            "city_name" in self.search_settings
        ), "make sure to set `search_settings` first"
        assert (
            "max_price" in self.search_settings
        ), "make sure to set `search_settings` first"
        city_name = self.search_settings["city_name"]
        max_price = self.search_settings["max_price"]
        self.url = (
            self.BASE_URL
            + f"to-rent/property/{city_name}/?price_frequency=per_month&q={city_name}&results_sort=newest_listings&search_source=refine&price_max={max_price}&view_type=list"
        )
        print(self._url)
        # Zoopla marks the regular (non-featured) result cards with this id.
        pages = self.soup.find("div", attrs={"data-testid": "regular-listings"})
        for page in pages:
            # Find the property's id: normally the numeric path segment of
            # the card's first link. ('listing_id' avoids shadowing builtin
            # `id`.)
            try:
                listing_id = [
                    str(el)
                    for el in str(page.find(href=True)["href"]).split("/")
                    if el.isdigit()
                ][0]
            except IndexError:  # premium listings use a different link layout
                print("ID could not be found, searching for it...")
                listing_id = [
                    i
                    for i in [
                        el["href"]
                        for el in page.find_all(href=True)
                        if "search_identifier" in el["href"]
                    ][0].split("/")
                    if i.isdigit()
                ][0]
            # Room counts are rendered as "<label> <count>" span pairs; the
            # label is absent when the listing has none of that room type.
            spans = [el.text for el in page.find("div").find_all("span")]
            bed_num = int(spans[spans.index("Bedrooms") + 1]) if "Bedrooms" in spans else 0
            bath_num = int(spans[spans.index("Bathrooms") + 1]) if "Bathrooms" in spans else 0
            chair_num = int(spans[spans.index("Living rooms") + 1]) if "Living rooms" in spans else 0
            # The price is the <p> element containing 'pcm' (per calendar
            # month); strip everything but digits to get an int in GBP.
            all_info = [el.text for el in page.find_all("p")]
            raw_price = [info for info in all_info if "pcm" in info][0]
            price = int("".join(el for el in raw_price if el.isdigit()))
            address = page.find("h3").text
            yield {
                "id": int(listing_id),
                "price": price,
                "num_beds": bed_num,
                "num_baths": bath_num,
                "num_liv_rooms": chair_num,
                "address": address,
            }
# RFPYE
> Este módulo tem como objetivo o processamento e extração otimizada de dados dos arquivos `.bin` de monitoramento do espectro provenientes do script Logger executados nas estações de Monitoramento CRFS RFeye Node. Para tal utilizamos as várias funcionalidades da biblioteca <a href='https://fastcore.fast.ai/basics.html'>fastcore</a>, que expande e otimiza as estruturas de dados da linguagem python.
## Instalação
Como parte dessa lib utiliza código C compilado com `Cython`, é preciso que um compilador `C` esteja instalado. É recomendada a criação de um ambiente virtual para que a instalação das dependências não interfira com a instalação base do Python. Para tal, recomendamos o uso do conda. A seguir são mostradas instruções para a criação do ambiente virtual com todas as dependências, utilizando o conda.
Instale o [miniconda](https://docs.conda.io/en/latest/miniconda.html). Com o conda instalado e disponível no seu `PATH` ou através do `Anaconda Prompt`, execute os comando:
### Linux:
Em Linux normalmente o sistema já possui o compilador `gcc` instalado.
```bash
conda create -n rfpye pip python=3.7 gcc -c intel -c conda-forge -y
conda activate rfpye
python -m pip install rfpye
```
### Windows
É preciso ter o compilador `Microsoft Visual C++ 2015-2019 Redistributable x64` Versão 14.x instalado.
```bash
conda create -n rfpye pip python=3.7 libpython m2w64-toolchain -c intel -y
conda activate rfpye
python -m pip install rfpye
```
O comando acima cria um ambiente virtual com o mesmo nome da biblioteca `rfpye`, instala as dependências básicas necessárias para a compilação, em seguida ativa o ambiente virtual e instala o módulo.
Depois disso basta instalar normalmente a lib:
`python -m pip install rfpye`
## Como utilizar
Abaixo mostramos as funcionalidades principais dos módulos, utilizando-os dentro de algum outro script ou `REPL`
Precisamos necessariamente de um diretório de entrada, contendo um ou mais arquivos `.bin`
> Mude os caminhos abaixo para suas pastas locais
```
from fastcore.xtras import Path
from rfpye.utils import get_files
from rich import print
```
A função abaixo baixa alguns arquivos de exemplo:
```
path = Path(r'binfiles')
if not path.exists() or not len(get_files(path, extensions=['.bin'])):
path = Path('.')
!wget --header 'Host: raw.githubusercontent.com' --user-agent 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' --header 'Accept-Language: pt-BR,pt;q=0.8,en-US;q=0.5,en;q=0.3' --referer 'https://github.com/EricMagalhaesDelgado/SpecFiles/blob/main/Combo3%20(CRFS%20Bin%20-%20DataTypes%204%2C%207%2C%208%2C%2060-65%20e%2067-69)/rfeye002092_210208_T202310_CRFSBINv.5.bin' --header 'DNT: 1' --header 'Upgrade-Insecure-Requests: 1' 'https://raw.githubusercontent.com/EricMagalhaesDelgado/SpecFiles/main/Combo3%20(CRFS%20Bin%20-%20DataTypes%204%2C%207%2C%208%2C%2060-65%20e%2067-69)/rfeye002092_210208_T202310_CRFSBINv.5.bin' --output-document 'rfeye002092_210208_T202310_CRFSBINv.5.bin'
!wget --header 'Host: raw.githubusercontent.com' --user-agent 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' --header 'Accept-Language: pt-BR,pt;q=0.8,en-US;q=0.5,en;q=0.3' --referer 'https://github.com/EricMagalhaesDelgado/SpecFiles/blob/main/Combo3%20(CRFS%20Bin%20-%20DataTypes%204%2C%207%2C%208%2C%2060-65%20e%2067-69)/rfeye002092_210208_T203131_CRFSBINv.2.bin' --header 'DNT: 1' --header 'Upgrade-Insecure-Requests: 1' 'https://raw.githubusercontent.com/EricMagalhaesDelgado/SpecFiles/main/Combo3%20(CRFS%20Bin%20-%20DataTypes%204%2C%207%2C%208%2C%2060-65%20e%2067-69)/rfeye002092_210208_T203131_CRFSBINv.2.bin' --output-document 'rfeye002092_210208_T203131_CRFSBINv.2.bin'
!wget --header 'Host: raw.githubusercontent.com' --user-agent 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' --header 'Accept-Language: pt-BR,pt;q=0.8,en-US;q=0.5,en;q=0.3' --referer 'https://github.com/EricMagalhaesDelgado/SpecFiles/blob/main/Combo3%20(CRFS%20Bin%20-%20DataTypes%204%2C%207%2C%208%2C%2060-65%20e%2067-69)/rfeye002292_210208_T202215_CRFSBINv.4.bin' --header 'DNT: 1' --header 'Upgrade-Insecure-Requests: 1' 'https://raw.githubusercontent.com/EricMagalhaesDelgado/SpecFiles/main/Combo3%20(CRFS%20Bin%20-%20DataTypes%204%2C%207%2C%208%2C%2060-65%20e%2067-69)/rfeye002292_210208_T202215_CRFSBINv.4.bin' --output-document 'rfeye002292_210208_T202215_CRFSBINv.4.bin'
!wget --header 'Host: raw.githubusercontent.com' --user-agent 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' --header 'Accept-Language: pt-BR,pt;q=0.8,en-US;q=0.5,en;q=0.3' --referer 'https://github.com/EricMagalhaesDelgado/SpecFiles/blob/main/Combo3%20(CRFS%20Bin%20-%20DataTypes%204%2C%207%2C%208%2C%2060-65%20e%2067-69)/rfeye002292_210208_T203238_CRFSBINv.3.bin' --header 'DNT: 1' --header 'Upgrade-Insecure-Requests: 1' 'https://raw.githubusercontent.com/EricMagalhaesDelgado/SpecFiles/main/Combo3%20(CRFS%20Bin%20-%20DataTypes%204%2C%207%2C%208%2C%2060-65%20e%2067-69)/rfeye002292_210208_T203238_CRFSBINv.3.bin' --output-document 'rfeye002292_210208_T203238_CRFSBINv.3.bin'
```
A função `parse_bin` é a função principal que encapsula o processamento dos arquivos bin.
<h4 id="parse_bin" class="doc_header"><code>parse_bin</code><a href="https://github.com/ronaldokun/rfpye/tree/master/rfpye/parser.py#L103" class="source_link" style="float:right">[source]</a></h4>
> <code>parse_bin</code>(**`bin_file`**:`Union`\[`str`, `Path`\], **`precision`**=*`float32`*)
Receives a CRFS binfile and returns a dictionary with the file metadata, a GPS Class and a list with the different Spectrum Classes
A block is a piece of the .bin file with a known start and end and that contains different types of information.
It has several fields: file_type, header, data and footer.
Each field has lengths and information defined in the documentation.
Args:
bin_file (Union[str, Path]): path to the bin file
Returns:
Dictionary with the file metadata, file_version, string info, gps and spectrum blocks.
## Extração de Dados
Vamos listar arquivos da última versão do script Logger, **CRFS Bin - Versão 5**
```
files = get_files(r'binfiles/v5', extensions=['.bin'])
file = files.shuffle()[0]
```
```
%%time
dados = parse_bin(file)
```
Wall time: 8.45 s
```
print(dados)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">{</span>
<span style="color: #008000; text-decoration-color: #008000">'filename'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002092_210630_T094705.bin'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_version'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">23</span>,
<span style="color: #008000; text-decoration-color: #008000">'string'</span>: <span style="color: #008000; text-decoration-color: #008000">'CRFS DATA FILE V023'</span>,
<span style="color: #008000; text-decoration-color: #008000">'hostname'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002092'</span>,
<span style="color: #008000; text-decoration-color: #008000">'method'</span>: <span style="color: #008000; text-decoration-color: #008000">'ScriptRFeye2021_v2.cfg'</span>,
<span style="color: #008000; text-decoration-color: #008000">'unit_info'</span>: <span style="color: #008000; text-decoration-color: #008000">'Stationary'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_number'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>,
<span style="color: #008000; text-decoration-color: #008000">'identifier'</span>: <span style="color: #008000; text-decoration-color: #008000">'INFO'</span>,
<span style="color: #008000; text-decoration-color: #008000">'gps'</span>: GPS Data - Median of Coordinates: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-12.82684</span>:<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-38.47805</span> Altitude: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">120.80</span>
#Satellites: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">12.0</span>,
<span style="color: #008000; text-decoration-color: #008000">'spectrum'</span>: <span style="font-weight: bold">[</span><span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">300</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 1 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">18457</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">310</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 2 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">155</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">165</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1024</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">18457</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PRD 2021 (Faixa principal 1 de 4).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">50</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">90</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBμV/m'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1024</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">110</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PRD 2021 (Faixa principal 2 de 4).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">70</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">110</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBμV/m'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1024</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">120</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PRD 2021 (Faixa principal 3 de 4).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">170</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">220</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBμV/m'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1280</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">130</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PRD 2021 (Faixa principal 4 de 4).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">470</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">700</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBμV/m'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">5888</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">200</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEF 2021 (Faixa 1 de 6).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">700</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">960</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">6656</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">210</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEF 2021 (Faixa 2 de 6).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1710</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1980</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">6912</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">220</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEF 2021 (Faixa 3 de 6).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2100</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2169</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1792</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">230</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEF 2021 (Faixa 4 de 6).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2290</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2390</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2560</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">240</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEF 2021 (Faixa 5 de 6).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2500</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2690</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4864</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">250</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEF 2021 (Faixa 6 de 6).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3290</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3700</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">10496</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">320</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 3 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">320</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">340</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">512</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">330</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 4 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">400</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">410</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">256</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">340</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 5 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">960</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1219</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">6656</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">350</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 6 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1389</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1429</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1280</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">360</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 7 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1530</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1649</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3072</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">370</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 8 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2690</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2899</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">5376</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">380</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 9 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">5000</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">5160</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4096</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">390</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 10 de 10).'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">5339</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">5459</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3328</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)]</span>
<span style="font-weight: bold">}</span>
</pre>
A saída da função é um dicionário, com os metadados do arquivo.
## GPS
No entanto as duas chaves mais importantes do dicionário retornado são `gps` e `spectrum`
Se você imprimir o objeto retornado pela chave `gps`, é exibido um resumo dos seus atributos:
```
print(dados['gps'])
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">GPS Data - Median of Coordinates: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-12.82684</span>:<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-38.47805</span> Altitude: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">120.80</span> #Satellites: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">12.0</span>
</pre>
> Para extrair os atributos em si de dado objeto e retorná-los todos num dicionário, o módulo utils tem a função auxiliar `getattrs`
<h4 id="getattrs" class="doc_header"><code>getattrs</code><a href="https://github.com/ronaldokun/rfpye/tree/master/rfpye/utils.py#L135" class="source_link" style="float:right">[source]</a></h4>
> <code>getattrs</code>(**`obj`**:`Any`, **`attrs`**:`Iterable`=*`None`*, **`as_tuple`**=*`False`*)
Receives an object and return the atributes listed in `attrs`, if attrs is None return its public attributes
```
print(getattrs(dados['gps']))
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">{</span><span style="color: #008000; text-decoration-color: #008000">'altitude'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">120.8</span>, <span style="color: #008000; text-decoration-color: #008000">'latitude'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-12.826842</span>, <span style="color: #008000; text-decoration-color: #008000">'longitude'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-38.478047</span>, <span style="color: #008000; text-decoration-color: #008000">'num_satellites'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">12.0</span><span style="font-weight: bold">}</span>
</pre>
Os atributos listados são os valores consolidados por meio da __mediana__ dos diversos blocos de GPS do arquivo.
### Dados Brutos de GPS
> Caso deseje a lista original de valores, a classe é iterável normalmente num loop e também é possível selecionar os índices individualmente.
```
dados['gps'][0] , dados['gps'][-1]
```
((-12.826869, -38.478055, 119.9, 9), (-12.826869, -38.478037, 114.4, 12))
```
for coords in dados['gps']:
lat, long, alt, num = coords
print(f'{lat:.6f} {long:.6f} {alt:.6f} {num}')
break
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-12.826869</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-38.478055</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">119.900000</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">9</span>
</pre>
Para saber quantos dados brutos existem, basta utilizar a função `len`:
```
len(dados['gps'])
```
9060
## Dados de Nível Espectral
Cada arquivo bin normalmente possui vários fluxos de espectro distintos; cada fluxo espectral é uma classe Python. Na chave `spectrum` é retornada uma lista com todos os fluxos de espectro.
```
fluxos = dados['spectrum']
print(len(fluxos))
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">20</span>
</pre>
Vamos investigar um deles:
```
fluxo = fluxos[0]
```
Ao imprimir um fluxo, são mostradas informações mínimas sobre o seu conteúdo:
```
print(fluxo)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">Blocks of Type: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, Thread_id: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">300</span>, Start: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span> MHz, Stop: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span> MHz
</pre>
A função `repr` retorna uma representação com todos os metadados do fluxo:
```
print(repr(fluxo))
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">300</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 1 de 10).'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">18457</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>
</pre>
Qualquer um dos atributos listados podem ser acessados diretamente:
```
print(fluxo.description) , print(fluxo.bw)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">PMEC <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2021</span> <span style="font-weight: bold">(</span>Faixa <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span> de <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">10</span><span style="font-weight: bold">)</span>.
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">18457</span>
</pre>
(None, None)
No entanto o principal atributo de um fluxo de espectro são os valores de nível medidos, os valores medidos são retornados por meio do atributo `levels`:
```
print(fluxo.levels)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">[[</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-88.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-86</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-84</span>. <span style="color: #808000; text-decoration-color: #808000">...</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-101</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-107.5</span><span style="font-weight: bold">]</span>
<span style="font-weight: bold">[</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-85</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-84</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-85</span>. <span style="color: #808000; text-decoration-color: #808000">...</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-97</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-101.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-103</span>. <span style="font-weight: bold">]</span>
<span style="font-weight: bold">[</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-83</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-83</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-84.5</span> <span style="color: #808000; text-decoration-color: #808000">...</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-103.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-99</span>. <span style="font-weight: bold">]</span>
<span style="color: #808000; text-decoration-color: #808000">...</span>
<span style="font-weight: bold">[</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-89.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-88</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-87</span>. <span style="color: #808000; text-decoration-color: #808000">...</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-99.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-102.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-114</span>. <span style="font-weight: bold">]</span>
<span style="font-weight: bold">[</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-87.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-86.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-87.5</span> <span style="color: #808000; text-decoration-color: #808000">...</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-99.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-103.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-105</span>. <span style="font-weight: bold">]</span>
<span style="font-weight: bold">[</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-92.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-91.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-90</span>. <span style="color: #808000; text-decoration-color: #808000">...</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-104.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-99.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-99</span>. <span style="font-weight: bold">]]</span>
</pre>
```
print(f'Formato da matriz com os níveis: {fluxo.levels.shape}')
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">Formato da matriz com os níveis: <span style="font-weight: bold">(</span><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">9060</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span><span style="font-weight: bold">)</span>
</pre>
O nº de linhas da matriz nos dá o número de traços medidos naquele dado fluxo e as colunas o número de pontos no qual o Span ( Stop - Start ) foi dividido. O número de traços pode ser retornado também por meio da função `len`
```
print(len(fluxo))
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">9060</span>
</pre>
A classe `Spectrum` é iterável, ou seja, pode ser acessada como uma lista; a cada iteração é retornada uma tupla com o timestamp e os pontos daquele traço:
```
for time, traço in fluxo:
print(time)
print(traço)
break
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2021</span>-<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">06</span>-30T<span style="color: #00ff00; text-decoration-color: #00ff00; font-weight: bold">09:46:11</span>.<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">447522</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">[</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-88.5</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-86</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-84</span>. <span style="color: #808000; text-decoration-color: #808000">...</span> <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-101</span>. <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-107.5</span><span style="font-weight: bold">]</span>
</pre>
O atributo anterior retorna um `numpy.ndarray`, que é um formato eficiente para processamento.
### Medidas de nível como pandas dataframe
No entanto, temos adicionalmente o método `.matrix()`, que retorna a matriz de dados como um _Pandas Dataframe_ formatado com o tempo da medição de cada traço como índice das linhas e as frequências de cada traço como colunas.
Vamos mostrar as cinco primeiras e cinco últimas linhas e colunas.
```
fluxo.matrix().iloc[:5, :5]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>Frequencies</th>
<th>105.000000</th>
<th>105.009768</th>
<th>105.019537</th>
<th>105.029305</th>
<th>105.039073</th>
</tr>
<tr>
<th>Time</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2021-06-30 09:46:11.447522</th>
<td>-88.5</td>
<td>-86.0</td>
<td>-84.0</td>
<td>-80.5</td>
<td>-82.5</td>
</tr>
<tr>
<th>2021-06-30 09:47:00.736878</th>
<td>-85.0</td>
<td>-84.0</td>
<td>-85.0</td>
<td>-88.0</td>
<td>-90.5</td>
</tr>
<tr>
<th>2021-06-30 09:48:00.736849</th>
<td>-83.0</td>
<td>-83.0</td>
<td>-84.5</td>
<td>-92.0</td>
<td>-87.0</td>
</tr>
<tr>
<th>2021-06-30 09:49:00.736763</th>
<td>-90.5</td>
<td>-96.5</td>
<td>-90.5</td>
<td>-85.5</td>
<td>-87.5</td>
</tr>
<tr>
<th>2021-06-30 09:50:00.736788</th>
<td>-86.5</td>
<td>-86.0</td>
<td>-86.5</td>
<td>-84.5</td>
<td>-85.0</td>
</tr>
</tbody>
</table>
</div>
```
fluxo.matrix().iloc[-5:, -5:]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>Frequencies</th>
<th>139.960927</th>
<th>139.970695</th>
<th>139.980463</th>
<th>139.990232</th>
<th>140.000000</th>
</tr>
<tr>
<th>Time</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2021-07-06 16:41:00.741214</th>
<td>-96.5</td>
<td>-96.5</td>
<td>-97.0</td>
<td>-97.0</td>
<td>-101.0</td>
</tr>
<tr>
<th>2021-07-06 16:42:00.781447</th>
<td>-111.0</td>
<td>-103.0</td>
<td>-102.0</td>
<td>-102.0</td>
<td>-105.0</td>
</tr>
<tr>
<th>2021-07-06 16:43:00.751170</th>
<td>-95.0</td>
<td>-98.0</td>
<td>-99.5</td>
<td>-102.5</td>
<td>-114.0</td>
</tr>
<tr>
<th>2021-07-06 16:44:00.761445</th>
<td>-98.0</td>
<td>-100.0</td>
<td>-99.5</td>
<td>-103.5</td>
<td>-105.0</td>
</tr>
<tr>
<th>2021-07-06 16:45:00.862489</th>
<td>-96.5</td>
<td>-101.0</td>
<td>-104.5</td>
<td>-99.5</td>
<td>-99.0</td>
</tr>
</tbody>
</table>
</div>
Novamente, caso se deseje acessar todos os atributos de um fluxo no formato de dicionário, basta utilizar a função `getattrs`
```
print(getattrs(fluxo))
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">{</span>
<span style="color: #008000; text-decoration-color: #008000">'antuid'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>,
<span style="color: #008000; text-decoration-color: #008000">'bw'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">18457</span>,
<span style="color: #008000; text-decoration-color: #008000">'description'</span>: <span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 1 de 10).'</span>,
<span style="color: #008000; text-decoration-color: #008000">'dtype'</span>: <span style="color: #008000; text-decoration-color: #008000">'dBm'</span>,
<span style="color: #008000; text-decoration-color: #008000">'ndata'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>,
<span style="color: #008000; text-decoration-color: #008000">'precision'</span>: <span style="font-weight: bold"><</span><span style="color: #ff00ff; text-decoration-color: #ff00ff; font-weight: bold">class</span><span style="color: #000000; text-decoration-color: #000000"> </span><span style="color: #008000; text-decoration-color: #008000">'numpy.float32'</span><span style="font-weight: bold">></span>,
<span style="color: #008000; text-decoration-color: #008000">'processing'</span>: <span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #008000; text-decoration-color: #008000">'start_dateidx'</span>: <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">datetime.datetime</span><span style="font-weight: bold">(</span><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2021</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">6</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">30</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">9</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">46</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">11</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">447522</span><span style="font-weight: bold">)</span>,
<span style="color: #008000; text-decoration-color: #008000">'start_mega'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>,
<span style="color: #008000; text-decoration-color: #008000">'stop_dateidx'</span>: <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">datetime.datetime</span><span style="font-weight: bold">(</span><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2021</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">7</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">6</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">16</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">45</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>, <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">862489</span><span style="font-weight: bold">)</span>,
<span style="color: #008000; text-decoration-color: #008000">'stop_mega'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>,
<span style="color: #008000; text-decoration-color: #008000">'thread_id'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">300</span>,
<span style="color: #008000; text-decoration-color: #008000">'type'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">67</span>
<span style="font-weight: bold">}</span>
</pre>
### CRFS Bin - Versão 5 - Arquivos Comprimidos
Vamos listar arquivos da última versão do script Logger (Versão 5): arquivos comprimidos, nos quais o piso de ruído é suprimido.
```
file = r'binfiles\Comprimidos\rfeye002290_210922_T204046_MaskBroken.bin'
```
```
%%time
compressed = parse_bin(file)
```
Wall time: 5.91 s
```
print(compressed)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">{</span>
<span style="color: #008000; text-decoration-color: #008000">'filename'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002290_210922_T204046_MaskBroken.bin'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_version'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">23</span>,
<span style="color: #008000; text-decoration-color: #008000">'string'</span>: <span style="color: #008000; text-decoration-color: #008000">'CRFS DATA FILE V023'</span>,
<span style="color: #008000; text-decoration-color: #008000">'hostname'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002290'</span>,
<span style="color: #008000; text-decoration-color: #008000">'method'</span>: <span style="color: #008000; text-decoration-color: #008000">'ScriptRFeye2021_v2.cfg'</span>,
<span style="color: #008000; text-decoration-color: #008000">'unit_info'</span>: <span style="color: #008000; text-decoration-color: #008000">'Stationary'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_number'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>,
<span style="color: #008000; text-decoration-color: #008000">'identifier'</span>: <span style="color: #008000; text-decoration-color: #008000">'INFO'</span>,
<span style="color: #008000; text-decoration-color: #008000">'gps'</span>: GPS Data - Median of Coordinates: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-23.95765</span>:<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-46.37637</span> Altitude: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">19.70</span> #Satellites:
<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">11.0</span>,
<span style="color: #008000; text-decoration-color: #008000">'spectrum'</span>: <span style="font-weight: bold">[</span><span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">68</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">321</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa </span>
<span style="color: #008000; text-decoration-color: #008000">3 de 10). @ -80dBm, 100kHz.'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">320</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">340</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>,
<span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">512</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">68</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">301</span>,
<span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 1 de 10). @ -80dBm, 10kHz.'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">108</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">137</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">14848</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3690</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">68</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">341</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 5 de </span>
<span style="color: #008000; text-decoration-color: #008000">10). @ -80dBm, 100kHz.'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">960</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1219</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>,
<span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">6656</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">68</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">311</span>,
<span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 2 de 10). @ -80dBm, 10kHz.'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">156</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">163</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3690</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">68</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">371</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 8 de </span>
<span style="color: #008000; text-decoration-color: #008000">10). @ -80dBm, 100kHz.'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2690</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">2899</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>,
<span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">5376</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">68</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">351</span>,
<span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 6 de 10). @ -80dBm, 100kHz.'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1389</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1429</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1280</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">68</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">331</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 4 de </span>
<span style="color: #008000; text-decoration-color: #008000">10). @ -80dBm, 100kHz.'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">400</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">410</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>,
<span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">256</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">68</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">361</span>,
<span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-100</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'PMEC 2021 (Faixa 7 de 10). @ -80dBm, 100kHz.'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1530</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1649</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3072</span>, <span style="color: #808000; text-decoration-color: #808000">bw</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">73828</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)]</span>
<span style="font-weight: bold">}</span>
</pre>
```
fluxos = compressed['spectrum'] ; fluxos
```
(#20) [Spectrum(type=67, thread_id=300, description='PMEC 2021 (Faixa 1 de 10).', start_mega=105, stop_mega=140, dtype='dBm', ndata=3584, bw=18457, processing='peak', antuid=0),Spectrum(type=67, thread_id=310, description='PMEC 2021 (Faixa 2 de 10).', start_mega=155, stop_mega=165, dtype='dBm', ndata=1024, bw=18457, processing='peak', antuid=0),Spectrum(type=67, thread_id=100, description='PRD 2021 (Faixa principal 1 de 4).', start_mega=50, stop_mega=90, dtype='dBμV/m', ndata=1024, bw=73828, processing='peak', antuid=0),Spectrum(type=67, thread_id=110, description='PRD 2021 (Faixa principal 2 de 4).', start_mega=70, stop_mega=110, dtype='dBμV/m', ndata=1024, bw=73828, processing='peak', antuid=0),Spectrum(type=67, thread_id=120, description='PRD 2021 (Faixa principal 3 de 4).', start_mega=170, stop_mega=220, dtype='dBμV/m', ndata=1280, bw=73828, processing='peak', antuid=0),Spectrum(type=67, thread_id=130, description='PRD 2021 (Faixa principal 4 de 4).', start_mega=470, stop_mega=700, dtype='dBμV/m', ndata=5888, bw=73828, processing='peak', antuid=0),Spectrum(type=67, thread_id=200, description='PMEF 2021 (Faixa 1 de 6).', start_mega=700, stop_mega=960, dtype='dBm', ndata=6656, bw=73828, processing='peak', antuid=0),Spectrum(type=67, thread_id=210, description='PMEF 2021 (Faixa 2 de 6).', start_mega=1710, stop_mega=1980, dtype='dBm', ndata=6912, bw=73828, processing='peak', antuid=0),Spectrum(type=67, thread_id=220, description='PMEF 2021 (Faixa 3 de 6).', start_mega=2100, stop_mega=2169, dtype='dBm', ndata=1792, bw=73828, processing='peak', antuid=0),Spectrum(type=67, thread_id=230, description='PMEF 2021 (Faixa 4 de 6).', start_mega=2290, stop_mega=2390, dtype='dBm', ndata=2560, bw=73828, processing='peak', antuid=0)...]
```
fluxo = fluxos[0]
fluxo.matrix().iloc[:5, [0, 1, 2, -3, -2, -1]]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>Frequencies</th>
<th>105.000000</th>
<th>105.009768</th>
<th>105.019537</th>
<th>139.980463</th>
<th>139.990232</th>
<th>140.000000</th>
</tr>
<tr>
<th>Time</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2021-06-30 09:46:11.447522</th>
<td>-88.5</td>
<td>-86.0</td>
<td>-84.0</td>
<td>-100.0</td>
<td>-101.0</td>
<td>-107.5</td>
</tr>
<tr>
<th>2021-06-30 09:47:00.736878</th>
<td>-85.0</td>
<td>-84.0</td>
<td>-85.0</td>
<td>-97.0</td>
<td>-101.5</td>
<td>-103.0</td>
</tr>
<tr>
<th>2021-06-30 09:48:00.736849</th>
<td>-83.0</td>
<td>-83.0</td>
<td>-84.5</td>
<td>-103.5</td>
<td>-100.0</td>
<td>-99.0</td>
</tr>
<tr>
<th>2021-06-30 09:49:00.736763</th>
<td>-90.5</td>
<td>-96.5</td>
<td>-90.5</td>
<td>-103.5</td>
<td>-105.0</td>
<td>-101.5</td>
</tr>
<tr>
<th>2021-06-30 09:50:00.736788</th>
<td>-86.5</td>
<td>-86.0</td>
<td>-86.5</td>
<td>-104.5</td>
<td>-101.5</td>
<td>-99.5</td>
</tr>
</tbody>
</table>
</div>
```
print(len(fluxo))
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="color: #008080; text-decoration-color: #008080; font-weight: bold">9060</span>
</pre>
### CRFS Bin - Versão 4
```
file = r'binfiles\v4\rfeye002292_210208_T202215_CRFSBINv.4.bin'
blocks = parse_bin(file)
print(blocks)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">{</span>
<span style="color: #008000; text-decoration-color: #008000">'filename'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002292_210208_T202215_CRFSBINv.4.bin'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_version'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">22</span>,
<span style="color: #008000; text-decoration-color: #008000">'string'</span>: <span style="color: #008000; text-decoration-color: #008000">'CRFS DATA FILE V022'</span>,
<span style="color: #008000; text-decoration-color: #008000">'hostname'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002292'</span>,
<span style="color: #008000; text-decoration-color: #008000">'method'</span>: <span style="color: #008000; text-decoration-color: #008000">'Script_CRFSBINv4'</span>,
<span style="color: #008000; text-decoration-color: #008000">'unit_info'</span>: <span style="color: #008000; text-decoration-color: #008000">'Stationary'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_number'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>,
<span style="color: #008000; text-decoration-color: #008000">'identifier'</span>: <span style="color: #008000; text-decoration-color: #008000">'LOGGER_VERSION'</span>,
<span style="color: #008000; text-decoration-color: #008000">'description'</span>: <span style="color: #008000; text-decoration-color: #008000">'ClearWrite. Peak.'</span>,
<span style="color: #008000; text-decoration-color: #008000">'gps'</span>: GPS Data - Median of Coordinates: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-12.97163</span>:<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-38.48149</span> Altitude: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">151.65</span>
#Satellites: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">12.0</span>,
<span style="color: #008000; text-decoration-color: #008000">'spectrum'</span>: <span style="font-weight: bold">[</span><span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">63</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">20</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">76</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">108</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">8192</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">63</span>,
<span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">30</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">70</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">110</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1024</span>,
<span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">63</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">10</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>,
<span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">63</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">11</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>,
<span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'average'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">63</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">12</span>,
<span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>,
<span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">63</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">13</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'average'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>,
<span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">64</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">14</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-90</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">description</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'average'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>,
<span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">65</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">15</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>,
<span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)]</span>
<span style="font-weight: bold">}</span>
</pre>
```python
blocks['spectrum'][0].matrix().iloc[:5, [0, 1, 2, -3, -2, -1]]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>Frequencies</th>
<th>76.000000</th>
<th>76.003907</th>
<th>76.007813</th>
<th>107.992187</th>
<th>107.996093</th>
<th>108.000000</th>
</tr>
<tr>
<th>Time</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2021-02-08 20:22:15.500658</th>
<td>-110.0</td>
<td>-100.5</td>
<td>-99.0</td>
<td>-103.0</td>
<td>-99.0</td>
<td>-96.0</td>
</tr>
<tr>
<th>2021-02-08 20:22:16.142770</th>
<td>-105.5</td>
<td>-100.0</td>
<td>-97.5</td>
<td>-94.5</td>
<td>-95.0</td>
<td>-98.0</td>
</tr>
<tr>
<th>2021-02-08 20:22:16.500750</th>
<td>-104.0</td>
<td>-102.5</td>
<td>-105.5</td>
<td>-95.5</td>
<td>-98.5</td>
<td>-93.0</td>
</tr>
<tr>
<th>2021-02-08 20:22:17.132990</th>
<td>-105.0</td>
<td>-107.0</td>
<td>-103.0</td>
<td>-99.5</td>
<td>-99.5</td>
<td>-102.5</td>
</tr>
<tr>
<th>2021-02-08 20:22:17.501352</th>
<td>-97.5</td>
<td>-101.5</td>
<td>-97.0</td>
<td>-104.5</td>
<td>-102.0</td>
<td>-99.5</td>
</tr>
</tbody>
</table>
</div>
### CRFS Bin - Versão 3
```python
file = r'binfiles\v3\rfeye002292_210208_T203238_CRFSBINv.3.bin'
blocks = parse_bin(file)
print(blocks)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">{</span>
<span style="color: #008000; text-decoration-color: #008000">'filename'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002292_210208_T203238_CRFSBINv.3.bin'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_version'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">21</span>,
<span style="color: #008000; text-decoration-color: #008000">'string'</span>: <span style="color: #008000; text-decoration-color: #008000">'CRFS DATA FILE V021'</span>,
<span style="color: #008000; text-decoration-color: #008000">'hostname'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002292'</span>,
<span style="color: #008000; text-decoration-color: #008000">'method'</span>: <span style="color: #008000; text-decoration-color: #008000">'Script_CRFSBINv3'</span>,
<span style="color: #008000; text-decoration-color: #008000">'unit_info'</span>: <span style="color: #008000; text-decoration-color: #008000">'Stationary'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_number'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>,
<span style="color: #008000; text-decoration-color: #008000">'identifier'</span>: <span style="color: #008000; text-decoration-color: #008000">'LOGGER_VERSION'</span>,
<span style="color: #008000; text-decoration-color: #008000">'description'</span>: <span style="color: #008000; text-decoration-color: #008000">'ClearWrite. Peak.'</span>,
<span style="color: #008000; text-decoration-color: #008000">'gps'</span>: GPS Data - Median of Coordinates: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-12.97163</span>:<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-38.48149</span> Altitude: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">150.60</span>
#Satellites: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">12.0</span>,
<span style="color: #008000; text-decoration-color: #008000">'spectrum'</span>: <span style="font-weight: bold">[</span><span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">60</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">10</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>,
<span style="color: #808000; text-decoration-color: #808000">nloops</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">60</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">11</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">nloops</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'average'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">60</span>,
<span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">20</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">76</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">108</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">8192</span>, <span style="color: #808000; text-decoration-color: #808000">nloops</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">60</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">30</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">70</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">110</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1024</span>,
<span style="color: #808000; text-decoration-color: #808000">nloops</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">60</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">12</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">nloops</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">60</span>,
<span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">13</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">nloops</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'average'</span>,
<span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">61</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">14</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-90</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">nloops</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'average'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">62</span>,
<span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">15</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-90</span>, <span style="color: #808000; text-decoration-color: #808000">sampling</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">9</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)</span>,
<span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">62</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">15</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-90</span>, <span style="color: #808000; text-decoration-color: #808000">sampling</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">10</span>,
<span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)]</span>
<span style="font-weight: bold">}</span>
</pre>
```python
blocks['spectrum'][0].matrix().iloc[:5, [0, 1, 2, -3, -2, -1]]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>Frequencies</th>
<th>105.000000</th>
<th>105.009768</th>
<th>105.019537</th>
<th>139.980463</th>
<th>139.990232</th>
<th>140.000000</th>
</tr>
<tr>
<th>Time</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2021-02-08 20:32:39.548000</th>
<td>-76.5</td>
<td>-76.0</td>
<td>-76.5</td>
<td>-94.5</td>
<td>-91.0</td>
<td>-90.0</td>
</tr>
<tr>
<th>2021-02-08 20:32:40.133600</th>
<td>-79.5</td>
<td>-80.5</td>
<td>-79.5</td>
<td>-99.0</td>
<td>-94.5</td>
<td>-92.5</td>
</tr>
<tr>
<th>2021-02-08 20:32:41.858000</th>
<td>-69.0</td>
<td>-69.0</td>
<td>-69.0</td>
<td>-97.5</td>
<td>-92.5</td>
<td>-90.0</td>
</tr>
<tr>
<th>2021-02-08 20:32:42.137500</th>
<td>-70.5</td>
<td>-71.0</td>
<td>-71.5</td>
<td>-97.0</td>
<td>-98.0</td>
<td>-94.5</td>
</tr>
<tr>
<th>2021-02-08 20:32:43.716000</th>
<td>-71.0</td>
<td>-69.5</td>
<td>-70.0</td>
<td>-97.0</td>
<td>-89.0</td>
<td>-87.0</td>
</tr>
</tbody>
</table>
</div>
### CRFS Bin - Versão 2
```python
from rfpye.parser import parse_bin
file = r'binfiles\v2\rfeye002092_210208_T203131_CRFSBINv.2.bin'
blocks = parse_bin(file)
print(blocks)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">{</span>
<span style="color: #008000; text-decoration-color: #008000">'filename'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002092_210208_T203131_CRFSBINv.2.bin'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_version'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">21</span>,
<span style="color: #008000; text-decoration-color: #008000">'string'</span>: <span style="color: #008000; text-decoration-color: #008000">'CRFS DATA FILE V021'</span>,
<span style="color: #008000; text-decoration-color: #008000">'description'</span>: <span style="color: #008000; text-decoration-color: #008000">'ClearWrite. Peak.'</span>,
<span style="color: #008000; text-decoration-color: #008000">'gps'</span>: GPS Data - Median of Coordinates: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-1.00000</span>:<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-1.00000</span> Altitude: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-1.00</span> #Satellites:
<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0.0</span>,
<span style="color: #008000; text-decoration-color: #008000">'spectrum'</span>: <span style="font-weight: bold">[</span><span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">20</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">76</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">108</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">8192</span>,
<span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">30</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">70</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">110</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1024</span>,
<span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">10</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>,
<span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">11</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>,
<span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'Average'</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">12</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>,
<span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'Peak'</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">4</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">13</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>,
<span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'Average'</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">7</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">14</span>, <span style="color: #808000; text-decoration-color: #808000">thresh</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-90</span>, <span style="color: #808000; text-decoration-color: #808000">minimum</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-147.5</span>,
<span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">8</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">15</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">sampling</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">9</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span><span style="font-weight: bold">)</span>, <span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">8</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">15</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">105</span>,
<span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">140</span>, <span style="color: #808000; text-decoration-color: #808000">sampling</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">10</span>, <span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">3584</span><span style="font-weight: bold">)]</span>
<span style="font-weight: bold">}</span>
</pre>
```
blocks['spectrum'][0].matrix().iloc[:5, [0, 1, 2, -3, -2, -1]]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>Frequencies</th>
<th>76.000000</th>
<th>76.003907</th>
<th>76.007813</th>
<th>107.992187</th>
<th>107.996093</th>
<th>108.000000</th>
</tr>
<tr>
<th>Time</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>NaN</th>
<td>10.5</td>
<td>11.5</td>
<td>13.0</td>
<td>31.0</td>
<td>26.0</td>
<td>22.5</td>
</tr>
<tr>
<th>NaN</th>
<td>18.0</td>
<td>19.5</td>
<td>16.5</td>
<td>27.5</td>
<td>29.5</td>
<td>25.5</td>
</tr>
<tr>
<th>NaN</th>
<td>11.5</td>
<td>5.0</td>
<td>8.0</td>
<td>30.5</td>
<td>28.5</td>
<td>30.0</td>
</tr>
<tr>
<th>NaN</th>
<td>4.5</td>
<td>4.5</td>
<td>13.5</td>
<td>27.5</td>
<td>31.0</td>
<td>31.0</td>
</tr>
<tr>
<th>NaN</th>
<td>17.5</td>
<td>15.5</td>
<td>11.0</td>
<td>33.5</td>
<td>29.5</td>
<td>30.0</td>
</tr>
</tbody>
</table>
</div>
### Fluxo de Ocupação
```
from rfpye.parser import parse_bin
file = r'binfiles\occ\rfeye002090-VCP_FM_occ15min_191221_085803.bin'
blocks = parse_bin(file)
print(blocks)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">{</span>
<span style="color: #008000; text-decoration-color: #008000">'filename'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002090-VCP_FM_occ15min_191221_085803.bin'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_version'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">22</span>,
<span style="color: #008000; text-decoration-color: #008000">'string'</span>: <span style="color: #008000; text-decoration-color: #008000">'CRFS DATA FILE V022'</span>,
<span style="color: #008000; text-decoration-color: #008000">'hostname'</span>: <span style="color: #008000; text-decoration-color: #008000">'rfeye002090-VCP'</span>,
<span style="color: #008000; text-decoration-color: #008000">'method'</span>: <span style="color: #008000; text-decoration-color: #008000">'CRFS default method'</span>,
<span style="color: #008000; text-decoration-color: #008000">'unit_info'</span>: <span style="color: #008000; text-decoration-color: #008000">'RFeye002090'</span>,
<span style="color: #008000; text-decoration-color: #008000">'file_number'</span>: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span>,
<span style="color: #008000; text-decoration-color: #008000">'identifier'</span>: <span style="color: #008000; text-decoration-color: #008000">'LOGGER_VERSION'</span>,
<span style="color: #008000; text-decoration-color: #008000">'description'</span>: <span style="color: #008000; text-decoration-color: #008000">'Ocupacao em 15 minutos na faixa FM'</span>,
<span style="color: #008000; text-decoration-color: #008000">'gps'</span>: GPS Data - Median of Coordinates: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-1.00000</span>:<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-1.00000</span> Altitude: <span style="color: #008080; text-decoration-color: #008080; font-weight: bold">-1.00</span> #Satellites:
<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0.0</span>,
<span style="color: #008000; text-decoration-color: #008000">'spectrum'</span>: <span style="font-weight: bold">[</span><span style="color: #800080; text-decoration-color: #800080; font-weight: bold">Spectrum</span><span style="font-weight: bold">(</span><span style="color: #808000; text-decoration-color: #808000">type</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">65</span>, <span style="color: #808000; text-decoration-color: #808000">thread_id</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">121</span>, <span style="color: #808000; text-decoration-color: #808000">start_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">80</span>, <span style="color: #808000; text-decoration-color: #808000">stop_mega</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">110</span>, <span style="color: #808000; text-decoration-color: #808000">dtype</span>=<span style="color: #008000; text-decoration-color: #008000">'dBm'</span>,
<span style="color: #808000; text-decoration-color: #808000">ndata</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">1536</span>, <span style="color: #808000; text-decoration-color: #808000">processing</span>=<span style="color: #008000; text-decoration-color: #008000">'peak'</span>, <span style="color: #808000; text-decoration-color: #808000">antuid</span>=<span style="color: #008080; text-decoration-color: #008080; font-weight: bold">0</span><span style="font-weight: bold">)]</span>
<span style="font-weight: bold">}</span>
</pre>
```
blocks['spectrum'][-1].matrix().iloc[:5, 1003:1010]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>Frequencies</th>
<th>99.602606</th>
<th>99.622150</th>
<th>99.641694</th>
<th>99.661238</th>
<th>99.680782</th>
<th>99.700326</th>
<th>99.719870</th>
</tr>
<tr>
<th>Time</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>2019-12-21 09:00:01.367337</th>
<td>17.5</td>
<td>36.0</td>
<td>53.5</td>
<td>62.5</td>
<td>76.5</td>
<td>80.0</td>
<td>72.5</td>
</tr>
<tr>
<th>2019-12-21 09:15:01.357259</th>
<td>15.0</td>
<td>29.5</td>
<td>48.0</td>
<td>61.0</td>
<td>76.5</td>
<td>78.5</td>
<td>71.0</td>
</tr>
<tr>
<th>2019-12-21 09:30:01.357357</th>
<td>16.0</td>
<td>28.5</td>
<td>46.5</td>
<td>61.0</td>
<td>76.5</td>
<td>77.5</td>
<td>70.0</td>
</tr>
<tr>
<th>2019-12-21 09:45:01.357273</th>
<td>15.0</td>
<td>33.0</td>
<td>49.0</td>
<td>66.0</td>
<td>76.0</td>
<td>78.0</td>
<td>70.0</td>
</tr>
<tr>
<th>2019-12-21 10:00:01.419225</th>
<td>15.5</td>
<td>35.0</td>
<td>50.0</td>
<td>62.0</td>
<td>74.0</td>
<td>77.0</td>
<td>67.5</td>
</tr>
</tbody>
</table>
</div>
| /rfpye-0.3.6.tar.gz/rfpye-0.3.6/README.md | 0.478529 | 0.852629 | README.md | pypi |
import hashlib
import os
import time
import warnings
import xmlrpc.client
import bencodepy
# Package metadata consumed by setup tooling and introspection.
__author__ = 'Braden Baird <bradenbdev@gmail.com>'
__version__ = '0.1.3'
def nested_get(dic, keys):
    """Walk *keys* through nested mappings and return the final value.

    :param dict dic: Nested dictionary to read from.
    :param keys: Iterable of keys, outermost first.
    :return: The value found at the end of the key path (``dic`` itself
        when *keys* is empty).
    """
    current = dic
    for key in keys:
        current = current[key]
    return current
def nested_set(dic, keys, value):
    """Assign *value* at the location described by *keys* in a nested dict.

    Intermediate dictionaries are created on demand via ``setdefault``.

    :param dict dic: Nested dictionary to modify in place.
    :param keys: Non-empty iterable of keys, outermost first.
    :param value: Value to store under the final key.
    """
    *parents, last = keys
    target = dic
    for key in parents:
        target = target.setdefault(key, {})
    target[last] = value
def calc_chunks(length, bsize):
    """Return the number of *bsize*-byte chunks needed to cover *length* bytes.

    Uses pure-integer ceiling division. The previous implementation computed
    ``int(length / bsize)`` which goes through float division and silently
    loses precision for lengths >= 2**53 (e.g. very large torrents).

    :param int length: Total size in bytes.
    :param int bsize: Chunk (piece) size in bytes.
    :return int: Ceiling of length / bsize.
    """
    return -(-length // bsize)
def calc_info_hash(info):
    """
    Calculates a torrent's info hash.
    :param info: The 'info' section of a torrent. Use bencode on the torrent to get this.
    :return str: The info hash of the torrent.
    """
    encoded = bencodepy.bencode(info)
    digest = hashlib.sha1(encoded)
    return digest.hexdigest()
class FastTorrent:
    """Builds rtorrent fast-resume data for an already-downloaded torrent.

    Typical usage: construct with the .torrent path and download location,
    call :meth:`do_resume` to verify the on-disk files and inject the
    ``rtorrent``/``libtorrent_resume`` sections, then :meth:`save_to_file`
    and/or :meth:`add_to_rtorrent`.
    """

    # Each entry in the torrent's 'pieces' field is a 20-byte SHA-1 digest.
    CHUNK_HASH_SIZE = 20

    def __init__(self, tor_file, current_dl_loc, new_dl_loc=None):
        """
        Initializes the object.
        :param str tor_file: Path to source torrent file that you want fast-resumed.
        :param str current_dl_loc: Path where the torrent was downloaded to.
        :param str new_dl_loc: Optional new download directory to record in the
            resume data instead of current_dl_loc.
        """
        self.tor_file = tor_file
        self.current_dl_loc = current_dl_loc
        self.new_dl_loc = new_dl_loc
        self.num_chunks = 0
        self.chunk_size = 0
        self.total_tor_size = 0
        self.has_saved_file = False
        self.fr_file_loc = None  # Fast resume file location; the path of new torrent file when save_to_file is called
        self.has_resumed = False
        self.tor_data = None
        self._load_file()
        self.info_hash = calc_info_hash(self.get_tor_data_val('info'))

    def _load_file(self):
        """Loads torrent file data into a dict.

        :raises RuntimeError: If the torrent file is missing or has no
            'info' section.
        """
        if not os.path.exists(self.tor_file):
            raise RuntimeError('Torrent file was not found. Please pass the path to a valid torrent file.')
        with open(self.tor_file, 'rb') as f:
            self.tor_data = bencodepy.bdecode(f.read())
        if not self.get_tor_data_val('info'):
            raise RuntimeError('Invalid torrent data.')

    def get_tor_data_val(self, *keys):
        """
        Returns value from torrent data located at *keys. You can pass as many keys as you want.
        Here's an example of what this does:
            # Normally, without using get_tor_data_val, one might do:
            FastTorrent.tor_data['key1']['key2']
            # With get_tor_data_val, simply use:
            get_tor_data_val('key1', 'key2')
        :param str keys: The keys that lead to the value you want to access. Keys are encoded automatically,
                         so pass these as strings.
        :return: The value retrieved from self.tor_data using the keys provided.
        """
        # bencodepy decodes all dict keys as bytes, so encode lookups to match.
        encoded_keys = [k.encode() for k in keys]
        return nested_get(self.tor_data, encoded_keys)

    def set_tor_data_val(self, *keys, value):
        """
        Identical to get_tor_data_val, but with an additional parameter to set a torrent data value.
        :param value: Value to set tor_data value to. Note that value must be a keyword arg.
        :param str keys: The keys that lead to the value you want to access. Keys are encoded automatically,
                         so pass these as strings.
        :return: None
        """
        encoded_keys = [k.encode() for k in keys]
        nested_set(self.tor_data, encoded_keys, value)

    def tor_data_val_exists(self, *keys):
        """Returns True if specified keys are in self.tor_data, False if otherwise."""
        try:
            _ = self.get_tor_data_val(*keys)
            return True
        except KeyError:
            return False

    def calc_file_chunks(self, offset, size):
        """Return the number of pieces a file spans, given its byte offset
        into the torrent's concatenated data and its size in bytes."""
        return calc_chunks(offset + size, self.chunk_size) - calc_chunks(offset + 1, self.chunk_size) + 1

    @property
    def tor_is_multi_file(self):
        """Is True if torrent has multiple files, False if otherwise."""
        return self.tor_data_val_exists('info', 'files')

    @property
    def dl_files_path(self):
        """The path where this torrent's files (should) have been downloaded."""
        path = os.path.expandvars(self.current_dl_loc)
        path = os.path.expanduser(path)
        return os.path.join(path, self.get_tor_data_val('info', 'name').decode())

    @property
    def dl_base_path(self):
        """
        The BASE path for this torrent's files. If it's a multi-file torrent,
        this will be <download directory/torrent name>. If it's a single-file torrent,
        it will be <download directory/file name>
        """
        return self.dl_files_path if self.tor_is_multi_file else os.path.dirname(self.dl_files_path)

    def check_download_locations(self):
        """
        Checks download location for files and makes sure that they're present. Raises RuntimeError if the
        download path is missing or (for multi-file torrents) empty.
        :return: None
        """
        if not os.path.exists(self.dl_files_path):
            raise RuntimeError(f'Torrent download was not found at download location {self.dl_files_path}.')
        if self.tor_is_multi_file:
            if len(os.listdir(self.dl_files_path)) == 0:
                raise RuntimeError("Base path for torrent is empty. Can't resume a torrent that hasn't started yet!")

    def get_downloaded_files(self):
        """
        Returns list of files that should be in the download location. Also checks the download locations to
        ensure that they exist. Will raise a RuntimeError if something is wrong with the download location or the
        files.

        Side effects: sets self.chunk_size, self.total_tor_size and self.num_chunks
        from the torrent metadata.
        :return list: Relative paths of the torrent's files.
        """
        self.check_download_locations()
        if not self.tor_data_val_exists('info', 'piece length'):
            raise RuntimeError('Invalid torrent: No piece length key found.')
        self.chunk_size = self.get_tor_data_val('info', 'piece length')
        files = []
        if self.tor_is_multi_file:
            for file in self.get_tor_data_val('info', 'files'):
                # Each file entry stores its path as a list of bytes components.
                file_path = os.path.join(*file[b'path']).decode()
                files.append(file_path)
                self.total_tor_size += file[b'length']
        else:
            files = [self.get_tor_data_val('info', 'name').decode()]
            self.total_tor_size = self.get_tor_data_val('info', 'length')
        self.num_chunks = calc_chunks(self.total_tor_size, self.chunk_size)
        # 'pieces' concatenates one 20-byte SHA-1 per chunk; sizes must agree.
        if self.num_chunks * self.CHUNK_HASH_SIZE != len(self.get_tor_data_val('info', 'pieces')):
            raise RuntimeError('Inconsistent chunks hash information!')
        return files

    def do_resume(self):
        """
        Creates and populates torrent's resume data. Will check all download locations and files to ensure they're
        done before doing so.
        :return: None
        """
        files = self.get_downloaded_files()
        data_dir = self.dl_base_path
        on_disk_size = 0  # on-disk data size counter
        block_offset = 0  # block offset
        self.set_tor_data_val('libtorrent_resume', 'files', value=[])
        for idx, file in enumerate(files):
            file_path = os.path.join(data_dir, file)
            if not os.path.isfile(file_path):
                raise RuntimeError("Something is wrong with the torrent's files. They either don't exist, or are not "
                                   "normal files.")
            file_size = os.path.getsize(file_path)
            tor_len = self.get_tor_data_val('info', 'files')[idx][b'length'] if self.tor_is_multi_file else \
                self.get_tor_data_val('info', 'length')
            if tor_len != file_size:
                raise RuntimeError("One of the torrent files does not match its expected size. Aborting.")
            mtime = int(os.path.getmtime(file_path))
            completed = self.calc_file_chunks(block_offset, file_size) if file_size else 0
            # Add libtorrent resume data to torrent
            self.get_tor_data_val('libtorrent_resume', 'files').insert(idx, {
                b'priority': 0,
                b'mtime': mtime,
                b'completed': completed
            })
            on_disk_size += file_size
            block_offset += tor_len
        # Resume failed if on_disk_size = 0 (no files to resume) or on_disk_size doesn't match sum of all files in
        # torrent
        if on_disk_size != self.total_tor_size or on_disk_size == 0:
            raise RuntimeError("File size verification failed. Files are missing.")
        # Set vars in torrent
        rtorrent_vals = {
            b'state': 1,  # started
            b'state_changed': int(time.time()),
            b'state_counter': 1,
            b'chunks_wanted': 0,
            b'chunks_done': self.num_chunks,
            b'complete': 1,
            b'hashing': 0,
            b'directory': self.new_dl_loc.encode() if self.new_dl_loc else self.dl_base_path.encode(),
            b'timestamp.finished': 0,
            b'timestamp.started': int(time.time()),
        }
        libtorrent_resume_vals = {
            b'bitfield': self.num_chunks,
            b'uncertain_pieces.timestamp': int(time.time())
        }
        self.set_tor_data_val('rtorrent', value=rtorrent_vals)
        self.get_tor_data_val('libtorrent_resume').update(libtorrent_resume_vals)
        self.has_resumed = True

    def save_to_file(self, dest=None):
        """
        Saves torrent data to file.
        :param str dest: Path where file should be saved. If not provided, will output to current directory as
                         *torrent name*_fast.torrent
        :return: None
        """
        encoded_tor_data = bencodepy.bencode(self.tor_data)
        if dest is None:
            no_ext = os.path.splitext(self.tor_file)[0]
            filename = f'{os.path.basename(no_ext)}_fast.torrent'
            dest = os.path.join(os.path.dirname(self.tor_file), filename)
        with open(dest, 'wb') as f:
            f.write(encoded_tor_data)
        self.has_saved_file = True
        self.fr_file_loc = dest

    def add_to_rtorrent(self, server_url, custom_ratio=None):
        """
        Add fast resume torrent to rtorrent via xml rpc.
        :param str server_url: URL of the xml rpc server.
        :param float custom_ratio: Ratio to set in torrent's custom_ratio field.
        :return: None
        """
        if not self.has_resumed:
            warnings.warn('add_to_rtorrent was called before calling do_resume. Doing this will add the torrent to '
                          'rtorrent without fast resuming it.')
        encoded_tor_data = bencodepy.bencode(self.tor_data)
        server = xmlrpc.client.Server(server_url)
        server.load.raw_start('', xmlrpc.client.Binary(encoded_tor_data),
                              f'd.directory.set="{self.new_dl_loc or self.dl_base_path}"', 'd.priority.set=2')
        if custom_ratio is not None:
            server.d.custom.set(self.info_hash, 'custom_ratio', str(float(custom_ratio)))
def rfr(tor_file, current_dl_loc, new_dl_loc=None, dest=None):
    """
    Convenience wrapper around the FastTorrent class: resume and save in one call.
    :param tor_file: Path to torrent file that you want to fast resume.
    :param current_dl_loc: Path where the torrent was downloaded to.
    :param new_dl_loc: New download location to set in the fast resume torrent.
    :param dest: Path to write fast resume torrent to.
    :return: None
    """
    torrent = FastTorrent(tor_file, current_dl_loc, new_dl_loc)
    torrent.do_resume()
    torrent.save_to_file(dest)
# RFTokenizer
A character-wise tokenizer for morphologically rich languages
For replication of paper results see replication.md
For full NLP pipelines for morphologically rich languages (MRLs) based on this tool, see:
* Coptic: http://www.github.com/CopticScriptorium/coptic-nlp/
* Hebrew: http://www.github.com/amir-zeldes/HebPipe/
Pretrained models are provided for **Coptic**, **Arabic** and **Hebrew**
## Installation
RFTokenizer is available for installation from PyPI:
```
pip install rftokenizer
```
Or you can clone this repo and run
```
python setup.py install
```
## Introduction
This is a simple tokenizer for word-internal segmentation in morphologically rich languages such as Hebrew, Coptic or Arabic, which have big 'super-tokens' (space-delimited words which contain e.g. clitics that need to be segmented) and 'sub-tokens' (the smaller units contained in super-tokens).
Segmentation is based on character-wise binary classification: each character is predicted to have a following border or not. The tokenizer relies on an xgboost classifier, which is fast, very accurate using little training data, and resists overfitting. Solutions do not represent globally optimal segmentations (there is no CRF layer, transition lattices or similar), but at the same time a globally coherent segmentation of each string into known morphological categories is not required, which leads to better OOV item handling. The tokenizer is optimal for medium amounts of data (10K - 200K examples of word forms to segment), and works out of the box with fairly simple dependencies and small model files (see Requirements). For two languages as of summer 2019, RFTokenizer either provides the highest published segmentation accuracy on the official test set (Hebrew) or forms part of an ensemble which does so (Coptic).
To cite this tool, please refer to the following paper:
Zeldes, Amir (2018) A Characterwise Windowed Approach to Hebrew Morphological Segmentation. In: *Proceedings of the 15th SIGMORPHON Workshop on Computational Research in Phonetics, Phonology, and Morphology*. Brussels, Belgium, 101-110.
```
@InProceedings{,
author = {Amir Zeldes},
  title     = {A Characterwise Windowed Approach to {H}ebrew Morphological Segmentation},
booktitle = {Proceedings of the 15th {SIGMORPHON} Workshop on Computational Research in Phonetics, Phonology, and Morphology},
year = {2018},
address = {Brussels, Belgium},
pages = {101--110}
}
```
The data provided for the Hebrew segmentation experiment in this paper, given in the data/ directory, is derived from the Universal Dependencies version of the Hebrew Treebank, which is made available under a CC BY-NC-SA 4.0 license, but using the earlier splits from the 2014 SPMRL shared task. For attribution information for the Hebrew Treebank, see: https://github.com/UniversalDependencies/UD_Hebrew-HTB . The out-of-domain Wikipedia dataset from the paper, called Wiki5K and available in the data/ directory, is available under the same terms as Wikipedia.
Coptic data is derived from Coptic Scriptorium corpora, see more information at http://www.copticscriptorium.org/
Arabic data is derived from the Prague Arabic Dependency Treebank (UD_Arabic-PADT, https://github.com/UniversalDependencies/UD_Arabic-PADT)
## Performance
Current scores on the SPMRL Hebrew dataset (UD_Hebrew, V1 splits), using BERT-based predictions and lexicon data as features:
```
Perfect word forms: 0.9918367346938776
Precision: 0.9885304659498207
Recall: 0.9864091559370529
F-Score: 0.9874686716791979
```
Or without BERT:
```
Perfect word forms: 0.9821036106750393
Precision: 0.9761790182868142
Recall: 0.967103694874851
F-Score: 0.9716201652496708
```
Scores on Hebrew Wiki5K (out-of-domain, with BERT):
```
Perfect word forms: 0.9907224634820371
Precision: 0.9851075565361279
Recall: 0.9845644983461963
F-Score: 0.9848359525778881
```
Prague Arabic Dependency Treebank (UD_Arabic-PADT, currently without BERT):
```
Perfect word forms: 0.9846204866724703
Precision: 0.9744331886155331
Recall: 0.9874853343762221
F-Score: 0.9809158451901132
```
Coptic Scriptorium (UD_Coptic-Scriptorium, currently without BERT):
```
Perfect word forms: 0.952007602755999
Precision: 0.9797786292039166
Recall: 0.9637772194304858
F-Score: 0.971712054042643
```
## Requirements
The tokenizer needs:
* scikit-learn
* numpy
* pandas
* xgboost
* flair (only if BERT is used)
And if you want to run hyperparameter optimization:
* hyperopt
Compatible with Python 2 or 3, but compiled models must be specific to Python 2 / 3 (can't use a model trained under Python 2 with Python 3).
## Using
### Command line
To use the tokenizer, include the model file (e.g. `heb.sm3`) in the tokenizer's directory or in `models/`, then select it using `-m heb` and supply a text file to run segmentation on. The input file should have one word-form per line for segmentation.
```
> python tokenize_rf.py -m heb example_in.txt > example_out.txt
```
Input file format:
```
עשרות
אנשים
מגיעים
מתאילנד
לישראל
```
Output format:
```
עשרות
אנשים
מגיעים
מ|תאילנד
ל|ישראל
```
You can also use the option `-n` to separate segments using a newline instead of the pipe character.
### Importing as a module
You can import RFTokenizer once it is installed (e.g. via pip), for example:
```
>>> from rftokenizer import RFTokenizer
>>> my_tokenizer = RFTokenizer(model="ara")
>>> data = open("test_ara.txt",encoding="utf8").read()
>>> tokenized = my_tokenizer.rf_tokenize(data)
>>> print(tokenized)
```
Note that .rf_tokenize() expects a list of word forms to analyze or a string with word forms separated by new lines. The return value is a list of analyses separated by the separator (default: `|`).
## Training
To train a new model, you will need at least a configuration file and a training file. Ideally, you should also provide a lexicon file containing categorized sub-tokens AND super-tokens
and frequency information for sub-tokens (see below).
Training is invoked like this:
```
> python tokenize_rf.py -t -m <LANG> -c <CONF> -l <LEXICON> -f <FREQS> <TRAINING>
```
This will produce `LANG.sm3`, the compiled model (or `.sm2` under Python 2). If `<CONF>` is not supplied, it is assumed to be called `<LANG>.conf`.
If you wish to use BERT features for classification you must *first* train a flair classifier using `flair_pos_tagger.py`, which trains on .conllu data, and name its model `<LANG>.seg`, which should be placed in `models/`. Then train RFTokenizer using the `--bert` option. **Important note**: the data used to train the BERT classifier must be disjoint from the data used to train RFTokenizer, or else it will produce over-reliance (RFTokenizer will learn that BERT is always right, since BERT magically predicts everything correctly, given that it has already seen this training data). Alternatively, you can use a k-fold training regime.
### Configuration
You must specify some settings for your model in a file usually named `LANG.conf`, e.g. heb.conf for Hebrew. This file is a config parser property file with the following format:
* A section header at the top corresponding to your language model, in brackets, e.g. `[heb]` for Hebrew
* base_letters - characters to consider during classification. All other characters are treated as `_` (useful for OOV/rare characters, emojis, etc.). These characters should be attested in TRAINING
Optionally you may add:
* vowels - if the language distinguishes something like vowels (including matres lectionis), it can be useful to configure them here
* pos_classes - a mapping of POS tags to collapsed POS tags in the lexicon, in order to reduce sparseness (especially if the tag set is big but training data is small). See below for format.
* unused - comma separated list of feature names to permanently disable in this model.
* diacritics - not currently used.
* regex_tok - a set of regular expressions used for rule based tokenization (e.g. for numbers, see example below)
* allowed - mapping of characters that may be followed by a boundary at positive positions in the beginning of the word (starting at 0) or negative positions at the end of the word (-1 is the last character). When this setting is used, no other characters/positions will allow splits (useful for languages with a closed vocabulary of affixes). See below for format.
Example `heb.conf` file for Hebrew:
```
[heb]
base_letters=אבגדהוזחטיכלמנסעפצקרשתןםךףץ'-%".?!/,
vowels=אהוי
unused=next_letter
diacritics=ּ
allowed=
0<-המבלושכ'"-
1<-המבלשכ'"-
2<-המבלכ'"-
3<-ה'"-
-1<-והיךםן
-2<-הכנ
regex_tok=
^([0-9\.,A-Za-z]+)$ \1
^(ב|ל|מ|כ|ה)([-־])([0-9\./,A-Za-z]+)$ \1|\2|\3
^(ב|ל|מ|כ|ה)([0-9\./,A-Za-z]+)$ \1|\2
```
If using POS classes:
```
pos_classes=
V<-VBP|VBZ|VB|VBD|VBG|VBN|MD
N<-NN|NNP
NS<-NNS|NNPS
```
### Training file
A two column text file with word forms in one column, and pipe-delimited segmentations in the second column:
```
עשרות עשרות
אנשים אנשים
מגיעים מגיעים
מתאילנד מ|תאילנד
לישראל ל|ישראל
...
```
It is assumed that line order is meaningful, i.e. each line provides preceding context for the next line. If you have a **shuffled** corpus of trigrams, you can also supply a four column training file with the columns:
* Previous wordform
* Next wordform
* Current wordform
* Current wordform segmentation (pipe-separated)
In this case line order is meaningless.
### Lexicon file
The lexicon file is a tab delimited text file with one word form per line and the POS tag assigned to that word in a second column (a third column with lemmas is reserved for future use). Multiple entries per word are possible, e.g.:
```
צלם NOUN צלם
צלם VERB צילם
צלם CPLXPP צל
...
```
It is recommended (but not required) to include entries for complex super-tokens and give them distinct tags, e.g. the sequence צלם above contains two segments: a noun and a possessor clitic. It is therefore given the tag CPLXPP, 'complex including a personal pronoun'. This tag is not used for any simple sub-token segment in the same lexicon.
### Frequency file
The frequency file is a tab delimited text file with one word form per line and the frequency of that word as an integer. Multiple entries per word are possible if pooling data from multiple sources, in which case the sum of integers is taken. In the following example, the frequency of the repeated first item is the sum of the numbers in the first two lines:
```
שמח 32304
שמח 39546
שמט 314
...
```
### Other training options
* You can specify a train/test split proportion using e.g. `-p 0.2` (default test partition is 0.1 of the data)
* Variable importances can be outputted using `-i`
* You can perform retraining on the entire dataset after evaluation of feature importances using `-r`
* You can ablate certain features using `-a` and a comma separated list of features
* Hyperparameter optimization can be run with `-o`
If you want to test different classifiers/modify default hyperparameters, you can modify the cross-validation code in the train() routine or use a fixed dev set (look for `cross_val_test`).
| /rftokenizer-2.0.1.tar.gz/rftokenizer-2.0.1/README.md | 0.630912 | 0.920968 | README.md | pypi |
# RFVis [](https://pypi.org/project/rfvis/) [](https://pypi.org/project/rfvis/) [](https://pypi.org/project/rfvis/)
A tool for visualizing the structure and performance of Random Forests (and other ensemble methods based on decision trees).

RFVis offers a [Command Line API](#command-line-api) and a [Python API](#python-api) which works on a [sklearn.ensemble.RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html).
## Getting Started
Install and update RFVis via [pip](https://pip.pypa.io/en/stable/quickstart/):
```
$ pip install rfvis
```
This will allow you to interactively visualize a fitted Random Forest (RF) in your
browser. To directly generate SVG files from your model you also need to install
[Node.js](https://nodejs.org/en/download/), see [Command Line Interface](#command-line-interface) for more information.
## Command Line API
RFVis offers a command line tool to either generate SVG files directly from
your input data (`rfvis cli <data>`) or to spin up a web-based GUI for a more
interactive analysis (`rfvis gui <data>`).
To see all available commands run:
```
$ rfvis --help
Usage: rfvis [OPTIONS] COMMAND [ARGS]...
A tool for visualizing the structure and performance of Random Forests
Options:
--version Show the version and exit.
--help Show this message and exit.
Commands:
cli Command line interface to generate SVGs.
gui Web-based graphical user interface.
```
### Graphical User Interface
To interactively analyze your forest with the web-based GUI run:
```
$ rfvis gui /path/to/data
* Running on http://127.0.0.1:8080/ (Press CTRL+C to quit)
```
You can now open up your browser at <http://localhost:8080> to see something like this:

### Command Line Interface
To use the Command Line Interface (CLI) you need to have
[Node.js](https://nodejs.org/en/download/) v8+ installed on your system. This
is a technical limitation due to the fact that the rendering is written in
Javascript. You do not need to install any other package though, the CLI
integrates into the command line tool you already installed via pip:
```
$ rfvis cli /path/to/data
>> Exported "/dev/random-forest-visualization/tree-0.svg"
>> Exported "/dev/random-forest-visualization/tree-1.svg"
>> Exported "/dev/random-forest-visualization/tree-2.svg"
>> Exported "/dev/random-forest-visualization/tree-3.svg"
...
```
Get a full list of available options with `--help`:
```
$ rfvis cli --help
Usage: rfvis cli [OPTIONS] FOREST_JSON
  Command line interface to generate SVGs.
As Python is unable to render React components, we make a subprocess call to a small
Node.js application which will do the rendering and also store the created SVG
files. This command requires that Node.js is installed on your system!
FOREST_JSON: Path to the JSON file that contains the forest's data.
Options:
-o, --out PATH Output path of the SVG files. [default: (current
working directory)]
-w, --width INTEGER Width of the SVG. [default: 800]
-h, --height INTEGER Height of the SVG. [default: 800]
--trunk-length INTEGER Length of the trunk which influences the overall tree
size. [default: 100]
--display-depth INTEGER Maximum depth of the tree rendering. Cut of leaves are
visualized as pie chart consolidation nodes.
--branch-color [Impurity] Coloring of the branches. [default: Impurity]
--leaf-color [Impurity|Best Class]
Coloring of the leaves. [default: Impurity]
--help Show this message and exit.
```
### Input Data
The data for the Command Line API must be available on your filesystem as a JSON file
for the forest and additionally one CSV file per tree. Both data formats will
be extended with properties in the future, this is just the minimal set.
You can find a working example under `examples/PolSAR`.
#### Forest JSON
The main `forest.json` holds all information about the ensemble model:
- **name** (string): Name of your forest, will be displayed in the GUI
- **error** (float): The error (e.g. the out-of-bag or validation error) of the
entire ensemble model, will be displayed in the GUI
- **n_samples** (int): Number of samples the model was trained on
- **correlationMatrix** (float[][]): Correlation between the single trees within
the model. Has dimensions `NxN` where `N` is the number of trees.
This will be used to compute the forest map.
- **classes**: The output classes
- **name** (string): Name of the class
- **color** (int, int, int): RGB values in the range of 0-255 which
determine the color of the class in the visualization
- **trees**: The trees in the forest
- **error** (float): The error (again could be either the out-of-bag or
validation error) of the single tree
- **data** (string): Relative path to the CSV file containing the tree data
#### Tree CSV
For each tree specified in the `forest.json` RFVis expects a CSV file where one
entry represents one node in the tree. An entry has the following format:
- **id** (int): ID of the node
- **depth** (int) Depth of the node in the tree (starting at `0`)
- **n_node_samples** (int): Number of training samples reaching the node
- **impurity** (float): Impurity of the node (`0`-`1`)
- **value** (int[]): Class distribution within the node, i.e. every entry
represents the amount of samples within the node that respond to a specific
class. The index corresponds to the indices in `forest.classes`.
## Python API
RFVis also offers a Python API which works directly on a scikit-learn RandomForestClassifier.
You can find a working example under `examples/scikit_learn.py`.
The function `rfvis.gui()` visualizes a fitted RandomForestClassifier in a web based graphical user interface.
The server runs in a separate process and is available at `http://localhost:<port>`.
```python
gui(model, data=None, target=None, name=None, class_names=None, class_colors=None, port=8080)
```
Args:
- **model** ([sklearn.ensemble.RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)):
The model to visualize.
- **data** (array-like, shape=(n_samples, n_features)): The training input samples that were used to fit the model.
Used to compute the out-of-bag error and correlation of the individual trees.
If not provided, the forest view will have no significance.
- **target** (array-like, shape=n_samples): The target values (class labels) that were used to fit the model.
Used to compute the out-of-bag error and correlation of the individual trees.
If not provided, the forest view will have no significance.
- **name** (str): Optional name of the model which will be displayed in the frontend.
- **class_names** (List[str]): Optional list of names of the target classes
- **class_colors** (List[str]): Optional list of browser interpretable colors for the target classes.
See https://developer.mozilla.org/en-US/docs/Web/CSS/color_value.
- **port** (int): Port on which the frontend will run on. Defaults to 8080.
Returns:
- **process** (multiprocessing.Process): Subprocess that runs the server. Can be terminated with
[process.terminate()](https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Process.terminate).
## Development
The repository contains a `Pipfile` for conveniently creating a virtualenv
for development. Just install [pipenv](https://pipenv.readthedocs.io/en/latest/)
and run:
```
$ pipenv install
```
You can now e.g. start the server on the default port 8080 via:
```
$ pipenv run rfvis gui <path_to_forest_json>
```
Note that you need to build the frontend bundle first before you can
actually see the application working on `http://localhost:8080`.
To build the frontend you need Node.js installed. First install all
dev-dependencies by running the following
from within the `./rfvis/client` directory:
```
$ npm install
```
Now you can build a production-ready bundle via:
```
$ npm run build
```
If you have the Python server running you should now be able to see the
application at `http://localhost:8080`.
For developing on the frontend more conveniently run:
```
$ npm start
```
To start a development server with hot reloading at `http://localhost:3000`.
## Publications
If you are using RFVis in your research, please cite the following paper:
- Ronny Hänsch, Philipp Wiesner, Sophie Wendler, and Olaf Hellwich. "[Colorful Trees: Visualizing Random Forests for Analysis and Interpretation](https://ieeexplore.ieee.org/abstract/document/8658566)" In 2019 *IEEE Winter Conference on Applications of Computer Vision (WACV)*, pp. 294-302. IEEE, 2019.
| /rfvis-0.3.3.tar.gz/rfvis-0.3.3/README.md | 0.565419 | 0.989938 | README.md | pypi |
# rfwtools
This package provides commonly used functionality around CEBAF C100 RF Waveforms collected by the JLab harvestser. This
includes data management such as download capture files, reading data from disk, parsing label files, running feature
extraction tasks, and generating data reports and visualizations.
## Latest API Documentation
https://jeffersonlab.github.io/rfwtools/
## Installation
This package has been posted to PyPI to ease installation.
```bash
pip install rfwtools
```
If you would rather edit the code while using it you should do a git clone to a local directory, then install that
package in edit-able mode.
```bash
cd /some/place
git clone https://github.com/JeffersonLab/rfwtools .
# Install the package (recommended that you use a virtual environment, etc.)
pip install -e /some/place/rfwtools
```
## Configuration
Internally the package leverages a Config class that contains directory locations, URLs for network services, etc. On
first reference, this class looks for and parses a config file, ./rfwtools.cfg. Below is simplified example file.
```yaml
data_dir: /some/path/rfw-research/data/waveforms/data/rf
label_dir: /some/path/rfw-research/data/labels
output_dir: /some/path/rfw-research/processed-output
```
data_dir
: Base directory containing RF waveform data directory structures (i.e., directory containing zone directories). This
path may include a symlink on Linux if you do not wish to duplicate data. The path structure should mimic that found in
opsdata.
label_dir
: Directory contain label files (typically provided by Tom Powers)
output_dir
: Default directory for writing/reading saved files and other processed output
If no file is found, file system paths are relative to the project base, which is assumed to be the current working
directory. You can adjust these parameters in code as in the example below.
```python
from rfwtools.config import Config
Config().data_dir = "/some/new/path"
```
## Usage
Previous usage of this was to download a template directory structure with source code. This proved cumbersome, and
did not result in widespread usage. Below is a simple example that assume the above locations were sensibly defined.
It shows some of what you can accomplish with the package.
```python
from rfwtools.data_set import DataSet
from rfwtools.extractor.autoregressive import autoregressive_extractor
# Create a DataSet. For demo-purposes, I would make a small label file and run through. This can take hours/days to
# process all of our data
ds = DataSet(label_files=['my-sample-labels.txt'])
# This will process the label files you have and create an ExampleSet under ds.example_set
ds.produce_example_set()
# Save a CSV of the examples.
ds.save_example_set_csv("my_example_set.csv")
# Show data from label sources, color by fault_label
ds.example_set.display_frequency_barplot(x='label_source', color_by="fault_label")
# Show heatmaps for 1L22-1L26
ds.example_set.display_zone_label_heatmap(zones=['1L22', '1L23', '1L24', '1L25', '1L26'])
# Generate autoregressive features for this data set. This can take a while - e.g. a few seconds per example.
ds.produce_feature_set(autoregressive_extractor)
# Save the feature_set to a CSV
ds.save_feature_set_csv("my_feature_set.csv")
# Do dimensionality reduction
ds.feature_set.do_pca_reduction(n_components=10)
# Plot out some different aspects
# Color by fault, marker style by cavity
ds.feature_set.display_2d_scatterplot(hue="fault_label", style="cavity_label")
# Color by zone, marker style by cavity, only microphonics faults
ds.feature_set.display_2d_scatterplot(hue="zone", style="cavity_label", query="fault_label == 'Microphonics'")
```
## Developer Notes
Here are some notes on the development process.
First clone the repo. Then create a venv for development.
```bash
git clone https://github.com/JeffersonLab/rfwtools
python3.7 -m venv venv
```
Activate the venv and install the development requirements. These packages are used strictly in packaging, deploying,
and testing
```bash
source venv/bin/activate.csh
pip3 install -r requirements-dev.txt
```
Now you can build wheels and source distributions, run unit tests, and upload to the test PyPI or PyPI. One thing
I like to do is create a project in a different directory and then install this package in editable mode. Instead
of actually installing it, pip creates a symlink back to your package directory and your source changes are reflected
in the downstream project without reinstalling. You do have to re-import packages or restart your interpreter though.
```bash
mkdir /some/other/my_project
cd /some/other/my_project
python -m venv venv
source venv/bin/activate.csh
pip install -e /path/to/rfwtools
```
If you want to make source changes then you will need to install the packages in requirements.txt. The versions
listed where the ones used in the last development cycle. You may want to update those versions, but make sure to
test!
```bash
pip install -r requirements.txt
```
To run a unittests in multiple environments. Windows and linux have slightly different configurations. These match
the environment lists.
```bash
tox -e py37-windows
tox -e py37-linux
```
To run them directly in an IDE with builtin test runner, do the equivalent of this.
```bash
cd /path/to/.../rfwtools
python3 -m unittest
```
To build documentation that can be used in github.
From windows:
```PowerShell
cd docsrc
.\make.bat github
git add .
git commit -m"Updated documentation"
```
From Linux:
```bash
docsrc/build-docs.bash
git add .
git commit -m"Updated documentation"
```
You should increment version numbers in setup.cfg and put out a new package to PyPI once a release is ready (shown below)
.
Update the requirements files if they changed. At a minimum, this should always be requirements.txt. See comments
below for details.
```bash
pip freeze > requirements.txt
```
To build distributions. You may need to remove directory content if rebuilding
```bash
rm dist/*
```
To upload to the test PyPI repo. You may need to add the `--cert /etc/pki/tls/cert.pem` option for SSL problems.
Make sure to edit setup.cfg with latest info as shown below using vi and have built the package.
```csh
vi setup.cfg
source venv/bin/activate.csh
python -m build
twine upload --repository testpypi dist/*
```
To upload to the production PyPI repo. First edit setup.cfg with latest info.
```bash
twine upload --repository pypi dist/*
```
To install from production PyPI:
```bash
pip install rfwtools
```
To install from Test PyPI:
```bash
pip3 install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ rfwtools
```
### Additional Developer Comments
requirements.txt are the versions that were used from within my IDE and with my IDE unit test runner. This is the set
that worked during development, installation set will probably be different. This is every installed package, not just
the ones that my package directly uses.
requirements-dev.txt are the versions of tools required to build, test, and distribute the package. These are the set
that worked during the last development cycle.
requirements-testing.txt are the packages that needed to be installed for testing to work. They are basically the same
as requirements.txt, but with a few extras used exclusively in tests and the local rfwtools package itself.
### Certified Notes
The process for certified installation are largely captured in the `setup-certified.bash` script. Most of the basic
developer process is the same, but you will need to run through the certified installation process completely to make
sure that everything works as expected. At the end of this process you will have dropped the package files in a
directory. That's all the get installed in the certified area.
1. Generate a certified tarball once you think development is done.
```./setup-certified tarball rfwtools<version>```
2. Copy this tarball to a temp directory and unzip it.
```csh
cd ..
mkdir tmp
mv rfwtools<version>.tar.gz tmp
cd tmp
tar -xzf rfwtools<version>.tar.gz
cd rfwtools<version>
```
3. Now run through the standard process described by `setup-certified.bash -h`. Make sure to review the docs directory
when done. This is something like the following:
```bash
./setup-certified.bash test
./setup-certified.bash docs
./setup-certified.bash build
```
4. You can also test the final installation if you have a target directory ready. You should find some wheel or tar.gz
files in the target directory when done.
```bash
mkdir -p /tmp/pretend-certified/rfwtools/<version>
./setup-certified.bash install /tmp/pretend-certified/rfwtools/<version>
```
5. Now compact the tarball to ensure that the to-be-archived code is what you want.
```bash
./setup-certified.bash compact
``` | /rfwtools-1.3.0.tar.gz/rfwtools-1.3.0/README.md | 0.73914 | 0.849535 | README.md | pypi |
# JSPyBridge
[](http://npmjs.com/package/pythonia)
[](https://pypi.org/project/javascript/)
[](https://github.com/extremeheat/JSPyBridge/actions/workflows/)
[](https://gitpod.io/#https://github.com/extremeheat/jspybridge)
Interoperate Node.js and Python. You can run Python from Node.js, *or* run Node.js from Python. **Work in progress.**
Requires Node.js 14 and Python 3.8 or newer.
## Key Features
* Ability to call async and sync functions and get object properties with a native feel
* Built-in garbage collection
* Bidirectional callbacks with arbitrary arguments
* Iteration and exception handling support
* Object inspection allows you to easily `console.log` or `print()` any foreign objects
* (Bridge to call Python from JS) Python class extension and inheritance. [See pytorch and tensorflow examples](https://github.com/extremeheat/JSPyBridge/blob/master/examples/javascript/pytorch-train.js).
* (Bridge to call JS from Python) Native decorator-based event emitter support
* (Bridge to call JS from Python) **First-class Jupyter Notebook/Google Colab support.** See some Google Colab uses below.
## Basic usage example
See some examples [here](https://github.com/extremeheat/JSPyBridge/tree/master/examples). See [documentation](https://github.com/extremeheat/JSPyBridge#documentation) below and in [here](https://github.com/extremeheat/JSPyBridge/tree/master/docs).
### Access JavaScript from Python
```sh
pip3 install javascript
```
```py
from javascript import require, globalThis
chalk, fs = require("chalk"), require("fs")
print("Hello", chalk.red("world!"), "it's", globalThis.Date().toLocaleString())
fs.writeFileSync("HelloWorld.txt", "hi!")
```
### Access Python from JavaScript
Make sure to have the dependencies installed before hand!
```sh
npm i pythonia
```
```js
import { python } from 'pythonia'
// Import tkinter
const tk = await python('tkinter')
// All Python API access must be prefixed with await
const root = await tk.Tk()
// A function call with a $ suffix will treat the last argument as a kwarg dict
const a = await tk.Label$(root, { text: 'Hello World' })
await a.pack()
await root.mainloop()
python.exit() // Make sure to exit Python in the end to allow node to exit. You can also use process.exit.
```
### Examples
[](https://gitpod.io/#https://github.com/extremeheat/jspybridge)
Check out some cool examples below! Try them on Gitpod! Click the Open in Gitpod link above, and then open the examples folder.
[](https://github.com/extremeheat/JSPyBridge/blob/master/examples/javascript/pytorch-train.js)
[](https://github.com/extremeheat/JSPyBridge/blob/master/examples/javascript/matplotlib.js)
[](https://github.com/extremeheat/JSPyBridge/blob/master/examples/javascript/tensorflow.js)
[](https://github.com/extremeheat/JSPyBridge/blob/master/examples/python/mineflayer.py)
<!-- <img src="https://matplotlib.org/stable/_static/logo2_compressed.svg" alt="matplotlib" width="120" height="70">
-->
### Bridge feature comparison
Unlike other bridges, you may notice you're not just writing Python code in JavaScript, or vice-versa. You can operate on objects
on the other side of the bridge as if the objects existed on your side. This is achieved through real interop support: you can call
callbacks, and do loss-less function calls with any arguments you like (with the exception of floating point precision of course).
| | python(ia) bridge | javascript bridge | [npm:python-bridge](https://www.npmjs.com/package/python-bridge) |
|---|---|---|---|
| Garbage collection | ✔ | ✔ | ❌ |
| Class extension support | ✔ | Not built-in (rare use case), can be manually done with custom proxy | ❌ |
| Passthrough stdin | ❌ (Standard input is not piped to bridge processes. Instead, listen to standard input then expose an API on the other side of the bridge to receive the data.) | ❌ | ✔ |
| Passthrough stdout, stderr | ✔ | ✔ | ✔ |
| Long-running sync calls | ✔ | ✔ | ✔ |
| Long-running async calls | ❌ (need to manually create new thread) | ✔ (AsyncTask) | ❌ (need to manually create new thread) |
| Callbacks | ✔ | ✔ | ❌ |
| Call classes | ✔ | ✔ | |
| Iterators | ✔ | ✔ | ❌ |
| Inline eval | ✔ | ✔ | |
| Dependency Management | ❌ | ✔ | ❌ |
| Local File Imports | ✔ | ✔ | ❌ |
| Error Management | ✔ | ✔ | ✔ |
| Object inspection | ✔ | ✔ | ❌ |
## Who's using it
* [PrismarineJS/mineflayer](https://github.com/PrismarineJS/mineflayer) -- [](https://colab.research.google.com/github/PrismarineJS/mineflayer/blob/master/docs/mineflayer.ipynb)
# Documentation
## From Python
You can import the bridge module with
```py
from javascript import require
```
This will import the require function which you can use just like in Node.js. This is a slightly
modified require function which does dependency management for you. The first parameter is the name
or location of the file to import. Internally, this calls the ES6 dynamic `import()` function, which
supports both CommonJS and ES6 modules.
If you are passing a module name (does not start with / or include a .) such as 'chalk', it will search
for the dependency in the internal node_module folder and if not found, install it automatically.
This install will only happen once, it won't impact startup afterwards.
The second parameter to the built-in require function is the version of the package you want, for
example `require('chalk', '^3')` to get a version greater than major version 3. Just like you would
if you were using `npm install`. It's recommended to only use the major version as the name and version
will be internally treated as a unique package, for example 'chalk--^3'. If you leave this empty,
we will install `latest` version instead, or use the version that may already be installed globally.
### Usage
* All function calls to JavaScript are thread synchronous
* ES6 classes can be constructed without new
* ES5 classes can be constructed with the .new pseudo method
* Use `@On` decorator when binding event listeners. Use `off()` to disable it.
* All callbacks run on a dedicated callback thread. DO NOT BLOCK in a callback or all other events will be blocked. Instead:
* Use the @AsyncTask decorator when you need to spawn a new thread for an async JS task.
For more, see [docs/python.md](https://github.com/extremeheat/JSPyBridge/blob/master/docs/python.md).
### Usage
<details>
<summary>👉 Click here to see some code usage examples 👈</summary>
### Basic import
Let's say we have a file in JS like this called `time.js` ...
```js
function whatTimeIsIt() {
return (new Date()).toLocaleString()
}
module.exports = { whatTimeIsIt }
```
Then we can call it from Python !
```py
from javascript import require
time = require('./time.js')
print(time.whatTimeIsIt())
```
### Event emitter
*You must use the provided On, Once, decorator and off function over the normal dot methods.*
emitter.js
```js
const { EventEmitter } = require('events')
class MyEmitter extends EventEmitter {
counter = 0
inc() {
this.emit('increment', ++this.counter)
}
}
module.exports = { MyEmitter }
```
listener.py
```py
from javascript import require, On, off
MyEmitter = require('./emitter.js')
# New class instance
myEmitter = MyEmitter()
# Decorator usage
@On(myEmitter, 'increment')
def handleIncrement(this, counter):
print("Incremented", counter)
# Stop listening. `this` is the this variable in JS.
off(myEmitter, 'increment', handleIncrement)
# Trigger the event handler
myEmitter.inc()
```
### ES5 class
es5.js
```js
function MyClass(num) {
this.getNum = () => num
}
module.exports = { MyClass }
```
es5.py
```py
MyClass = require('./es5.js')
myClass = MyClass.new(3)
print(myClass.getNum())
```
### Iteration
items.js
```js
module.exports = { items: [5, 6, 7, 8] }
```
items.py
```py
items = require('./items.js')
for item in items:
print(item)
```
### Callback
callback.js
```js
export function method(cb, salt) {
cb(42 + salt)
}
```
callback.py
```py
method = require('./callback').method
# Example with a lambda, but you can also pass a function ref
method(lambda v: print(v), 2) # Prints 44
```
</details>
## From JavaScript
* All the Python APIs are async. You must await them all.
* Use `python.exit()` or `process.exit()` at the end to quit the Python process.
* This library doesn't manage the packaging.
* Right now you need to install all the deps from pip globally, but later on we may allow loading from pip-envs.
* When you do a normal Python function call, you can supply "positional" arguments, which must
be in the correct order to what the Python function expects.
* Some Python objects accept arbitrary keyword arguments. You can call these functions by using
the special `$` function syntax.
* When you do a function call with a `$` before the parenthesis, such as `await some.pythonCall$()`,
the final argument is evaluated as a kwarg dictionary. You can supply named arguments this way.
* Property access with a $ at the end acts as an error suppression operator.
* Any errors will be ignored and instead undefined will be returned
* See [docs/javascript.md](docs/javascript.md) for more docs, and the examples for more info
### Usage
<details>
<summary>👉 Click here to see some code usage examples 👈</summary>
### Basic import
Let's say we have a file in Python like this called `time.py` ...
```py
import datetime
def what_time_is_it():
return str(datetime.datetime.now())
```
Then we can call it from JavaScript !
```js
import { python } from 'pythonia'
const time = await python('./time.py')
console.log("It's", await time.what_time_is_it())
python.exit()
```
### Iterating
* When iterating a Python object, you *must* use a `for await` loop instead of a normal `for-of` loop.
iter.py
```py
import os
def get_files():
for f in os.listdir():
yield f
```
iter.js
```js
const iter = await python('./iter.py')
const files = await iter.get_files()
for await (const file of files) {
console.log(file)
}
```
</details>
## Details
* When doing a function call, any foreign objects will be sent to you as a reference. For example,
if you're in JavaScript and do a function call to Python that returns an array, you won't get a
JS array back, but you will get a reference to the Python array. You can still access the array
normally with the [] notation, as long as you use await. If you would like the bridge to turn
the foreign reference to something native, you can request a primitive value by calling `.valueOf()`
on the Python array. This would give you a JS array. It works the same the other way around.
* The above behavior makes it very fast to pipe data from one function onto another, avoiding costly
conversions.
* This above behavior is not present for callbacks and function parameters. The bridge will try to
serialize what it can, and will give you a foreign reference if it's unable to serialize something.
So if you pass a JS object, you'll get a Python dict, but if the dict contains something like a class,
you'll get a reference in its place.
#### Notable details
* The `ffid` keyword is reserved. You cannot use it in variable names, object keys or values as this is used to internally track objects.
* On the bridge to call JavaScript from Python, due to the limitations of Python and cross-platform IPC, we currently communicate over standard error which means that JSON output in JS standard error can interfere with the bridge. The same issue exists on Windows with python. You are however very unlikely to have issues with this.
* You can set the Node.js/Python binary paths by setting the `NODE_BIN` or `PYTHON_BIN` environment variables before importing the library. Otherwise, the `node` and `python3` or `python` binaries will be called relative to your PATH environment variable.
| /rg_javascript-1!1.0.4.tar.gz/rg_javascript-1!1.0.4/README.md | 0.437343 | 0.908089 | README.md | pypi |
from Acquisition import ImplicitAcquisitionWrapper
from UserDict import UserDict
from collective.z3cform.datagridfield.blockdatagridfield import BlockDataGridField # noqa
from plone.app.textfield import RichText
from plone.app.textfield.interfaces import IRichText
from plone.app.textfield.widget import IRichTextWidget, RichTextWidget
from plone.app.z3cform.utils import closest_content
from plone.directives import form
from rg.infocard import rg_infocard_msgfactory as _
from z3c.form.interfaces import IFieldWidget, IFormLayer
from z3c.form.widget import FieldWidget
from zope.interface import Interface
from zope import schema
from zope.component import adapter
from zope.interface import implementer
class InfocardDataGridField(BlockDataGridField):
    ''' A BlockDataGridField that additionally lets editors reorder
    the grid rows.
    '''
    # Switch on the row-reordering handles provided by datagridfield.
    allow_reorder = True
@adapter(schema.interfaces.IField, IFormLayer)
@implementer(IFieldWidget)
def InfocardDataGridFieldFactory(field, request):
    """IFieldWidget factory for DataGridField.

    Registered as a (field, request) adapter so z3c.form can look up the
    widget; wraps the field in an InfocardDataGridField.
    """
    return FieldWidget(field, InfocardDataGridField(request))
class ICellRichTextWidget(IRichTextWidget):
    ''' Marker interface for the custom Rich Text widget used inside
    datagrid (DG) cells.
    '''
@implementer(ICellRichTextWidget)
class CellRichTextWidget(RichTextWidget):
    ''' Custom Rich Text widget to be used in DG cells
    '''
    def wrapped_context(self):
        # Inside a datagrid cell the widget's form is a subform, so the
        # acquisition context must come from the parent form's context.
        return self.form.parentForm.context
@adapter(IRichText, IFormLayer)
@implementer(ICellRichTextWidget)
def CellRichTextFieldWidget(field, request):
    """IFieldWidget factory for CellRichTextWidget.

    Adapts (rich-text field, request) to the cell-aware rich text widget.
    """
    return FieldWidget(field, CellRichTextWidget(request))
class IInfocardComplexField(Interface):
    """Schema for one row of the infocard "complex field" datagrid:
    a labelled, optionally public, rich-text value.
    """
    # Label shown next to the value.
    arg_title = schema.TextLine(
        title=_("infocard_complex_field_title", "Label"),
        default=u"",
        required=True,
    )
    # Whether this row is visible to the public.
    arg_public = schema.Bool(
        title=_("infocard_complex_field_public", "Public?"),
        default=False,
        required=True,
    )
    # The rich-text payload of the row.
    arg_value = RichText(
        title=_("infocard_complex_field_value", "Value"),
        default=u"",
        required=False,
    )
form.widget(arg_value=CellRichTextFieldWidget) | /rg.infocard-0.9.5.tar.gz/rg.infocard-0.9.5/rg/infocard/models/base.py | 0.614278 | 0.16378 | base.py | pypi |
from .. import rg_infocard_msgfactory as _
from ..models.infocardcontainer import IInfocardcontainer
from ..vocs.infocard_recipients import InfocardRecipients
from ..vocs.infocard_servicetypes import InfocardServicetypes
from Products.CMFPlone import PloneMessageFactory as __
from five import grok
from plone import api
from plone.directives.form import Schema
from plone.directives.form import SchemaForm
from z3c.form import button
from zope import schema
from zope.interface import Invalid
from zope.interface import invariant
class IInfocardcontainerSearchForm(Schema):
    """ Define form fields """
    # Free-text query matched against the infocard's searchable text.
    text = schema.TextLine(
        title=_(
            'label_search_text',
            u'Search text'
        ),
        required=False,
    )
    # Restrict results to one service type (context-provided vocabulary).
    servicetype = schema.Choice(
        title=_(
            'label_servicetype',
            u"Service type"
        ),
        source=InfocardServicetypes,
        required=False,
    )
    # Restrict results to one recipient (context-provided vocabulary).
    recipient = schema.Choice(
        title=_(
            'label_for_who_is_it',
            u"For who is it?"
        ),
        source=InfocardRecipients,
        required=False,
    )
    @invariant
    def at_least_one(data):
        # Reject a completely empty query: at least one of the three
        # search fields must be filled in.
        if data.servicetype or data.recipient or data.text:
            return
        raise Invalid(
            _(
                'label_at_least_one_search_parameter',
                u'You should specify at least one search parameter'
            )
        )
class Form(SchemaForm):
    '''Search form registered as the default "view" of an
    infocardcontainer.
    '''
    grok.name('view')
    grok.require('zope2.View')
    grok.context(IInfocardcontainer)
    @property
    def template(self):
        return grok.PageTemplateFile('templates/infocardcontainer_search.pt')
    # The form is a pure search form: never read defaults from context.
    ignoreContext = True
    schema = IInfocardcontainerSearchForm
    # GET so a search can be bookmarked/shared.
    method = "get"
    label = u""
    description = u""
    # Columns rendered in the results table (id -> translated label).
    table_fields = [
        {
            'id': 'title',
            'label': __('title'),
        },
        {
            'id': 'description',
            'label': __('description'),
        },
        {
            'id': 'servicetypes',
            'label': _(
                'label_servicetypes',
                u"Service types"
            ),
        },
        {
            'id': 'recipients',
            'label': _(
                'label_for_who_is_it',
                u"For who is it?"
            ),
        },
    ]
def accept_infocard(self, infocard, data):
''' Given the data in the parameters filter the infocard
'''
if data.get('servicetype'):
if not data.get('servicetype') in infocard.servicetypes:
return False
if data.get('recipient'):
if not data.get('recipient') in infocard.recipients:
return False
if data.get('text'):
infocard_view = api.content.get_view('view', infocard, self.request) # noqa
if not data.get('text').lower() in infocard_view.searched_text:
return False
return True
def search_results(self, data):
'''
'''
infocards = self.context.listFolderContents(
{'portal_type': 'infocard'}
)
results = []
for infocard in infocards:
if self.accept_infocard(infocard, data):
infocard_view = api.content.get_view('view', infocard, self.request) # noqa
results.append(
{
'review_state': api.content.get_state(infocard),
'url': infocard.absolute_url,
'title': infocard.title,
'description': infocard.description,
'servicetypes': infocard_view.servicetypes,
'recipients': infocard_view.recipients,
},
)
sorted(results, key=lambda x: x['title'])
return results
    @button.buttonAndHandler(__('label_search', u'Search'))
    def handleSearch(self, action):
        """Validate the submitted query and store the matching
        infocards on ``self.results`` for the template to render.
        """
        self.searching = True
        data, errors = self.extractData()
        if errors:
            # Invalid form: show the standard error message, no results.
            self.status = self.formErrorsMessage
            self.results = []
            return
        self.results = self.search_results(data)
@button.buttonAndHandler(__('label_cancel', u'Cancel'))
def handleCancel(self, action):
"""User cancelled. Redirect back to the front page.
""" | /rg.infocard-0.9.5.tar.gz/rg.infocard-0.9.5/rg/infocard/browser/infocardcontainer_search.py | 0.484624 | 0.156749 | infocardcontainer_search.py | pypi |
from .. import rg_infocard_logger as logger
from .. import rg_infocard_msgfactory as _
from ..vocs.infocard_servicetypes import InfocardServicetypes
from ..vocs.infocard_recipients import InfocardRecipients
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone import api
from plone.app.form.widgets.wysiwygwidget import WYSIWYGWidget
from plone.app.portlets.portlets import base
from plone.app.vocabularies.catalog import SearchableTextSourceBinder
from plone.memoize.view import memoize
from plone.portlets.interfaces import IPortletDataProvider
from zope.formlib import form
from zope.interface import implementer
from zope import schema
class ISearchPortlet(IPortletDataProvider):
    """A portlet that allows searching in an infocard container
    """
    # Optional portlet name, used to build the displayed title.
    name = schema.TextLine(
        title=_(u"name", default=u"Portlet name"),
        default=u'Infocard search portlet',
        required=False,
    )
    # Whether to render a title above the portlet.
    display_title = schema.Bool(
        title=_(u"label_display_title", default=u"Display title"),
        description=_(
            u"help_display_title",
            u"If checked the portlet will display a title based on name"
        ),
        default=True,
        required=True,
    )
    # Path of the infocardcontainer the search form submits to.
    target = schema.Choice(
        title=_(u"Target infocard container"),
        description=_(
            "help_target",
            u"Choose the infocard container in which you can search"
        ),
        source=SearchableTextSourceBinder({'portal_type': 'infocardcontainer'})
    )
    # Whether to show the extra servicetype/recipient filter selects.
    display_filters = schema.Bool(
        title=_('label_display_filters', u"Display filters"),
        description=_(
            "help_display_filters",
            u'By default the portlet displays one input '
            u'to search on infocard text. '
            u'If you select this checkbox two additional selects will appear. '
            u'They will allow to search in the fields '
            u'"Service type" and "Recipient".'
        ),
        default=False,
    )
    # Free rich text rendered before the search fields.
    text_before = schema.Text(
        title=_(u"Text before search fields"),
        description=_(u"This text will appear before the search fields"),
        required=False
    )
    # Free rich text rendered after the search fields.
    text_after = schema.Text(
        title=_(u"Text after search fields"),
        description=_(u"This text will appear after the search fields"),
        required=False
    )
@implementer(ISearchPortlet)
class Assignment(base.Assignment):
    """Portlet assignment: stores the configuration values entered in
    the add/edit forms."""
    def __init__(
        self,
        name=u'',
        display_title=True,
        target=None,
        display_filters=False,
        text_before=u"",
        text_after=u""
    ):
        self.name = name
        self.display_title = display_title
        self.target = target
        self.display_filters = display_filters
        self.text_before = text_before
        self.text_after = text_after
    @property
    def title(self):
        # Title shown in the portlet management UI.
        # NOTE(review): ``self.data`` is presumably supplied by
        # base.Assignment and resolves to this assignment itself.
        title = u"Infocard search"
        if self.data.name:
            title = u"%s: %s" % (title, self.data.name)
        return title
class AddForm(base.AddForm):
    """Portlet add form: builds an Assignment from the schema data."""
    label = _(u"Add Infocard search portlet")
    description = _(u"This portlet displays a search form.")
    schema = ISearchPortlet
    form_fields = form.Fields(schema)
    # Rich text fields get a WYSIWYG editor instead of a plain textarea.
    form_fields['text_after'].custom_widget = WYSIWYGWidget
    form_fields['text_before'].custom_widget = WYSIWYGWidget
    def create(self, data):
        return Assignment(**data)
class EditForm(base.EditForm):
    """Portlet edit form."""
    # BUGFIX: the label said "Edit Recent Portlet" — copy-pasted from
    # another portlet; use this portlet's own name.
    label = _(u"Edit Infocard search portlet")
    description = _(u"This portlet displays a search form.")
    schema = ISearchPortlet
    form_fields = form.Fields(schema)
    # Rich text fields get a WYSIWYG editor instead of a plain textarea.
    form_fields['text_after'].custom_widget = WYSIWYGWidget
    form_fields['text_before'].custom_widget = WYSIWYGWidget
class Renderer(base.Renderer):
    ''' Render the search form
    '''
    # Template that draws the portlet search form.
    render = ViewPageTemplateFile('search.pt')
@memoize
def available(self):
'''
The portlet will be available if the target is visible
'''
return all(
self.target,
'View' in api.user.get_permissions(obj=self.target),
)
@property
@memoize
def target(self):
''' Get's the object related to the target
'''
try:
return api.portal.get().unrestrictedTraverse(self.data.target[1:])
except:
msg = "Unable to find target: %s" % self.data.target
logger.exception(msg)
    @property
    @memoize
    def display_title(self):
        ''' Check out the configuration to see if we can display title
        '''
        return self.data.display_title
    @property
    @memoize
    def text_before(self):
        ''' Configured text displayed before the search fields
        '''
        return self.data.text_before
    @property
    @memoize
    def text_after(self):
        ''' Configured text displayed after the search fields
        '''
        return self.data.text_after
    @property
    @memoize
    def display_filters(self):
        ''' Check out the configuration to see if we can display additional
        checkboxes for filtering on recipients and service types
        '''
        return self.data.display_filters
    @property
    @memoize
    def recipients(self):
        ''' Get the recipient vocabulary for the target container
        '''
        return InfocardRecipients(self.target)
@property
@memoize
def servicetypes(self):
''' Get the recipient vocabulary for target
'''
return InfocardServicetypes(self.target) | /rg.infocard-0.9.5.tar.gz/rg.infocard-0.9.5/rg/infocard/portlets/search.py | 0.654453 | 0.239049 | search.py | pypi |
from DateTime import DateTime
from Products.Five.browser import BrowserView
from datetime import datetime, timedelta, date
from plone import api
from plone.memoize.view import memoize
from rg.prenotazioni import get_or_create_obj, tznow, _
from rg.prenotazioni.adapters.booker import IBooker
from rg.prenotazioni.adapters.conflict import IConflictManager
from rg.prenotazioni.adapters.slot import ISlot, BaseSlot
from rg.prenotazioni.utilities.urls import urlify
def hm2handm(hm):
    """ Split an "HHMM" string into its hour and minute parts.

    :param hm: a string in the format "%H%M"
    :return: an (hour, minute) pair of two-character strings
    :raises ValueError: if hm is falsy, not a string, or not 4 chars long
    """
    if not (hm and isinstance(hm, basestring) and len(hm) == 4):
        raise ValueError(hm)
    return (hm[:2], hm[2:])
def hm2DT(day, hm):
    """ Combine a date and an "HHMM" string into a zope DateTime in the
    local timezone.

    :param day: a datetime date
    :param hm: a string in the format "%H%M"; falsy returns None
    """
    if not hm:
        return None
    hour, minute = hm2handm(hm)
    day_str = day.strftime("%Y/%m/%d")
    zone = DateTime().timezone()
    return DateTime('%s %s:%s %s' % (day_str, hour, minute, zone))
def hm2seconds(hm):
    """ Convert an "HHMM" string to seconds from midnight.

    :param hm: a string in the format "%H%M"; falsy returns None
    """
    if not hm:
        return None
    hours, minutes = hm2handm(hm)
    return 3600 * int(hours) + 60 * int(minutes)
class PrenotazioniContextState(BrowserView):
    """
    Helper view exposing the booking (prenotazioni) state of the
    current context: user permissions, bookable days, gates and slots.
    """
    # Workflow states in which a booking still occupies its slot.
    active_review_state = ['published', 'pending']
    add_view = 'prenotazione_add'
    # Portal types of the year/week/day container hierarchy.
    day_type = 'PrenotazioniDay'
    week_type = 'PrenotazioniWeek'
    year_type = 'PrenotazioniYear'
    # Placeholder entry rendered when every gate of a slot is taken.
    busy_slot_booking_url = {
        'url': '',
        'title': _('busy', u'Busy'),
    }
    # Placeholder entry rendered when a slot cannot be booked at all.
    unavailable_slot_booking_url = {
        'url': '',
        'title': ' ',
    }
    @property
    @memoize
    def is_anonymous(self):
        """
        True when the current user is not authenticated
        """
        return api.user.is_anonymous()
    @property
    @memoize
    def user_permissions(self):
        """ The dict with the user permissions on this context
        """
        return api.user.get_permissions(obj=self.context)
    @property
    @memoize
    def user_roles(self):
        """ The roles of the current user on this context
        """
        return api.user.get_roles(obj=self.context)
@property
@memoize
def user_can_manage(self):
""" States if the authenticated user can manage this context
"""
if self.is_anonymous:
return False
if self.user_permissions.get('Modify portal content', False):
return True
return False
@property
@memoize
def user_can_view(self):
""" States if the authenticated user can manage this context
"""
if self.is_anonymous:
return False
if self.user_can_manage:
return True
if u'Reader' in self.user_roles:
return True
return False
    @property
    @memoize
    def user_can_search(self):
        """ States if the user can see the search button
        """
        return self.user_can_manage
    @property
    @memoize
    def booker(self):
        """
        Return the booker adapter for this context
        """
        return IBooker(self.context.aq_inner)
    @property
    @memoize
    def today(self):
        """ Cache for today's date
        """
        return date.today()
    @property
    @memoize
    def yesterday(self):
        """ Cache for yesterday's date
        """
        return self.today - timedelta(days=1)
    @property
    @memoize
    def tomorrow(self):
        """ Cache for tomorrow's date
        """
        return self.today + timedelta(days=1)
    @property
    @memoize
    def first_bookable_day(self):
        """ The first day when you can book stuff: never before
        tomorrow, and never before the configured start date
        ;return; a datetime.date object
        """
        return max(
            self.context.getDaData(),
            self.tomorrow
        )
@property
@memoize
def last_bookable_day(self):
""" The last day (if set) when you can book stuff
;return; a datetime.date object or None
"""
adata = self.context.getAData()
if not adata:
return
return adata
@memoize
def is_vacation_day(self, date):
"""
Check if today is a vacation day
"""
year = repr(date.year)
date_it = date.strftime('%d/%m/%Y')
holidays = self.context.getFestivi()
if not holidays:
return False
for holiday in holidays:
if date_it in holiday.replace('*', year):
return True
return False
@memoize
def is_configured_day(self, day):
""" Returns True if the day has been configured
"""
weekday = day.weekday()
week_table = self.context.getSettimana_tipo()
day_table = week_table[weekday]
return any((day_table['inizio_m'],
day_table['end_m'],
day_table['inizio_p'],
day_table['end_p'],))
def is_before_allowed_period(self, day):
""" Returns True if the day is before the first bookable day
"""
date_limit = self.minimum_bookable_date
if not date_limit:
return False
if day <= date_limit.date():
return True
return False
    @memoize
    def is_valid_day(self, day):
        """ Returns True if the day is bookable: inside the allowed
        window, not a vacation day, and with a configured schedule
        """
        if day < self.first_bookable_day:
            return False
        if self.is_vacation_day(day):
            return False
        if self.last_bookable_day and day > self.last_bookable_day:
            return False
        if self.is_before_allowed_period(day):
            return False
        return self.is_configured_day(day)
    @property
    @memoize
    def conflict_manager(self):
        """
        Return the conflict manager for this context
        """
        return IConflictManager(self.context.aq_inner)
@memoize
def get_state(self, context):
""" Facade to the api get_state method
"""
if not context:
return
return api.content.get_state(context)
    @property
    @memoize
    def remembered_params(self):
        """ The request parameters worth propagating to booking links:
        non-empty ``form.*`` fields (except actions and the booking
        date itself) plus the column-disabling flags.
        """
        params = dict(
            (key, value)
            for key, value in self.request.form.iteritems()
            if (value
                and key.startswith('form.')
                and not key.startswith('form.action')
                and not key in ('form.booking_date',
                                )
                or key in ('disable_plone.leftcolumn',
                           'disable_plone.rightcolumn')
                )
        )
        # Values must be utf8 bytes to be reusable in URLs (Python 2).
        for key, value in params.iteritems():
            if isinstance(value, unicode):
                params[key] = value.encode('utf8')
        return params
    @property
    @memoize
    def base_booking_url(self):
        """ Return the base booking url (no parameters) for this context
        """
        return ('%s/%s' % (self.context.absolute_url(), self.add_view))
    def get_booking_urls(self, day, slot, slot_min_size=0):
        """ Returns, if possible, the booking urls for every bookable
        time of ``slot`` on ``day`` (empty list when the day is not
        valid or beyond the maximum bookable date).
        """
        # we have some conditions to check
        if not self.is_valid_day(day):
            return []
        if self.maximum_bookable_date:
            if day > self.maximum_bookable_date.date():
                return []
        date = day.strftime("%Y-%m-%d")
        params = self.remembered_params.copy()
        # Candidate times every 300 seconds (5 minutes) inside the slot.
        times = slot.get_values_hr_every(300, slot_min_size=slot_min_size)
        base_url = self.base_booking_url
        urls = []
        now_str = tznow().strftime("%Y-%m-%d %H:%M")
        for t in times:
            form_booking_date = " ".join((date, t))
            params['form.booking_date'] = form_booking_date
            booking_date = DateTime(params['form.booking_date']).asdatetime()  # noqa
            urls.append(
                {
                    'title': t,
                    'url': urlify(base_url, params=params),
                    # Mark full hours so templates can style them.
                    'class': t.endswith(':00') and 'oclock' or None,
                    'booking_date': booking_date,
                    # NOTE: string comparison works because both sides
                    # share the "%Y-%m-%d %H:%M" format.
                    'future': (now_str <= form_booking_date),
                }
            )
        return urls
def get_all_booking_urls_by_gate(self, day, slot_min_size=0):
""" Get all the booking urls divided by gate
"""
slots_by_gate = self.get_free_slots(day)
urls = {}
for gate in slots_by_gate:
slots = slots_by_gate[gate]
for slot in slots:
slot_urls = self.get_booking_urls(day, slot,
slot_min_size=slot_min_size)
urls.setdefault(gate, []).extend(slot_urls)
return urls
def get_all_booking_urls(self, day, slot_min_size=0):
""" Get all the booking urls
Not divided by gate
"""
urls_by_gate = self.get_all_booking_urls_by_gate(day, slot_min_size)
urls = {}
for gate in urls_by_gate:
for url in urls_by_gate[gate]:
urls[url['title']] = url
return sorted(urls.itervalues(), key=lambda x: x['title'])
    def is_slot_busy(self, day, slot):
        """ Check if a slot is busy (i.e. there is no free slot
        overlapping it on any gate)
        """
        free_slots = self.get_free_slots(day)
        for gate in free_slots:
            for free_slot in free_slots[gate]:
                intersection = slot.intersect(free_slot)
                if intersection:
                    # A zero-length intersection (touching boundaries)
                    # does not count as free time.
                    if intersection.lower_value != intersection.upper_value:
                        return False
        return True
    @memoize
    def get_anonymous_booking_url(self, day, slot, slot_min_size=0):
        """ Return the booking url for an anonymous user
        """
        # First we check if we have booking urls
        all_booking_urls = self.get_all_booking_urls(day, slot_min_size)
        if not all_booking_urls:
            # If not the slot can be unavailable or busy
            if self.is_slot_busy(day, slot):
                return self.busy_slot_booking_url
            else:
                return self.unavailable_slot_booking_url
        # Otherwise we check if the URL fits the slot boundaries
        slot_start = slot.start()
        slot_stop = slot.stop()
        for booking_url in all_booking_urls:
            if slot_start <= booking_url['title'] < slot_stop:
                if self.is_booking_date_bookable(booking_url['booking_date']):
                    return booking_url
        return self.unavailable_slot_booking_url
    @memoize
    def get_gates(self):
        """
        Get's the gates, available and unavailable.
        Falls back to a single unnamed gate when none are configured.
        """
        return self.context.getGates() or ['']
    @memoize
    def get_unavailable_gates(self):
        """
        Get's the gates declared unavailable
        """
        return self.context.getUnavailable_gates()
@memoize
def get_available_gates(self):
"""
Get's the gates declared available
"""
total = set(self.get_gates())
if self.get_unavailable_gates():
unavailable = set(self.get_unavailable_gates())
else:
unavailable = set()
return total - unavailable
    def get_busy_gates_in_slot(self, booking_date, booking_end_date=None):
        """
        The gates already associated to a Prenotazione object for booking_date
        :param booking_date: a DateTime object
        """
        active_review_states = ['published', 'pending']
        # Query all active bookings of that whole day.
        brains = self.conflict_manager.unrestricted_prenotazioni(
            Date={
                'query': [
                    DateTime(booking_date.date().__str__()),
                    DateTime(booking_date.date().__str__()) + 1
                ],
                'range': 'minmax'
            },
            review_state=active_review_states)
        gates = self.get_full_gates_in_date(
            prenotazioni=brains,
            booking_date=booking_date,
            booking_end_date=booking_end_date
        )
        # unavailable gates are always busy
        if self.get_unavailable_gates():
            gates.update(self.get_unavailable_gates())
        return gates
    def get_full_gates_in_date(self, prenotazioni, booking_date, booking_end_date=None):  # noqa
        """Return the set of gates whose existing bookings overlap the
        [booking_date, booking_end_date) interval."""
        gates = set()
        for brain in prenotazioni:
            prenotazione = brain._unrestrictedGetObject()
            start = prenotazione.getData_prenotazione()
            end = prenotazione.getData_scadenza()
            if booking_date < start:
                # new booking starts before current booking
                if booking_end_date > start:
                    # new booking intersect current booking
                    gates.add(prenotazione.getGate())
            elif booking_date == start:
                # starts at the same time, so disable current booking gate
                gates.add(prenotazione.getGate())
            else:
                if booking_date < end:
                    # new booking starts inside current booking interval
                    gates.add(prenotazione.getGate())
        return gates
def get_free_gates_in_slot(self, booking_date, booking_end_date=None):
"""
The gates not associated to a Prenotazione object for booking_date
:param booking_date: a DateTime object
"""
available = set(self.get_available_gates())
busy = set(self.get_busy_gates_in_slot(booking_date, booking_end_date))
return available - busy
    @memoize
    def get_day_intervals(self, day):
        """ Return the time ranges of this day keyed by period name
        ('morning', 'break', 'afternoon', 'day', 'stormynight')
        """
        weekday = day.weekday()
        week_table = self.context.getSettimana_tipo()
        day_table = week_table[weekday]
        # Convert hours to DateTime
        inizio_m = hm2DT(day, day_table['inizio_m'])
        end_m = hm2DT(day, day_table['end_m'])
        inizio_p = hm2DT(day, day_table['inizio_p'])
        end_p = hm2DT(day, day_table['end_p'])
        # Get's the daily schedule; the `or` fallbacks cover days with
        # only a morning or only an afternoon configured.
        day_start = inizio_m or inizio_p
        day_end = end_p or end_m
        break_start = end_m or end_p
        break_stop = inizio_p or end_m
        return {
            'morning': BaseSlot(inizio_m, end_m),
            'break': BaseSlot(break_start, break_stop),
            'afternoon': BaseSlot(inizio_p, end_p),
            'day': BaseSlot(day_start, day_end),
            # 'stormynight' spans the whole day (0-86400 seconds).
            'stormynight': BaseSlot(0, 86400),
        }
    @property
    @memoize
    def weektable_boundaries(self):
        """ Return the boundaries to draw the week table
        return a dict_like {'morning': slot1,
                            'afternoon': slot2}
        """
        week_table = self.context.getSettimana_tipo()
        boundaries = {}
        # Earliest start over the whole week...
        for key in ('inizio_m', 'inizio_p'):
            boundaries[key] = min(day_table[key]
                                  for day_table in week_table
                                  if day_table[key])
        # ...and latest end, so the table fits every configured day.
        for key in ('end_m', 'end_p'):
            boundaries[key] = max(day_table[key]
                                  for day_table in week_table
                                  if day_table[key])
        for key, value in boundaries.iteritems():
            boundaries[key] = hm2seconds(value)
        return {'morning': BaseSlot(boundaries['inizio_m'],
                                    boundaries['end_m'],),
                'afternoon': BaseSlot(boundaries['inizio_p'],
                                      boundaries['end_p'],),
                }
@property
@memoize
def maximum_bookable_date(self):
""" Return the maximum bookable date
return a datetime or None
"""
future_days = self.context.getFutureDays()
if not future_days:
return
date_limit = tznow() + timedelta(future_days)
return date_limit
@property
@memoize
def minimum_bookable_date(self):
""" Return the minimum bookable date
return a datetime or None
"""
notbefore_days = self.context.getNotBeforeDays()
if not notbefore_days:
return
date_limit = tznow() + timedelta(notbefore_days)
return date_limit
    def get_container(self, booking_date, create_missing=False):
        """ Return the container for bookings in this date
        :param booking_date: a date as a string, DateTime or datetime
        :param create_missing: if set to True and the container is missing,
                               create it
        """
        if isinstance(booking_date, basestring):
            booking_date = DateTime(booking_date)
        if not create_missing:
            # Containers are laid out as year/week/weekday folders.
            relative_path = booking_date.strftime('%Y/%W/%u')
            return self.context.unrestrictedTraverse(relative_path, None)
        year_id = booking_date.strftime('%Y')
        year = get_or_create_obj(self.context, year_id, self.year_type)
        week_id = booking_date.strftime('%W')
        week = get_or_create_obj(year, week_id, self.week_type)
        day_id = booking_date.strftime('%u')
        day = get_or_create_obj(week, day_id, self.day_type)
        return day
    @memoize
    def get_bookings_in_day_folder(self, booking_date):
        """
        The Prenotazione objects for today, unfiltered but sorted by dates
        :param booking_date: a date as a datetime or a string
        """
        day_folder = self.get_container(booking_date)
        if not day_folder:
            return []
        allowed_portal_type = self.booker.portal_type
        bookings = [item[1] for item in day_folder.items()
                    if item[1].portal_type == allowed_portal_type]
        # Sort by start date, then expiry date.
        bookings.sort(key=lambda x: (x.getData_prenotazione(),
                                     x.getData_scadenza()))
        return bookings
@memoize
def get_existing_slots_in_day_folder(self, booking_date):
"""
The Prenotazione objects for today
:param booking_date: a date as a datetime or a string
"""
bookings = self.get_bookings_in_day_folder(booking_date)
return map(ISlot, bookings)
    def get_busy_slots_in_stormynight(self, booking_date):
        """ This will show the slots that will not show elsewhere
        (i.e. outside both the morning and the afternoon intervals)
        """
        morning_slots = self.get_busy_slots_in_period(booking_date,
                                                      'morning')
        afternoon_slots = self.get_busy_slots_in_period(booking_date,
                                                        'afternoon')
        all_slots = self.get_existing_slots_in_day_folder(booking_date)
        return sorted([slot for slot in all_slots
                       if not (slot in morning_slots
                               or slot in afternoon_slots)])
    @memoize
    def get_busy_slots_in_period(self, booking_date, period='day'):
        """
        The busy slots objects for today: this filters the slots by review
        state
        :param booking_date: a datetime object
        :param period: a string
        :return: al list of slots
        [slot1, slot2, slot3]
        """
        if period == 'stormynight':
            return self.get_busy_slots_in_stormynight(booking_date)
        interval = self.get_day_intervals(booking_date)[period]
        allowed_review_states = ['pending', 'published']
        # all slots
        slots = self.get_existing_slots_in_day_folder(booking_date)
        # the ones in the interval
        slots = [slot for slot in slots if slot in interval]
        # the one with the allowed review_state
        slots = [slot for slot in slots
                 if self.get_state(slot.context) in allowed_review_states]
        return sorted(slots)
@memoize
def get_busy_slots(self, booking_date, period='day'):
""" This will return the busy slots divided by gate:
:param booking_date: a datetime object
:param period: a string
:return: a dictionary like:
{'gate1': [slot1],
'gate2': [slot2, slot3],
}
"""
slots_by_gate = {}
slots = self.get_busy_slots_in_period(booking_date, period)
for slot in slots:
slots_by_gate.setdefault(slot.gate, []).append(slot)
return slots_by_gate
    @memoize
    def get_free_slots(self, booking_date, period='day'):
        """ This will return the free slots divided by gate
        :param booking_date: a datetime object
        :param period: a string ('day' combines morning and afternoon)
        :return: a dictionary like:
            {'gate1': [slot1],
             'gate2': [slot2, slot3],
            }
        """
        day_intervals = self.get_day_intervals(booking_date)
        if period == 'day':
            intervals = [day_intervals['morning'], day_intervals['afternoon']]
        else:
            intervals = [day_intervals[period]]
        # busy slots grouped by gate: free time is computed by subtracting
        # them from the period intervals (see BaseSlot.__sub__)
        slots_by_gate = self.get_busy_slots(booking_date, period)
        gates = self.get_gates()
        availability = {}
        for gate in gates:
            # unavailable gates don't have free slots
            # XXX resume from here:
            if self.get_unavailable_gates() and gate in self.get_unavailable_gates():
                availability[gate] = []
            else:
                availability.setdefault(gate, [])
                gate_slots = slots_by_gate.get(gate, [])
                for interval in intervals:
                    # skip empty/undefined intervals (e.g. closed afternoon)
                    if interval:
                        availability[gate].extend(interval - gate_slots)
        return availability
def get_freebusy_slots(self, booking_date, period='day'):
""" This will return all the slots (free and busy) divided by gate
:param booking_date: a datetime object
:param period: a string
:return: a dictionary like:
{'gate1': [slot1],
'gate2': [slot2, slot3],
}
"""
free = self.get_free_slots(booking_date, period)
busy = self.get_busy_slots(booking_date, period)
keys = set(free.keys() + busy.keys())
return dict(
(key, sorted(free.get(key, []) + busy.get(key, [])))
for key in keys
)
def get_anonymous_slots(self, booking_date, period='day'):
""" This will return all the slots under the fake name
anonymous_gate
:param booking_date: a datetime object
:param period: a string
:return: a dictionary like:
{'anonymous_gate': [slot2, slot3],
}
"""
interval = self.get_day_intervals(booking_date)[period]
slots_by_gate = {'anonymous_gate': []}
if not interval or len(interval) == 0:
return slots_by_gate
start = interval.lower_value
stop = interval.upper_value
hours = set(3600 * i for i in range(24) if start <= i * 3600 <= stop)
hours = sorted(hours.union(set((start, stop))))
slots_number = len(hours) - 1
slots = [BaseSlot(hours[i], hours[i + 1]) for i in range(slots_number)]
slots_by_gate['anonymous_gate'] = slots
return slots_by_gate
@property
@memoize
def tipology_durations(self):
""" The durations of all known tipologies
@return a dict like this:
{'tipology1': 10,
'tipology2': 20,
...
}
"""
return dict(
(x['name'], int(x['duration']))
for x in self.context.getTipologia()
)
    def get_tipology_duration(self, tipology):
        """ Return the duration configured for this tipology.

        :param tipology: either a dict with a ``duration`` key or the
            tipology name (str/unicode)

        NOTE(review): the two branches look inconsistent — the dict
        branch multiplies by 60 (minutes -> seconds?) while the name
        branch returns the raw value stored by ``tipology_durations``
        (and callers such as ResetDuration treat it as minutes).
        TODO: confirm the intended unit before relying on this.
        """
        if isinstance(tipology, dict):
            return int(tipology['duration']) * 60
        # Py2: a non-unicode str is decoded so it can match the unicode
        # keys of tipology_durations
        if isinstance(tipology, basestring) and not isinstance(tipology, unicode):
            tipology = tipology.decode('utf8')
        return self.tipology_durations.get(tipology, 1)
@memoize
def tipologies_bookability(self, booking_date):
"""
:param booking_date: a datetime object
Return a dictionary like this:
{'bookable': ['tipology 00', 'tipology 01', ...],
'unbookable': ['tipology 10', 'tipology 10', ...],
}
Bookability is calculated from the booking_date and the available slots
"""
data = {'booking_date': booking_date}
bookability = {'bookable': [], 'unbookable': []}
for tipology in self.tipology_durations:
data['tipology'] = tipology
if self.conflict_manager.conflicts(data):
bookability['unbookable'].append(tipology)
else:
bookability['bookable'].append(tipology)
return bookability
@memoize
def is_booking_date_bookable(self, booking_date):
""" Check if we have enough time to book this date
:param booking_date: a date as a datetime
"""
bookability = self.tipologies_bookability(booking_date)
return bool(bookability['bookable'])
def get_first_slot(self, tipology, booking_date, period='day'):
"""
The Prenotazione objects for today
:param tipology: a dict with name and duration
:param booking_date: a date as a datetime or a string
:param period: a DateTime object
"""
if booking_date < self.first_bookable_date:
return
availability = self.get_free_slots(booking_date, period)
good_slots = []
duration = self.get_tipology_duration(tipology)
hm_now = datetime.now().strftime('%H:%m')
for slots in availability.itervalues():
for slot in slots:
if (len(slot) >= duration and
(booking_date > self.first_bookable_date
or slot.start() >= hm_now)):
good_slots.append(slot)
if not good_slots:
return
good_slots.sort(key=lambda x: x.lower_value)
return good_slots[0]
def get_less_used_gates(self, booking_date):
"""
Find which gate is les busy the day of the booking
"""
availability = self.get_free_slots(booking_date)
# Create a dictionary where keys is the time the gate is free, and
# value is a list of gates
free_time_map = {}
for gate, free_slots in availability.iteritems():
free_time = sum(map(BaseSlot.__len__, free_slots))
free_time_map.setdefault(free_time, []).append(gate)
# Get a random choice among the less busy one
max_free_time = max(free_time_map.keys())
return free_time_map[max_free_time]
def __call__(self):
""" Return itself
"""
return self | /rg.prenotazioni-4.0.tar.gz/rg.prenotazioni-4.0/src/rg/prenotazioni/browser/prenotazioni_context_state.py | 0.760117 | 0.358072 | prenotazioni_context_state.py | pypi |
from Products.CMFCore.utils import getToolByName
from datetime import date, timedelta
from plone import api
from plone.memoize.view import memoize
from rg.prenotazioni import _
from rg.prenotazioni.browser.base import BaseView
from rg.prenotazioni.browser.interfaces import IDontFollowMe
from rg.prenotazioni.utilities.urls import urlify
from zope.deprecation import deprecate
from zope.interface.declarations import implements
from zope.schema.vocabulary import getVocabularyRegistry
class View(BaseView):
    ''' Display the appointments of this week
    '''
    implements(IDontFollowMe)

    @property
    @memoize
    def translation_service(self):
        ''' The translation_service tool
        '''
        return getToolByName(self.context, 'translation_service')

    @property
    @memoize
    def localized_time(self):
        ''' Facade for context/@@plone/toLocalizedTime
        '''
        return api.content.get_view('plone',
                                    self.context,
                                    self.request).toLocalizedTime

    def DT2time(self, value):
        ''' Converts a DateTime into a localized time string

        :param value: a DateTime object
        '''
        return self.localized_time(value, time_only=True)

    def get_day_msgid(self, day):
        ''' Return the msgid used to translate the week day

        :param day: a date-like object with isoweekday()
        '''
        # translation_service numbers days 0-6 starting from Sunday
        return self.translation_service.day_msgid(day.isoweekday() % 7)

    @property
    @memoize
    @deprecate('Use the prenotazioni_context_state property instead')
    def user_can_manage(self):
        ''' States if the authenticated user can manage this context
        '''
        if api.user.is_anonymous():
            return False
        permissions = api.user.get_permissions(obj=self.context)
        return permissions.get('Modify portal content', False)

    @property
    @memoize
    @deprecate('Use the prenotazioni_context_state property instead')
    def user_can_view(self):
        ''' States if the authenticated user can view this context
        '''
        if api.user.is_anonymous():
            return False
        if self.prenotazioni.user_can_manage:
            return True
        return u'Reader' in api.user.get_roles(obj=self.context)

    @property
    @memoize
    @deprecate('Use the prenotazioni_context_state property instead')
    def user_can_search(self):
        ''' States if the user can see the search button
        '''
        return self.prenotazioni.user_can_manage

    @property
    @memoize
    def day_period_macro(self):
        ''' Which macro should I use to display a day period
        '''
        # NOTE(review): managers and plain viewers get the same macro
        # here — confirm this is intended
        prenotazione_macros = self.prenotazione_macros
        if self.prenotazioni.user_can_manage:
            return prenotazione_macros['manager_day_period']
        if self.prenotazioni.user_can_view:
            return prenotazione_macros['manager_day_period']
        return prenotazione_macros['anonymous_day_period']

    @property
    @memoize
    def slot_macro(self):
        ''' Which macro should I use to display the slot
        '''
        prenotazione_macros = self.prenotazione_macros
        if self.prenotazioni.user_can_manage:
            return prenotazione_macros['manager_slot']
        if self.prenotazioni.user_can_view:
            return prenotazione_macros['manager_slot']
        return self.prenotazione_macros['anonymous_slot']

    @property
    @memoize
    def periods(self):
        ''' Return the day periods to display (managers also see the
        out-of-schedule "stormynight" period)
        '''
        if self.prenotazioni.user_can_manage:
            return ('morning', 'afternoon', 'stormynight')
        else:
            return ('morning', 'afternoon')

    @property
    @memoize
    def actual_date(self):
        """ The date requested through the ``data`` request parameter
        (dd/mm/yyyy), falling back to today on bad/missing input
        """
        day = self.request.get('data', '')
        try:
            day_list = day.split('/')
            data = date(int(day_list[2]), int(day_list[1]), int(day_list[0]))
        except (ValueError, IndexError):
            data = self.prenotazioni.today
        return data

    @property
    @memoize
    def actual_week_days(self):
        """ The seven days of the week containing actual_date,
        starting from Monday
        """
        actual_date = self.actual_date
        weekday = actual_date.weekday()
        monday = actual_date - timedelta(weekday)
        return [monday + timedelta(x) for x in range(0, 7)]

    @property
    @memoize
    def actual_translated_month(self):
        ''' The translated full name of the month of actual_date
        '''
        return self.translation_service.month(self.actual_date.month)

    @property
    @memoize
    def prev_week(self):
        """ The actual date - 7 days, formatted dd/mm/yyyy
        """
        return (self.actual_date - timedelta(days=7)).strftime('%d/%m/%Y')

    @property
    @memoize
    def next_week(self):
        """ The actual date + 7 days, formatted dd/mm/yyyy
        """
        return (self.actual_date + timedelta(days=7)).strftime('%d/%m/%Y')

    @property
    @memoize
    def prev_week_url(self):
        """ The link to the previous week
        """
        qs = {'data': self.prev_week}
        qs.update(self.prenotazioni.remembered_params)
        return urlify(self.request.getURL(), params=qs)

    @property
    @memoize
    def next_week_url(self):
        """ The link to the next week
        """
        qs = {'data': self.next_week}
        qs.update(self.prenotazioni.remembered_params)
        return urlify(self.request.getURL(), params=qs)

    @property
    @memoize
    def toggle_columns_url(self):
        """ The link to enable/disable the portlet columns
        """
        params = self.prenotazioni.remembered_params.copy()
        if ('disable_plone.leftcolumn' in params or 'disable_plone.rightcolumn' in params):  # noqa
            params.pop('disable_plone.leftcolumn', '')
            params.pop('disable_plone.rightcolumn', '')
        else:
            params['disable_plone.leftcolumn'] = 1
            params['disable_plone.rightcolumn'] = 1
        data = self.request.form.get('data', '')
        if data:
            params['data'] = data
        return urlify(self.request.getURL(), params=params)

    @memoize
    def get_search_gate_url(self, gate, day):
        ''' The URL to search bookings for a gate on a given day
        '''
        params = {
            'start': day,
            'end': day,
            'actions.search': 1
        }
        vr = getVocabularyRegistry()
        voc = vr.get(self.context, 'rg.prenotazioni.gates')
        try:
            params['gate'] = voc.getTerm(gate).token
        except LookupError:
            # gate not in the vocabulary: fall back to a text search
            params['text'] = gate
        return urlify(
            self.context.absolute_url(),
            '@@prenotazioni_search',
            params=params
        )

    @memoize
    def show_day_column(self, day):
        ''' Return True or False according to the fact that the column
        should be shown (managers always see it)
        '''
        if self.prenotazioni.user_can_manage:
            return True
        periods = self.prenotazioni.get_day_intervals(day)
        return bool(periods['day'])

    def get_foreseen_booking_time(self, day, slot):
        """ Return the foreseen booking time message
        """
        booking_url = self.prenotazioni.get_anonymous_booking_url(day, slot)
        message = _(
            'foreseen_booking_time',
            default=u"Foreseen booking time: ${booking_time}",
            mapping={'booking_time': booking_url['title']}
        )
        return message

    def __call__(self):
        ''' Serve the template
        '''
        # self.request.set('disable_plone.leftcolumn', 1)
        # self.request.set('disable_plone.rightcolumn', 1)
        return super(View, self).__call__()
import zope
import zope.component
import zope.interface
import zope.schema.interfaces

from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser import BrowserView
from Products.Five.browser.metaconfigure import ViewMixinForTemplates
from plone import api
from plone.memoize.view import memoize
from z3c.form import interfaces
from z3c.form import interfaces, util
from z3c.form.browser.radio import RadioWidget
from z3c.form.interfaces import IRadioWidget
from z3c.form.widget import FieldWidget
from z3c.form.widget import FieldWidget
from z3c.form.widget import SequenceWidget
from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile as VPTF
from zope.component import getUtility
from zope.i18n import translate
from zope.pagetemplate.interfaces import IPageTemplate
# SimpleTerm is required by CustomRadioWidget.renderForValue
from zope.schema.vocabulary import SimpleTerm
from zope.schema.interfaces import IVocabularyFactory
class ICustomRadioFieldWidget(interfaces.IFieldWidget):
    """ Marker interface for the custom tipology radio field widget. """
class ICustomRadioWidget(IRadioWidget):
    """ Marker interface for the custom tipology radio widget. """
class RenderWidget(ViewMixinForTemplates, BrowserView):
    """ Renders the tipology radio widget template.

    ``self.context`` is the widget; ``self.context.context`` is the
    content object the widget is bound to.
    """
    index = VPTF('templates/tipology_radio_widget.pt')

    @property
    @memoize
    def prenotazione_add(self):
        ''' Returns the prenotazione_add form instance.
        '''
        return api.content.get_view('prenotazione_add',
                                    self.context.context.aq_inner,
                                    self.request).\
            form(
                self.context.context.aq_inner,
                self.request
            )

    @property
    @memoize
    def vocabulary(self):
        ''' The (instantiated) vocabulary of the widget field, if any.
        '''
        voc_name = self.context.field.vocabularyName
        if voc_name:
            return getUtility(IVocabularyFactory, name=voc_name)(
                self.context.context.aq_inner
            )

    @property
    @memoize
    def tipologies_bookability(self):
        ''' Get tipology bookability for the requested booking date.
        '''
        booking_date = self.prenotazione_add.booking_DateTime.asdatetime()
        prenotazioni = self.prenotazione_add.prenotazioni
        return prenotazioni.tipologies_bookability(booking_date)

    @property
    @memoize
    def unbookable_items(self):
        ''' The vocabulary terms of the unbookable tipologies.
        '''
        keys = sorted(self.tipologies_bookability['unbookable'])
        # BUGFIX: use safe_unicode (as CustomRadioWidget does) instead of
        # key.decode('utf8'): decoding an already-unicode key raises
        # UnicodeEncodeError on non-ascii names in Python 2
        keys = [safe_unicode(key) for key in keys]
        return [self.vocabulary.getTerm(key) for key in keys if key in self.context.terms]
@zope.interface.implementer_only(ICustomRadioWidget)
class CustomRadioWidget(RadioWidget):
    """ Radio widget that only offers the tipologies that are bookable
    on the requested booking date.
    """
    @property
    @memoize
    def prenotazione_add(self):
        ''' Returns the prenotazione_add form instance.
            Everyone should know about this!
        '''
        return api.content.get_view(
            'prenotazione_add',
            self.context,
            self.request).form(
            self.context,
            self.request
        )
    @property
    @memoize
    def vocabulary(self):
        ''' The (instantiated) vocabulary of this widget's field, if any.
        '''
        voc_name = self.field.vocabularyName
        if voc_name:
            return getUtility(IVocabularyFactory, name=voc_name)(
                self.context
            )
    @property
    @memoize
    def tipologies_bookability(self):
        ''' Get tipology bookability for the requested booking date.
        '''
        booking_date = self.prenotazione_add.booking_DateTime.asdatetime()
        prenotazioni = self.prenotazione_add.prenotazioni
        return prenotazioni.tipologies_bookability(booking_date)
    @property
    @memoize
    def bookable_items(self):
        ''' The vocabulary terms of the bookable tipologies.
        '''
        keys = sorted(self.tipologies_bookability['bookable'])
        keys = [safe_unicode(key) for key in keys]
        return [self.vocabulary.getTerm(key) for key in keys if key in self.terms]
    @property
    @memoize
    def unbookable_items(self):
        ''' The vocabulary terms of the unbookable tipologies.
        '''
        keys = sorted(self.tipologies_bookability['unbookable'])
        keys = [safe_unicode(key) for key in keys]
        # NOTE(review): this checks self.context.terms while
        # bookable_items checks self.terms — confirm which is intended
        return [self.vocabulary.getTerm(key) for key in keys if key in self.context.terms]
    @property
    def items(self):
        # NOTE(review): returns None (not []) when nothing is bookable —
        # verify the template tolerates a non-iterable value
        bookable = self.bookable_items
        if not bookable:
            return
        results = []
        for count, term in enumerate(self.bookable_items):
            checked = self.isChecked(term)
            id = '%s-%i' % (self.id, count)
            if zope.schema.interfaces.ITitledTokenizedTerm.providedBy(term):
                label = translate(term.title, context=self.request,
                                  default=term.title)
            else:
                label = util.toUnicode(term.value)
            results.append({'id': id, 'name': self.name, 'value': term.token,
                            'label': label, 'checked': checked, 'index': count})
        return results
    def renderForValue(self, value, index=None):
        # customize 'cause we need to pass index
        terms = list(self.terms)
        try:
            term = self.terms.getTermByToken(value)
        except LookupError:
            if value == SequenceWidget.noValueToken:
                # NOTE(review): SimpleTerm must be imported from
                # zope.schema.vocabulary — verify it is in scope in this
                # module, otherwise this branch raises NameError
                term = SimpleTerm(value)
                terms.insert(0, term)
            else:
                raise
        checked = self.isChecked(term)
        # id = '%s-%i' % (self.id, terms.index(term))
        id = '%s-%i' % (self.id, index)
        item = {'id': id, 'name': self.name, 'value': term.token,
                'checked': checked}
        template = zope.component.getMultiAdapter(
            (self.context, self.request, self.form, self.field, self),
            IPageTemplate, name=self.mode + '_single')
        return template(self, item)
@zope.component.adapter(zope.schema.interfaces.IField, interfaces.IFormLayer)
@zope.interface.implementer(ICustomRadioFieldWidget)
def CustomRadioFieldWidget(field, request):
    """IFieldWidget factory for CustomRadioWidget."""
    return FieldWidget(field, CustomRadioWidget(request))
from Products.Five.browser import BrowserView
from plone import api
from plone.memoize.view import memoize
from rg.prenotazioni.config import MIN_IN_DAY
from rg.prenotazioni.utilities.urls import urlify
class PrenotazioneView(BrowserView):
    """View for a Prenotazione (booking) object"""

    @property
    @memoize
    def prenotazioni_folder(self):
        ''' The parent prenotazioni folder
        '''
        return self.context.getPrenotazioniFolder()

    @property
    @memoize
    def prenotazioni(self):
        ''' The context state view of the parent prenotazioni folder
        '''
        return api.content.get_view('prenotazioni_context_state',
                                    self.prenotazioni_folder,
                                    self.request)

    @property
    def booking_date(self):
        ''' The start date of this booking
        '''
        return self.context.getData_prenotazione()

    def _with_booking_date(self, target):
        ''' Append the booking date (dd/mm/yyyy) to target as the
        ``data`` query string parameter, when a booking date is set.
        '''
        booking_date = self.booking_date
        if booking_date:
            qs = {'data': booking_date.strftime('%d/%m/%Y')}
            target = urlify(target, params=qs)
        return target

    @property
    @memoize
    def back_url(self):
        ''' Go back to the parent prenotazioni folder on the right day
        '''
        return self._with_booking_date(
            self.prenotazioni_folder.absolute_url())

    @property
    @memoize
    def move_url(self):
        ''' Move this booking by visiting this url
        '''
        return self._with_booking_date(
            '/'.join((self.context.absolute_url(), 'prenotazione_move')))

    @property
    @memoize
    def review_state(self):
        ''' The review_state of this object
        '''
        return self.prenotazioni.get_state(self.context)
class ResetDuration(PrenotazioneView):
    ''' Reset the expiration date (data_scadenza) of a booking:
    sometimes it is needed :p
    '''
    def reset_duration(self):
        ''' Reset the duration for this booking object.

        Tries to get the duration (in minutes) from the request,
        falls back to the tipology duration.
        '''
        tipology = self.context.getTipologia_prenotazione()
        duration = self.request.form.get('duration', 0)
        if not duration:
            duration = self.prenotazioni.get_tipology_duration(tipology)
        # DateTime arithmetic works in days: convert minutes to days
        duration = (float(duration) / MIN_IN_DAY)
        self.context.setData_scadenza(self.booking_date + duration)

    def __call__(self):
        ''' Reset the dates and go back to the folder view
        '''
        self.reset_duration()
        return self.request.response.redirect(self.back_url)
from DateTime import DateTime
from pyinter.interval import Interval
from zope.component import Interface
from zope.interface.declarations import implements
def slots_to_points(slots):
    ''' Flatten the given slots into a sorted list of their endpoints.

    :param slots: an iterable of interval-like objects exposing
        ``lower_value`` and ``upper_value``
    :return: a sorted list containing both endpoints of every slot
    '''
    # NOTE: the original used a list comprehension for its side effects
    # ([points.extend(...)]); an explicit loop says what it means
    points = []
    for slot in slots:
        points.append(slot.lower_value)
        points.append(slot.upper_value)
    return sorted(points)
class ISlot(Interface):
    '''
    Marker interface for a Slot object (a time interval, possibly bound
    to a booking and a gate)
    '''
class LowerEndpoint(int):
    ''' An int tagged as the lower endpoint of a slot
    (used by BaseSlot.__sub__ to tell starts from stops)
    '''
class UpperEndpoint(int):
    ''' An int tagged as the upper endpoint of a slot
    (used by BaseSlot.__sub__ to tell stops from starts)
    '''
class BaseSlot(Interval):
    ''' Overrides and simplifies pyinter.Interval

    Endpoints are stored as seconds since midnight (closed interval).
    '''
    implements(ISlot)
    _lower = Interval.CLOSED
    _upper = Interval.CLOSED
    context = None
    gate = ''
    # NOTE(review): mutable class attribute — if any instance appends to
    # it instead of reassigning, the styles leak to all slots; verify
    extra_css_styles = []

    @staticmethod
    def time2seconds(value):
        '''
        Takes a value and converts it into seconds since midnight
        :param value: an int (passed through), a datetime or a DateTime
        '''
        if isinstance(value, int):
            return value
        if not value:
            return None
        if isinstance(value, DateTime):
            value = value.asdatetime()
        return (value.hour * 60 * 60 + value.minute * 60 + value.second)

    def __init__(self, start, stop, gate=''):
        '''
        Initialize a BaseSlot
        :param start: start time (int seconds, datetime or DateTime)
        :param stop: stop time (int seconds, datetime or DateTime)
        :param gate: the gate this slot belongs to
        '''
        if start is not None:
            self._lower_value = LowerEndpoint(self.time2seconds(start))
        if stop is not None:
            self._upper_value = UpperEndpoint(self.time2seconds(stop))
        self.gate = gate

    def __len__(self):
        ''' The length of this slot in seconds (0 if undefined)
        '''
        if not self:
            return 0
        return self._upper_value - self.lower_value

    def __nonzero__(self):
        ''' A slot is truthy only when both endpoints are set
        (Python 2 truth protocol)
        '''
        if (isinstance(self._lower_value, int) and
                isinstance(self._upper_value, int)):
            return 1
        else:
            return 0

    def __sub__(self, value):
        ''' Subtract one interval (or a list of intervals) from this one,
        returning the list of leftover BaseSlots
        '''
        if isinstance(value, Interval):
            value = [value]
        # We filter not overlapping intervals
        good_intervals = [x for x in value if x.overlaps(self)]
        # sorted endpoints, tagged as Lower/UpperEndpoint
        points = slots_to_points(good_intervals)
        start = self.lower_value
        intervals = []
        for x in points:
            if isinstance(x, LowerEndpoint) and x > start:
                # a busy interval begins: close the free chunk before it
                intervals.append(BaseSlot(start, x))
                # we raise the bar waiting for another stop
                start = self.upper_value
            elif isinstance(x, UpperEndpoint):
                # a busy interval ends: free time may resume here
                start = x
        intervals.append(BaseSlot(start, self.upper_value))
        return intervals

    def value_hr(self, value):
        ''' Format a seconds-since-midnight value as 'HH:MM'
        '''
        if not value:
            return ''
        hour = str(value // 3600).zfill(2)
        # Python 2 integer division
        minute = str((value % 3600) / 60).zfill(2)
        return '%s:%s' % (hour, minute)

    def start(self):
        ''' Return the start time as 'HH:MM'
        '''
        return self.value_hr(self._lower_value)

    def stop(self):
        ''' Return the stop time as 'HH:MM'
        '''
        return self.value_hr(self._upper_value)

    def css_styles(self):
        ''' The css styles for this slot.
        The height of the interval in pixel is equal
        to the interval length in minutes
        '''
        styles = []
        if self._upper_value and self._lower_value:
            # we add 1px for each hour to account for the border
            # between the slots
            height = len(self) / 60 * 1.0 + len(self) / 3600
            styles.append("height:%dpx" % height)
        styles.extend(self.extra_css_styles)
        return ';'.join(styles)

    def get_values_hr_every(self, width, slot_min_size=0):
        ''' This partitions this slot in pieces of length width and
        returns the human readable value of the starts.

        If slot is [0, 1000],
        calling this with width 300 will return
        ["00:00", "00:05", "00:10"]

        If slot_min_size is passed it will not return values whose
        distance from the slot upper value is lower than this
        '''
        # Python 2 integer division
        number_of_parts = len(self) / width
        values = set([])
        start = self.lower_value
        end = self.upper_value
        for i in range(number_of_parts):
            value = start + width * i
            if (end - value) >= slot_min_size:
                values.add(value)
        return map(self.value_hr, sorted(values))
class Slot(BaseSlot):
    """ A BaseSlot bound to a Prenotazione (booking) object. """

    implements(ISlot)

    def __eq__(self, other):
        """ We need to compare also the context before comparing the
        boundaries
        """
        return (
            self.context == other.context and
            super(Slot, self).__eq__(other)
        )

    def __init__(self, context):
        '''
        :param context: a Prenotazione object
        '''
        self.context = context
        BaseSlot.__init__(self,
                          context.getData_prenotazione(),
                          context.getData_scadenza(),
                          self.context.getGate())
from plone.app.textfield import RichText
from plone.autoform import directives
from plone.dexterity.content import Container
from plone.namedfile import field as namedfile
from plone.supermodel import model
from plone.supermodel.directives import fieldset
from plone.autoform import directives as form
from z3c.form.browser.radio import RadioFieldWidget
from zope import schema
from zope.interface import implementer, Interface
from collective import dexteritytextindexer
from rg.prenotazioni import _
from collective.z3cform.datagridfield import DataGridFieldFactory, DictRow
from collective.z3cform.datagridfield import BlockDataGridFieldFactory
class ISettimanaTipoRow(model.Schema):
    """ Schema for one row of the "typical week" datagrid:
    a day name with morning/afternoon opening and closing hours.
    """
    giorno = schema.TextLine(
        title=_(u"Giorno"),
        required=False
    )
    inizio_m = schema.Choice(
        title=_(u"Ora inizio mattina"),
        vocabulary="rg.prenotazioni.VocOreInizio",
        required=False,
    )
    end_m = schema.Choice(
        title=_(u"Ora fine mattina"),
        vocabulary="rg.prenotazioni.VocOreInizio",
        required=False,
    )
    inizio_p = schema.Choice(
        title=_(u"Ora inizio pomeriggio"),
        vocabulary="rg.prenotazioni.VocOreInizio",
        required=False,
    )
    end_p = schema.Choice(
        title=_(u"Ora fine pomeriggio"),
        vocabulary="rg.prenotazioni.VocOreInizio",
        required=False,
    )
class ITipologiaRow(Interface):
    """ Schema for one row of the booking-typology datagrid:
    a typology name and its duration.
    """
    name = schema.TextLine(
        title=_(u"Typology name"),
        required=True,
    )
    duration = schema.Choice(
        title=_(u"Duration value"),
        required=True,
        vocabulary="rg.prenotazioni.VocDurataIncontro"
    )
class IPrenotazioniFolder(model.Schema):
    """ Marker interface and Dexterity Python Schema for PrenotazioniFolder
    """
    dexteritytextindexer.searchable('descriptionAgenda')
    descriptionAgenda = RichText(
        required=False,
        title=_(u'Descrizione Agenda', default=u'Descrizione Agenda'),
        description=(u"Inserire il testo di presentazione "
                     u"dell'agenda corrente"),
    )
    # validity start date of this booking folder
    daData = schema.Date(
        title=_(u'Data inizio validità'),
    )
    # validity end date; empty means "never expires"
    aData = schema.Date(
        title=_(u'Data fine validità'),
        description=_("aData_help",
                      default=u"Leave empty, and this Booking Folder will never expire"),  # noqa
        required=False
    )
    # NOTE(review): list defaults on zope.schema fields are shared
    # mutable objects — verify the datagrid widget copies them per
    # instance before mutating
    settimana_tipo = schema.List(
        title=_(u"Settimana Tipo"),
        description=_(u"Indicare la composizione della settimana tipo"),
        required=True,
        value_type=DictRow(
            schema=ISettimanaTipoRow
        ),
        default = [
            {'giorno': u'Lunedì', 'inizio_m': None, 'inizio_p': None, 'end_m': None, 'end_p': None},
            {'giorno': u'Martedì', 'inizio_m': None, 'inizio_p': None, 'end_m': None, 'end_p': None},
            {'giorno': u'Mercoledì', 'inizio_m': None, 'inizio_p': None, 'end_m': None, 'end_p': None},
            {'giorno': u'Giovedì', 'inizio_m': None, 'inizio_p': None, 'end_m': None, 'end_p': None},
            {'giorno': u'Venerdì', 'inizio_m': None, 'inizio_p': None, 'end_m': None, 'end_p': None},
            {'giorno': u'Sabato', 'inizio_m': None, 'inizio_p': None, 'end_m': None, 'end_p': None},
            {'giorno': u'Domenica', 'inizio_m': None, 'inizio_p': None, 'end_m': None, 'end_p': None},
        ]
    )
    form.widget(settimana_tipo=DataGridFieldFactory)
    # holidays, one per line, in DD/MM/YYYY format ('*' for the year
    # marks a yearly recurring holiday)
    festivi = schema.List(
        title=_(u"Giorni festivi"),
        description=_(
            'help_holidays',
            u"Indicare i giorni festivi (uno per riga) "
            u"nel formato GG/MM/AAAA. Al posto dell'anno puoi mettere un "
            u"asterisco per indicare un evento che ricorre annualmente."
        ),
        required=False,
        value_type=schema.TextLine(),
        default=[]
    )
    futureDays = schema.Int(
        default=0,
        title=_(u'Max days in the future'),
        description=_('futureDays',
                      default=u"Limit booking in the future to an amount "
                              u"of days in the future starting from "
                              u"the current day. \n"
                              u"Keep 0 to give no limits."),
    )
    notBeforeDays = schema.Int(
        default=2,
        title=_(u'Days booking is not allowed before'),
        description=_('notBeforeDays',
                      default=u"Booking is not allowed before the amount "
                              u"of days specified. \n"
                              u"Keep 0 to give no limits."),
    )
    tipologia = schema.List(
        title=_(u"Tipologie di richiesta"),
        description=_('tipologia_help',
                      default=u"Put booking types there (one per line).\n"
                              u"If you do not provide this field, "
                              u"not type selection will be available"),
        value_type=DictRow(
            schema=ITipologiaRow
        )
    )
    form.widget(tipologia=DataGridFieldFactory)
    gates = schema.List(
        title=_('gates_label', "Gates"),
        description=_('gates_help',
                      default=u"Put gates here (one per line). "
                              u"If you do not fill this field, "
                              u"one gate is assumed"),
        required=False,
        value_type=schema.TextLine(),
        default=[]
    )
    # gates temporarily excluded from slot allocation; each line must
    # match a line of the "gates" field
    unavailable_gates = schema.List(
        title=_('unavailable_gates_label', "Unavailable gates"),
        description=_('unavailable_gates_help',
                      default=u'Add a gate here (one per line) if, '
                              u'for some reason, '
                              u'it is not be available.'
                              u'The specified gate will not be taken in to '  # noqa
                              u'account for slot allocation. '
                              u'Each line should match a corresponding '
                              u'line in the "Gates" field'
                      ),
        required=False,
        value_type=schema.TextLine(),
        default=[]
    )
    # XXX validate email
    email_responsabile = schema.TextLine(
        title=_(u'Email del responsabile'),
        description=_(u"Inserisci l'indirizzo email del responsabile "
                      "delle prenotazioni"),
        required=False,
    )
@implementer(IPrenotazioniFolder)
class PrenotazioniFolder(Container):
    """ Dexterity container for bookings.

    The ``get*`` accessors mirror the old Archetypes-style API so that
    existing callers keep working.
    """
    def getDescriptionAgenda(self):
        return self.descriptionAgenda

    def getSettimana_tipo(self):
        return self.settimana_tipo

    def getGates(self):
        return self.gates

    def getUnavailable_gates(self):
        return self.unavailable_gates

    def getDaData(self):
        return self.daData

    def getAData(self):
        return self.aData

    def getTipologia(self):
        return self.tipologia

    def getFestivi(self):
        return self.festivi

    def getFutureDays(self):
        return self.futureDays

    def getNotBeforeDays(self):
        return self.notBeforeDays
from rga.base.exceptions import InstCommunicationError, InstSetError, InstQueryError
from rga.base.commands import IntCommand, IntGetCommand, FloatCommand
class RgaIntCommand(IntCommand):
    """
    Descriptor for an RGA100 remote command to
    **set** and **query** an **integer** value.
    Setting a value returns a status byte, which is stored as last_set_status
    """
    def __set__(self, instance, value):
        # accessed on the class, not an instance: nothing to do
        if instance is None:
            return
        set_string = self.remote_command
        try:
            # optionally convert the value before sending it
            if callable(self._set_convert_function):
                converted_value = self._set_convert_function(value)
            else:
                converted_value = value
            set_string = '{} {}'.format(self.remote_command, converted_value)
            # the instrument replies with a status byte; keep it on the
            # owning component for later inspection
            reply = int(instance.comm.query_text_with_long_timeout(set_string))
            instance.last_set_status = reply
        except InstCommunicationError:
            raise InstSetError('Error during setting: CMD:{} '.format(set_string))
        except ValueError:
            # the reply could not be parsed as an integer status byte
            raise InstSetError('Error during conversion: CMD: {}'
                               .format(set_string))
class RgaFloatCommand(RgaIntCommand):
    """
    Descriptor for an RGA100 remote command to
    **set** and **query** a **float** value.
    Setting a value returns a status byte, which is stored as last_set_status
    """
    def __init__(self, remote_command_name):
        super().__init__(remote_command_name)
        # values cross the wire as floats in both directions
        self._get_convert_function = float
        self._set_convert_function = float
class RgaIonEnergyCommand(RgaIntCommand):
    """
    Descriptor for a RGA100 remote command
    to **set** and **query** ion energy. Only 8 and 12 eV are allowed.
    Setting a value returns a status byte, which is stored as last_set_status
    """
    def __init__(self, remote_command_name):
        super().__init__(remote_command_name)
        # the instrument encodes ion energy as a flag: 0 -> 8 eV, else 12 eV
        self._get_convert_function = lambda a: 12 if int(a) != 0 else 8
        # values >= 12 map to the high-energy flag, anything lower to 8 eV
        self._set_convert_function = lambda a: 1 if a >= 12 else 0
class RgaTotalPressureCommand(IntGetCommand):
    """
    Descriptor for a RGA100 remote command to **query** total pressure value
    returned as a binary long integer. To set a value is not allowed.
    """
    def __get__(self, instance, instance_type):
        query_string = '{}?'.format(self.remote_command)
        try:
            # hold the communication lock across send + binary read so
            # no other command can interleave
            with instance.comm.get_lock():
                instance.comm._send(query_string)
                intensity = instance.comm._read_long()
            self._value = intensity
        except InstCommunicationError:
            raise InstQueryError('Error during querying: CMD: {}'.format(query_string))
        except ValueError:
            # BUGFIX: the old message formatted a `reply` variable that
            # was never assigned, so it always printed 'Reply: None'
            raise InstQueryError('Error during conversion: CMD: {}'
                                 .format(query_string))
        return self._value
class RgaStoredCEMGainCommand(FloatCommand):
    """
    Descriptor for a RGA100 remote command
    to **set** and **query** the stored CEM gain.
    The raw data is stored as the gain divided by 1000,
    and the descriptor converts back to the original value.
    """
    def __init__(self, remote_command_name):
        super().__init__(remote_command_name)
        # raw value on the instrument is gain / 1000:
        # scale up on query, down on set
        self._get_convert_function = lambda a: float(a) * 1000.0
        self._set_convert_function = lambda a: float(a) / 1000.0
from rga.base import Component
from rga.base.commands import IntCommand, IntGetCommand,\
FloatCommand, BoolSetCommand, \
GetCommand
from .commands import RgaIntCommand, RgaFloatCommand, \
RgaIonEnergyCommand, RgaTotalPressureCommand, \
RgaStoredCEMGainCommand
from rga.base.exceptions import InstException
from .errors import query_errors, fetch_error_descriptions
class Ionizer(Component):
    """Ionizer settings of the RGA100: electron energy, ion energy,
    focus plate voltage and filament emission current."""

    electron_energy = RgaIntCommand('EE')
    ion_energy = RgaIonEnergyCommand('IE')
    focus_voltage = RgaIntCommand('VF')

    emission_current = RgaFloatCommand('FL')
    """
    For typical operation, set emission current to 1 (mA).
    When the emission current is set to 0, the filament will be turned off
    """

    last_set_status = 0
    """
    RgaCommand returns a status byte with set operations.
    Check this value after ionizer set commands
    """

    def get_parameters(self):
        """
        Get electron energy, ion energy, focus voltage setting values

        Returns
        --------
        tuple
            (electron_energy, ion_energy, focus_voltage)
        """
        return self.electron_energy, self.ion_energy, self.focus_voltage

    def set_parameters(self, electron_energy, ion_energy, focus_voltage):
        """
        Set electron energy, ion energy and focus voltage

        Parameters
        -----------
        electron_energy : int
            electron energy for electron impact ionization
        ion_energy : int
            ion energy, 12 eV or 8 eV
        focus_voltage : int
            focus plate voltage

        Returns
        --------
        int
            status byte from the last set operation
        """
        self.electron_energy = electron_energy
        self.ion_energy = ion_energy
        self.focus_voltage = focus_voltage
        return self.last_set_status
class Filament(Component):
    """Filament control of the RGA100 ionizer."""

    last_set_status = 0
    """
    RgaCommand returns a status byte with set operations.
    Check this value after filament set commands
    """

    def start_degas(self, degas_minute=3):
        """
        Start degas. Subsequent commands are blocked until the degas is
        over for RGA100.

        Parameters
        -----------
        degas_minute : int, optional
            degas duration in minutes. Default is 3.
        """
        # Degas blocks the instrument; allow ~65 s of reply timeout per
        # minute of degas.
        self.comm.query_text_with_long_timeout('DG{}'.format(degas_minute), degas_minute * 65)

    def turn_on(self, target_emission_current=1.0):
        """
        Turn on the filament to the target emission current

        Parameters
        -----------
        target_emission_current : float, optional
            Default is 1.0 mA

        Returns
        --------
        error_status : int
            Error status byte
        """
        self._parent.ionizer.emission_current = target_emission_current
        return self.last_set_status

    def turn_off(self):
        """
        Turn off the filament

        Returns
        --------
        int
            Error status byte
        """
        # Setting the emission current to 0 turns the filament off.
        self._parent.ionizer.emission_current = 0.0
        return self.last_set_status
class CEM(Component):
    """Electron multiplier (CEM) detector control of the RGA100."""

    stored_gain = RgaStoredCEMGainCommand('MG')
    """
    Stored CEM gain. Underlying remote command 'MG' returns
    the gain divided by 1000. This descriptor generates
    the original value, 1000 times of the raw remote command value.
    """

    stored_voltage = FloatCommand('MV')
    voltage = RgaIntCommand('HV')

    def turn_on(self):
        """
        Set CEM HV to the stored CEM voltage
        """
        self.voltage = self.stored_voltage

    def turn_off(self):
        """
        Set CEM HV to 0 V, disabling the electron multiplier
        """
        # The original docstring wrongly repeated turn_on()'s description.
        self.voltage = 0
class Pressure(Component):
    """Pressure sensitivities and total-pressure measurement of the RGA100."""

    partial_pressure_sensitivity = FloatCommand('SP')
    """
    Partial pressure sensitivity is used to convert a spectrum
    in current unit to partial pressure unit.
    The partial pressure sensitivity in the unit of mA/Torr
    """

    total_pressure_sensitivity = FloatCommand('ST')
    """
    Total pressure sensitivity is used to convert total pressure measured
    in current unit to pressure unit.
    The total pressure sensitivity in the unit of mA/Torr
    """

    total_pressure_enable = BoolSetCommand('TP')

    total_pressure = RgaTotalPressureCommand('TP')
    """
    Total pressure measured in ion current in 0.1 fA
    """

    def _conversion_factor(self, sensitivity):
        """Return the factor converting a raw 0.1 fA reading to Torr for the
        given sensitivity (mA/Torr), corrected for the CEM gain when the CEM
        high voltage is on. (Shared by both public conversion methods.)"""
        # 1e-13 converts a 0.1 fA count to mA, matching the mA/Torr sensitivity.
        factor = 1e-13 / sensitivity
        # When the CEM HV is on (> 10 V) readings are amplified by the stored
        # gain, which must be divided back out.
        if self._parent.cem.voltage > 10:
            factor /= self._parent.cem.stored_gain
        return factor

    def get_total_pressure_in_torr(self):
        """Measure the total pressure and return it in Torr."""
        # Compute the factor first so instrument queries happen in the same
        # order as before (sensitivity, CEM state, then the measurement).
        factor = self._conversion_factor(self.total_pressure_sensitivity)
        return self.total_pressure * factor

    def get_partial_pressure_sensitivity_in_torr(self):
        """Return the factor that converts a raw spectrum reading to Torr."""
        return self._conversion_factor(self.partial_pressure_sensitivity)
class QMF(Component):
    """Quadrupole mass filter (QMF) calibration coefficients."""
    class RF(Component):
        # RF ramp calibration: intercept ('RI') and slope ('RS').
        offset = FloatCommand('RI')
        slope = FloatCommand('RS')
    class DC(Component):
        # DC ramp calibration: intercept ('DI') and slope ('DS').
        offset = FloatCommand('DI')
        slope = FloatCommand('DS')
    def __init__(self, parent):
        super().__init__(parent)
        # Expose the nested calibration groups as sub-components.
        self.rf = QMF.RF(self)
        self.dc = QMF.DC(self)
class Status(Component):
    """Identification and error-register queries of the RGA100."""
    id_string = GetCommand('ID')
    error_status = IntGetCommand('ER')
    error_ps = IntGetCommand('EP')
    error_detector = IntGetCommand('ED')
    error_qmf = IntGetCommand('EQ')
    error_cem = IntGetCommand('EC')
    error_filament = IntGetCommand('EF')
    # NOTE(review): 'EC' duplicates the error_cem command above; the RS232
    # error register likely has its own query command -- confirm against
    # the RGA100 programming manual.
    error_rs232 = IntGetCommand('EC')
    def get_errors(self):
        """
        Get RGA100 error bits in a string

        Call get_error_text() with the returned error bits string to get a
        human friendly message

        Returns
        --------
        str
            error bits coded in a string
        """
        return query_errors(self)
    def get_error_text(self, error_bits=''):
        """
        Get human-friendly error message

        Parameters
        -----------
        error_bits : str, optional
            error bits in string obtained with get_errors().
            When empty, the errors are queried from the instrument first.
        """
        if error_bits:
            return fetch_error_descriptions(error_bits)
        return fetch_error_descriptions(self.get_errors())
# Bit masks for decoding RGA100 status and error register bytes.
BIT7 = 1 << 7
BIT6 = 1 << 6
BIT5 = 1 << 5
BIT4 = 1 << 4
BIT3 = 1 << 3
BIT2 = 1 << 2
BIT1 = 1 << 1
BIT0 = 1 << 0

# Human-readable descriptions for each error code emitted by query_errors().
ERROR_DICT = {
    'NE': 'No Error',
    'PS': '* 24V Power Supply Error: ',
    'PS7': 'Voltage > 26V',
    'PS6': 'Voltage < 22V',
    'DET': '* Electrometer Error: ',
    'DET7': 'ADC16 test failure',
    'DET6': 'DETECT fails to read +5nA input current',
    'DET5': 'DETECT fails to read -5nA input current',
    'DET4': 'COMPENSATE fails to read +5nA input current',
    'DET3': 'COMPENSATE fails to read -5nA input current',
    'DET1': 'OP-AMP Input Offset Voltage out of range',
    'RF': '* Quadrupole Mass Filter RF P/S error: ',
    'RF7': 'RF_CT exceeds (V_EXT- 2V) at M_MAX',
    'RF6': 'Primary current exceeds 2.0A',
    'RF4': 'Power supply in current limited mode',
    'EM': 'Electron Multiplier error: ',
    'EM7': 'No Electron Multiplier Option installed',
    'FL': '* Filament Error: ',
    'FL7': 'No filament detected',
    'FL6': 'Unable to set the requested emission current',
    'FL5': 'Vacuum Chamber pressure too high',
    'FL0': 'Single filament operation',
    'CM': '* Communications Error: ',
    'CM6': 'Parameter conflict',
    'CM5': 'Jumper protection violation',
    'CM4': 'Transmit buffer overwrite',
    'CM3': 'OVERWRITE in receiving',
    'CM2': 'Command-too-long',
    'CM1': 'Bad Parameter received',
    'CM0': 'Bad command received',
}

# Decoding table for query_errors(): for each bit of the main status byte,
# the attribute holding the detail register and the (bit, code) pairs to
# report.  Order matches the original hand-written bit checks.
_ERROR_REGISTERS = (
    (BIT6, 'error_ps',
     ((BIT7, 'PS7'), (BIT6, 'PS6'))),
    (BIT5, 'error_detector',
     ((BIT7, 'DET7'), (BIT6, 'DET6'), (BIT5, 'DET5'),
      (BIT4, 'DET4'), (BIT3, 'DET3'), (BIT1, 'DET1'))),
    (BIT4, 'error_qmf',
     ((BIT7, 'RF7'), (BIT6, 'RF6'), (BIT4, 'RF4'))),
    (BIT3, 'error_cem',
     ((BIT7, 'EM7'),)),
    (BIT1, 'error_filament',
     ((BIT7, 'FL7'), (BIT6, 'FL6'), (BIT5, 'FL5'), (BIT0, 'FL0'))),
    (BIT0, 'error_rs232',
     ((BIT6, 'CM6'), (BIT5, 'CM5'), (BIT4, 'CM4'), (BIT3, 'CM3'),
      (BIT2, 'CM2'), (BIT1, 'CM1'), (BIT0, 'CM0'))),
)


def query_errors(status):
    """
    Query all the status registers of RGA100

    Parameters
    -----------
    status : Status
        object exposing the error registers (error_status, error_ps, ...)

    Returns
    --------
    str
        colon-separated error codes, or 'NE' when no error is flagged
    """
    status_byte = status.error_status
    if status_byte == 0:
        return 'NE'
    codes = []
    for status_bit, register_name, bit_codes in _ERROR_REGISTERS:
        if not status_byte & status_bit:
            continue
        # Only read the detail register when its status bit is set, so the
        # instrument query pattern matches the original implementation.
        register = getattr(status, register_name)
        codes.extend(code for bit, code in bit_codes if register & bit)
    return ':'.join(codes)
def fetch_error_descriptions(error_string):
    """
    Fetch the long description of each error code in the error bit string
    from query_errors()

    Parameters
    -----------
    error_string : str
        colon-separated error codes

    Returns
    --------
    str
        comma separated long description of error bits
    """
    descriptions = (ERROR_DICT[code] for code in error_string.split(':'))
    return ', '.join(descriptions)
import struct
import socket
class Packet(object):
    """A single 100-byte SICP configuration datagram for SRS devices.

    The raw bytes are held in ``self.data`` (a mutable bytearray); the
    fields live at fixed offsets:

    * 0:4    header, must be ``b'SRS\\0'``
    * 4:10   class id, device id, sequence number (big-endian shorts)
    * 10:14  serial number (big-endian long)
    * 14:20  MAC address
    * 20:22  device status word
    * 22:38  device name (NUL-terminated, at most 15 characters)
    * 38:42  IP address
    * 42:46  subnet mask
    * 46:50  gateway
    * 50:54  DNS server
    * 54:60  firmware version string
    * 60:64  password-reset request bytes
    """

    # Bits of the device status word (offset 20:22).
    DEVICE_STATUS_CONNECTED = 1 << 0  # TCP/IP port is occupied
    DEVICE_STATUS_CONFIG_ENABLED = 1 << 1  # IP configuration mode is enabled
    DEVICE_STATUS_DHCP_RUNNING = 1 << 2  # DHCP client is running
    DEVICE_STATUS_DHCP_SUCESS = 1 << 3  # DHCP was succeeded (name kept for compatibility)
    DEVICE_STATUS_DHCP_FAILED = 1 << 4  # DHCP failed
    DEVICE_STATUS_IP_CONFLICT = 1 << 5
    DEVICE_STATUS_INVALID_LENGTH = 1 << 6

    # SICP sequence numbers.
    SICP_SEQ_CALL = 0x01
    SICP_SEQ_REPLY = 0x02
    SICP_SEQ_SET = 0x03
    SICP_SEQ_DHCP = 0x04

    # Every SICP datagram is exactly 100 bytes.
    PacketSize = 100

    def __init__(self, data):
        """Wrap *data* (bytes-like, exactly 100 bytes) and decode its fields.

        Raises ValueError on a wrong-sized packet and AssertionError on a
        missing 'SRS' header.
        """
        self.input_data = data
        self.data = bytearray(self.input_data)
        if len(self.data) != Packet.PacketSize:
            raise ValueError(f'Packet size is not {Packet.PacketSize}, but {len(self.data)}')
        self.decode()

    def set_name(self, name: str):
        """Write *name* into the device-name field at offset 22.

        The field holds at most 15 characters plus a NUL terminator; longer
        names are truncated.  (Bug fix: the original computed the truncated
        length but then iterated over the full *name*, overrunning into the
        IP-address field for names of 16 characters or more.)
        """
        length = min(len(name), 15)
        index = 22
        for character in name[:length]:
            self.data[index] = ord(character)
            index += 1
        self.data[index] = 0  # NUL terminator

    def set_ip_address(self, ip: str):
        """Write a dotted-quad IP address string into offset 38:42.

        Raises ValueError when *ip* does not have four dot-separated parts.
        """
        octets = ip.split('.')
        if len(octets) != 4:
            raise ValueError(f'Invalid IP address string: {ip}')
        index = 38
        for octet in octets:
            self.data[index] = int(octet)
            index += 1

    def set_password_reset(self, value=True):
        """Request (value=True) or cancel (value=False) a password reset
        via the bytes at offset 60:64."""
        if value:
            self.data[60:64] = b'\x01\x01\x01\xff'
        else:
            self.data[60] = 0xff

    def decode(self):
        """Parse the fields out of self.data into instance attributes.

        Raises AssertionError when the 'SRS' header is missing.
        """
        if self.data[0:4] != b'SRS\0':
            raise AssertionError('Invalid header from the packet')
        self.class_id, self.device_id, self.sequence_number \
            = struct.unpack('>3h', self.data[4:10])
        self.serial_number, = struct.unpack('>1l', self.data[10:14])
        self.mac_address = self.data[14:20]
        self.device_status, = struct.unpack('>1h', self.data[20:22])
        # The name is NUL-terminated within its 16-byte field.
        self.device_name = self.data[22:38].split(b"\0")[0].decode()
        self.ip_address = self.data[38:42]
        self.subnet_mask = self.data[42:46]
        self.gateway = self.data[46:50]
        self.dns_server = self.data[50:54]
        self.version = self.data[54:60].split(b"\0")[0].decode()

    def get_ip_address_string(self):
        """Return the decoded IP address in dotted-quad notation."""
        return self.convert_to_ip_format(self.ip_address)

    def get_mac_address_string(self):
        """Return the decoded MAC address as hyphen-separated hex bytes."""
        return self.convert_to_mac_format(self.mac_address)

    def is_connected(self):
        return self.device_status & Packet.DEVICE_STATUS_CONNECTED != 0

    def is_configurable(self):
        return self.device_status & Packet.DEVICE_STATUS_CONFIG_ENABLED != 0

    def is_dhcp_running(self):
        return self.device_status & Packet.DEVICE_STATUS_DHCP_RUNNING != 0

    def is_dhcp_successful(self):
        return self.device_status & Packet.DEVICE_STATUS_DHCP_SUCESS != 0

    def is_dhcp_failed(self):
        return self.device_status & Packet.DEVICE_STATUS_DHCP_FAILED != 0

    def is_ip_conflicted(self):
        return self.device_status & Packet.DEVICE_STATUS_IP_CONFLICT != 0

    def is_data_length_invalid(self):
        return self.device_status & Packet.DEVICE_STATUS_INVALID_LENGTH != 0

    @staticmethod
    def convert_to_ip_format(s):
        """Format a 4-byte sequence as 'a.b.c.d'; return '' on wrong length."""
        if len(s) == 4:
            return "%d.%d.%d.%d" % (s[0], s[1], s[2], s[3])
        else:
            return ""

    @staticmethod
    def convert_to_mac_format(s):
        """Format a 6-byte sequence as 'xx-xx-xx-xx-xx-xx'; '' on wrong length."""
        if len(s) == 6:
            return "%02x-%02x-%02x-%02x-%02x-%02x" % (s[0], s[1], s[2], s[3], s[4], s[5])
        else:
            return ""

    def print_raw(self):
        """Hex-dump the packet, 16 bytes per row; digit/letter bytes (and a
        few punctuation bytes in the same ranges) are shown as characters."""
        for i, d in enumerate(self.data):
            if 47 < d < 58 or 63 < d < 91 or 96 < d < 123:
                print(f"'{chr(d)}'", end='')
            else:
                print(f'{d:02x} ', end='')
            if (i + 1) % 16 == 0:
                print('')
        print('')

    def print_info(self):
        """Re-decode the packet and print every field in human-readable form."""
        self.decode()
        print("Class ID : {}".format(self.class_id))
        print("Device ID : {}".format(self.device_id))
        print("SICP Seq. No.: {}".format(self.sequence_number))
        print("Serial No. : {}".format(self.serial_number))
        print("MAC address : {}".format(self.convert_to_mac_format(self.mac_address)))
        print("Device name : {}".format(self.device_name))
        print("IP address : {}".format(self.convert_to_ip_format(self.ip_address)))
        print("Subnet mask : {}".format(self.convert_to_ip_format(self.subnet_mask)))
        print("Gateway : {}".format(self.convert_to_ip_format(self.gateway)))
        print("DNS server : {}".format(self.convert_to_ip_format(self.dns_server)))
        print("Version : {}".format(self.version))
        print("Connected to a client : {}".format(self.is_connected()))
        print("Config. enabled : {}".format(self.is_configurable()))
        print("DHCP running : {}".format(self.is_dhcp_running()))
        print("DHCP success : {}".format(self.is_dhcp_successful()))
        print("DHCP failed : {}".format(self.is_dhcp_failed()))
        print("IP address conflict : {}".format(self.is_ip_conflicted()))
        print("Invalid packet length : {}".format(self.is_data_length_invalid()))
        print("Device status : {}".format(self.device_status))
        print("===============================")

    def get_short_status_from_packet(self):
        """Return a one-word summary of the device status, checking the
        status bits in priority order."""
        if self.is_connected():
            return 'Connected'
        elif self.is_configurable():
            return 'Configurable'
        elif self.is_ip_conflicted():
            return 'IP_conflict'
        elif self.is_dhcp_running():
            return 'DHCP running'
        elif self.is_dhcp_failed():
            return 'DHCP failed'
        elif self.is_data_length_invalid():
            return 'Error during SICP'
        else:
            return 'Available'

    def set_sequence_number(self, number: int):
        """Write the SICP sequence number (only SET and DHCP are allowed
        for outgoing configuration packets)."""
        if number == Packet.SICP_SEQ_SET or number == Packet.SICP_SEQ_DHCP:
            self.data[8:10] = 0x0, number
        else:
            raise ValueError(f'Invalid sequence number: {number}')
class SICP(object):
    """Discover and configure SRS devices via SICP UDP broadcasts on port 818."""

    BROADCAST_ADDRESS = b"255.255.255.255"
    PORT = 818

    # Pre-built discovery calls: 'SRS\0' header, class/device id, CALL sequence.
    CALL_MSG_RGA_REA = b"SRS\x00\x00\x01\x00\x04\x00\x01"
    CALL_MSG_RGA_ALL = b"SRS\x00\x00\x01\x00\xFF\x00\x01"

    def __init__(self):
        self.packet_list = []
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

    def _call(self, msg=CALL_MSG_RGA_ALL):
        """Broadcast a discovery call datagram."""
        self.socket.sendto(msg, (SICP.BROADCAST_ADDRESS, SICP.PORT))

    def _get_replies(self, timeout=3):
        """Collect reply packets into packet_list until the socket times out
        (or a malformed reply arrives)."""
        old_timeout = self.socket.gettimeout()
        self.socket.settimeout(timeout)
        self.packet_list = []
        while True:
            try:
                data, address = self.socket.recvfrom(128)
                self.packet_list.append(Packet(data))
            except (OSError, ValueError, AssertionError):
                # OSError covers socket.timeout; ValueError/AssertionError come
                # from Packet() on malformed data.  (The original used a bare
                # ``except:``, which also swallowed KeyboardInterrupt.)
                break
        self.socket.settimeout(old_timeout)

    def find(self, timeout=3):
        """Broadcast a discovery call and collect all replies."""
        self._call()
        self._get_replies(timeout)

    def send_packet(self, packet, timeout=3):
        """Broadcast *packet* and return the decoded reply Packet."""
        old_timeout = self.socket.gettimeout()
        self.socket.settimeout(timeout)
        self.socket.sendto(packet.data, (SICP.BROADCAST_ADDRESS, SICP.PORT))
        data, address = self.socket.recvfrom(128)
        self.socket.settimeout(old_timeout)
        return Packet(data)
if __name__ == "__main__":
    # Demo: broadcast a SICP discovery call and print every reply.
    s = SICP()
    s.find()
    for packet in s.packet_list:
        packet.print_info()
        # packet.print_raw()  # uncomment for a raw hex dump of each reply
**This repository is a fork of Felix Krull's `rgain` repository on Bitbucket
which aims to port the codebase to a modern Python 3 version.**
# rgain3 -- ReplayGain tools and Python library
This Python package provides modules to read, write and calculate Replay Gain
as well as 2 scripts that utilize these modules to do Replay Gain.
[Replay Gain][1] is a proposed standard (and has been for some time -- but it's
widely accepted) that's designed to solve the problem of varying volumes between
different audio files. I won't lay it all out for you here, go read it yourself.
## Requirements
- Python >= 3.5 -- http://python.org/
- GStreamer -- http://gstreamer.org/
- Cairo 2D -- https://www.cairographics.org/
To install these dependencies on Debian or Ubuntu (16.10 or newer):
```console
$ apt install \
gir1.2-gstreamer-1.0 \
gstreamer1.0-plugins-base \
gstreamer1.0-plugins-good \
gstreamer1.0-plugins-bad \
gstreamer1.0-plugins-ugly \
libcairo2-dev \
libgirepository1.0-dev \
python3
```
You will also need GStreamer decoding plugins for any audio formats you want to
use.
## Installation
Just install it like any other Python package using `pip`:
```console
$ python3 -m pip install --user rgain3
```
## `replaygain`
This is a program like, say, **vorbisgain** or **mp3gain**, the difference
being that instead of supporting a mere one format, it supports several:
- Ogg Vorbis (or probably anything you can put into an Ogg container)
- Flac
- WavPack
- MP4 (commonly using the AAC codec)
- MP3
Basic usage is simple:
```console
$ replaygain AUDIOFILE1 AUDIOFILE2 ...
```
There are some options; see them by running:
```console
$ replaygain --help
```
## `collectiongain`
This program is designed to apply Replay Gain to whole music collections, plus
the ability to simply add new files, run **collectiongain** and have it
replay-gain those files without asking twice.
To use it, simply run:
```console
$ collectiongain PATH_TO_MUSIC
```
and re-run it whenever you add new files. Run:
```console
$ collectiongain --help
```
to see possible options.
If, however, you want to find out how exactly **collectiongain** works, read on
(but be warned: It's long, boring, technical, incomprehensible and awesome).
**collectiongain** runs in two phases: The file collecting phase and the actual
run. Prior to analyzing any audio data, **collectiongain** gathers all audio files in
the directory and determines a so-called album ID for each from the file's tags:
- If the file contains a Musicbrainz album ID, that is used.
- Otherwise, if the file contains an *album* tag, it is joined with either
* a MusicBrainz album artist ID, if that exists
* an *albumartist* tag, if that exists,
* or the *artist* tag
* or nothing if none of the above tags exist.
The resulting artist-album combination is the album ID for that file.
- If the file doesn't contain a Musicbrainz album ID or an *album* tag, it is
presumed to be a single track without album; it will only get track gain, no
album gain.
Since this step takes a relatively long time, the album IDs are cached between
several runs of **collectiongain**. If a file was modified or a new file was
added, the album ID will be (re-)calculated for that file only.
The program will also cache an educated guess as to whether a file was already
processed and had Replay Gain added -- if **collectiongain** thinks so, that
file will be totally ignored for the actual run. This flag is set whenever the file
is processed in the actual run phase (save for dry runs, which you can enable
with the **--dry-run** switch) and is cleared whenever a file was changed. You
can pass the **--ignore-cache** switch to make **collectiongain** totally ignore
the cache; in that case, it will behave as if no cache was present and read your
collection from scratch.
For the actual run, **collectiongain** will simply look at all files that have
survived the cleansing described above; for files that don't contain Replay Gain
information, **collectiongain** will calculate it and write it to the files (use
the **--force** flag to calculate gain even if the file already has gain data).
Here comes the big moment of the album ID: files that have the same album ID are
considered to be one album (duh) for the calculation of album gain. If only one
file of an album is missing gain information, the whole album will be
recalculated to make sure the data is up-to-date.
## MP3 formats
Proper Replay Gain support for MP3 files is a bit of a mess: on the one hand,
there is the **mp3gain** [application][2] which was relatively widely used (I
don't know if it still is) -- it directly modifies the audio data which has the
advantage that it works with pretty much any player, but it also means you have
to decide ahead of time whether you want track gain or album gain. Besides, it's
just not very elegant. On the other hand, there are at least two commonly used
ways [to store proper Replay Gain information in ID3v2 tags][3].
Now, in general you don't have to worry about this when using this package: by
default, **replaygain** and **collectiongain** will read and write Replay Gain
information in the two most commonly used formats. However, if for whatever
reason you need more control over the MP3 Replay Gain information, you can use
the **--mp3-format** option (supported by both programs) to change the
behaviour. Possible choices with this switch are:
*replaygain.org* (alias: *fb2k*)
Replay Gain information is stored in ID3v2 TXXX frames. This format is
specified on the replaygain.org website as the recommended format for MP3
files. Notably, this format is also used by the [foobar2000 music player for
Windows][4].
*legacy* (alias: *ql*)
Replay Gain information is stored in ID3v2.4 RVA2 frames. This format is
described as "legacy" by replaygain.org; however, it is still the primary
format for at least the [Quod Libet music player][5] and possibly others. It
should be noted that this format does not support volume adjustments of more
than 64 dB: if the calculated gain value is smaller than -64 dB or greater
than or equal to +64 dB, it is clamped to these limit values.
*default*
This is the default implementation used by both **replaygain** and
**collectiongain**. When writing Replay Gain data, both the *replaygain.org*
as well as the *legacy* format are written. As for reading, if a file
contains data in both formats, both data sets are read and then compared. If
they match up, that Replay Gain information is returned for the file.
However, if they don't match, no Replay Gain data is returned to signal that
this file does not contain valid (read: consistent) Replay Gain information.
# Development
Fork and clone this repository. Inside the checkout create a `virtualenv` and install `rgain3` in develop mode:
Note that developing from source requires the Python headers and therefore the
`python3.x-dev` system package to be installed.
```console
$ python3 -m venv env
$ source env/bin/activate
(env) $ python -m pip install -Ue .
```
### Running Tests
To run the tests with the Python version of your current virtualenv, simply
invoke `pytest` installing `test` extras:
```console
(env) $ python -m pip install -Ue ".[test]"
(env) $ pytest
```
You can run tests for all supported Python version using `tox` like so:
```console
(env) $ tox
```
# Copyright
With the exception of the manpages, all files are::
- Copyright (c) 2009-2015 Felix Krull <f_krull@gmx.de>
- Copyright (c) 2019-2020 Christian Haudum <developer@christianhaudum.at>
The manpages were originally written for the Debian project and are::
- Copyright (c) 2011 Simon Chopin <chopin.simon@gmail.com>
- Copyright (c) 2012-2015 Felix Krull <f_krull@gmx.de>
[1]: https://wiki.hydrogenaud.io/index.php?title=ReplayGain
[2]: http://mp3gain.sourceforge.net
[3]: http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_specification#ID3v2
[4]: http://foobar2000.org
[5]: http://code.google.com/p/quodlibet
| /rgain3-1.1.1.tar.gz/rgain3-1.1.1/README.md | 0.67971 | 0.862699 | README.md | pypi |
# Public API for "from rgb_colorizer import *".
# (Bug fix: the original spelled this __ALL__, which Python ignores --
# only lowercase __all__ controls star-imports.)
__all__ = ["RGBColor", "colorize"]


class RGBColor:
    """An RGB color with components in 0..255, optionally looked up from the
    shared registry of named colors in ``color_names``."""

    color_names = {
        "red": [255, 0, 0],
        "green": [0, 255, 0],
        "blue": [0, 100, 255],
        "white": [255, 255, 255],
        "black": [0, 0, 0],
        "orange": [255, 153, 0],
        "yellow": [255, 255, 0],
        "cyan": [0, 255, 255],
        "lightblue": [0, 200, 255],
        "violet": [120, 70, 200],
    }

    def __init__(self, red: int = -1, green: int = -1, blue: int = -1, color_name: str = "") -> None:
        """Create a color from a registered name, or from RGB components.

        Raises ValueError for an unknown name or out-of-range components,
        and AssertionError for wrong argument types.
        """
        if color_name:
            assert isinstance(color_name, str), f"Type of color_name must be str but now is {type(color_name).__name__}"
            if not self.color_names.get(color_name):
                raise ValueError((
                    f"Invalid color name '{color_name}'."
                    f" Use {__name__}.RGBColor.get_color_names() and choose one of them or"
                    f" set new by method {__name__}.RGBColor.set_color_name()"
                ))
            self.init(*self.color_names[color_name])
        else:
            self.init(red, green, blue)

    @staticmethod
    def _validate_components(red: int, green: int, blue: int) -> None:
        """Type- and range-check RGB components (shared by init and
        set_color_name, which previously duplicated this code)."""
        assert isinstance(red, int), f"Type of red must be int but now is {type(red).__name__}"
        assert isinstance(green, int), f"Type of green must be int but now is {type(green).__name__}"
        assert isinstance(blue, int), f"Type of blue must be int but now is {type(blue).__name__}"
        for component in (red, green, blue):
            if component < 0 or component > 255:
                raise ValueError("Use integer from 0 to 255")

    def init(self, red: int, green: int, blue: int) -> None:
        """Validate and store the RGB components."""
        self._validate_components(red, green, blue)
        self.red = red
        self.green = green
        self.blue = blue

    @staticmethod
    def get_color_names() -> tuple:
        """Return all registered color names."""
        return tuple(RGBColor.color_names.keys())

    @staticmethod
    def set_color_name(red: int, green: int, blue: int, color_name: str) -> None:
        """Register (or overwrite) a named color in the shared registry."""
        if not color_name:
            raise ValueError("Param color_name must not be empty")
        assert isinstance(color_name, str), f"Type of color_name must be str but now is {type(color_name).__name__}"
        RGBColor._validate_components(red, green, blue)
        RGBColor.color_names[color_name] = [red, green, blue]


def colorize(text: str, color: RGBColor, on_color: RGBColor = None) -> str:
    """Return *text* wrapped in 24-bit ANSI escape codes for the given
    foreground color and optional background color."""
    assert isinstance(text, str), f"Type of text must be str but now is {type(text).__name__}"
    assert isinstance(color, RGBColor), f"Type of color must be RGBColor but now is {type(color).__name__}"
    bg = ""
    if on_color:
        # Bug fix: the original re-checked `color` here instead of `on_color`,
        # so a non-RGBColor background failed later with AttributeError.
        assert isinstance(on_color, RGBColor), f"Type of on_color must be RGBColor but now is {type(on_color).__name__}"
        bg = f"48;2;{on_color.red};{on_color.green};{on_color.blue};"
    return (f"\033[{bg}38;2;{color.red};{color.green};{color.blue}m{text}\033[0m")
if __name__ == "__main__":
    # Usage demo: named colors, raw RGB values, custom color registration,
    # listing the registry, and background colors.
    print(colorize("Here a simple usage of red color by name", RGBColor(color_name="red")))
    print(colorize("Here a simple usage of red color by rgb params", RGBColor(255, 0, 0)))
    RGBColor.set_color_name(120, 70, 200, "my_color")
    print(colorize("Here a simple usage of custom color by name", RGBColor(color_name="my_color")))
    print(colorize("But note that two names can have same rgb params (here use violet color name)\n", RGBColor(color_name="violet")))
    print(colorize("Also you can get color names that available", RGBColor(color_name="green")))
    for __color in RGBColor.get_color_names():
        print(colorize(f"This text have {__color} color", RGBColor(color_name=__color)))
    print(colorize("\nAnd you can set background color with on_color param (by name or rgb values), but it's not necessary\n", RGBColor(color_name="blue")))
    print(colorize("For example this text has red color on yellow background", RGBColor(color_name="red"), RGBColor(color_name="yellow")))
    print(colorize("\nSometimes for some reason on different platform colors may look different", RGBColor(color_name="blue")))
from typing import List, Dict
# Static USB package payloads (hex strings) sent before ("init") and after
# ("final") the per-channel color packages.  NOTE(review): the meanings of
# the individual command bytes are undocumented here -- presumably captured
# from the vendor tool; confirm before editing.
PACKAGES_DATA: Dict[str, List[str]] = {
    "init": [
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07290000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "072a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "072a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07290000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "071e0100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07090000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "072f002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "070c0000000000000680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "070c0000000000000680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07030605000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07030601000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07030601000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07030605000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07030601000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "07030601000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
    ],
    "final": [
        "07050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
    ],
}
# Per-channel package templates; the '{}' placeholder is filled with the
# channel's hex byte repeated 58 times (see RGBProfile).
RGB_PACKAGES_PATTERNS = {
    "R": ["0703060b0000{}", "0703060c0000{}"],
    "G": ["070306090000{}", "0703060a0000{}"],
    "B": ["070306070000{}", "070306080000{}"],
}
class HexColor:
    """A six-digit hex color string with per-channel accessors."""

    hex_color: str

    def __init__(self, hex_color: str = "000000"):
        self.hex_color = hex_color

    @property
    def R(self) -> str:
        """The red channel as a two-character hex string."""
        return self.hex_color[0:2]

    @property
    def G(self) -> str:
        """The green channel as a two-character hex string."""
        return self.hex_color[2:4]

    @property
    def B(self) -> str:
        """The blue channel as a two-character hex string."""
        return self.hex_color[4:6]

    def __getitem__(self, channel: str) -> str:
        """Allow dictionary-style access: color["R"], color["G"], color["B"]."""
        component = getattr(self, channel, None)
        if component is None:
            raise KeyError
        return component
class RGBProfile:
    """Build the full package sequence that programs a single RGB color:
    the static ``init`` packages, two packages per channel (in B, G, R
    order), then the static ``final`` package."""

    hex_values: HexColor
    packages: List[str]
    # Channels are emitted to the device in this order.
    order_rgb_keys = ("B", "G", "R")

    def __init__(self, hex_color: HexColor):
        self.hex_color = hex_color
        self.packages_data = []
        self.__prepare_packages()

    def __return_rgb_packages(self, key: str) -> List[str]:
        """Return the two packages for one channel, with the channel's hex
        byte repeated 58 times as the payload."""
        rgb_packages = []
        for pattern in RGB_PACKAGES_PATTERNS[key]:
            rgb_packages.append(pattern.format(self.hex_color[key] * 58))
        return rgb_packages

    def __prepare_packages(self) -> None:
        """Assemble init + per-channel + final packages into packages_data."""
        # Bug fix: copy the template list.  The original assigned
        # PACKAGES_DATA["init"] directly and then extended it in place with
        # +=, corrupting the shared module-level template on every
        # RGBProfile instantiation.
        self.packages_data = list(PACKAGES_DATA["init"])
        for rgb_key in self.order_rgb_keys:
            self.packages_data += self.__return_rgb_packages(rgb_key)
        self.packages_data += PACKAGES_DATA["final"]
try:
import struct
except ImportError:
import ustruct as struct
try:
from micropython import const
except ImportError:
def const(n): return n
from rgb_display.rgb import DisplayDevice
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/jrmoser/RGB_Display.git"
# ST7735 command opcodes (one-byte commands, optionally followed by
# parameter bytes -- see the controller's command list in the datasheet).
_NOP = const(0x00)
_SWRESET = const(0x01)
_RDDID = const(0x04)
_RDDST = const(0x09)
# Sleep / partial / normal / inversion / display on-off control.
_SLPIN = const(0x10)
_SLPOUT = const(0x11)
_PTLON = const(0x12)
_NORON = const(0x13)
_INVOFF = const(0x20)
_INVON = const(0x21)
_DISPOFF = const(0x28)
_DISPON = const(0x29)
# Address window definition and frame-memory access.
_CASET = const(0x2A)
_RASET = const(0x2B)
_RAMWR = const(0x2C)
_RAMRD = const(0x2E)
_PTLAR = const(0x30)
_COLMOD = const(0x3A)
_MADCTL = const(0x36)
# Frame rate, inversion and display-function configuration.
_FRMCTR1 = const(0xB1)
_FRMCTR2 = const(0xB2)
_FRMCTR3 = const(0xB3)
_INVCTR = const(0xB4)
_DISSET5 = const(0xB6)
# Power and VCOM control.
_PWCTR1 = const(0xC0)
_PWCTR2 = const(0xC1)
_PWCTR3 = const(0xC2)
_PWCTR4 = const(0xC3)
_PWCTR5 = const(0xC4)
_VMCTR1 = const(0xC5)
# ID read-back registers.
_RDID1 = const(0xDA)
_RDID2 = const(0xDB)
_RDID3 = const(0xDC)
_RDID4 = const(0xDD)
_PWCTR6 = const(0xFC)
# Positive / negative gamma correction tables.
_GMCTRP1 = const(0xE0)
_GMCTRN1 = const(0xE1)
class ST7735(DisplayDevice):
    """
    A simple driver for the ST7735-based displays.
    """

    _COLUMN_SET = _CASET
    _PAGE_SET = _RASET
    _RAM_WRITE = _RAMWR
    _RAM_READ = _RAMRD
    # (command, parameter-bytes) pairs sent once at init time.
    _INIT = (
        (_SWRESET, None),
        (_SLPOUT, None),
        (_COLMOD, b"\x05"),  # 16bit color
        # fastest refresh, 6 lines front porch, 3 line back porch
        (_FRMCTR1, b"\x00\x06\x03"),
        (_MADCTL, b"\x08"),  # bottom to top refresh
        # 1 clk cycle nonoverlap, 2 cycle gate rise, 3 cycle osc equalize,
        # fix on VTL
        (_DISSET5, b"\x15\x02"),
        # Line inversion.  BUGFIX: the original sent the four ASCII bytes
        # b"0x00" instead of the single parameter byte b"\x00"; INVCTR
        # takes one parameter byte.
        (_INVCTR, b"\x00"),
        (_PWCTR1, b"\x02\x70"),  # GVDD = 4.7V, 1.0uA
        (_PWCTR2, b"\x05"),  # VGH=14.7V, VGL=-7.35V
        (_PWCTR3, b"\x01\x02"),  # Opamp current small, Boost frequency
        (_VMCTR1, b"\x3c\x38"),  # VCOMH = 4V, VOML = -1.1V
        (_PWCTR6, b"\x11\x15"),
        (
            _GMCTRP1,
            b"\x09\x16\x09\x20\x21\x1b\x13\x19" b"\x17\x15\x1e\x2b\x04\x05\x02\x0e",
        ),  # Gamma
        (
            _GMCTRN1,
            b"\x08\x14\x08\x1e\x22\x1d\x18\x1e" b"\x18\x1a\x24\x2b\x06\x06\x02\x0f",
        ),
        (_CASET, b"\x00\x02\x00\x81"),  # XSTART = 2, XEND = 129
        (_RASET, b"\x00\x02\x00\x81"),  # YSTART = 2, YEND = 129
        (_NORON, None),
        (_DISPON, None),
    )
    _ENCODE_PIXEL = ">H"
    _ENCODE_POS = ">HH"

    # pylint: disable-msg=useless-super-delegation, too-many-arguments
    def __init__(
        self,
        port,
        dc,
        rst=None,
        width=128,
        height=128,
        *,
        x_offset=0,
        y_offset=0,
        rotation=0
    ):
        """Delegate to DisplayDevice; exists only to pin this driver's
        defaults (128x128 panel)."""
        super().__init__(
            port,
            dc,
            rst,
            width,
            height,
            x_offset=x_offset,
            y_offset=y_offset,
            rotation=rotation,
        )
class ST7735R(ST7735):
    """A simple driver for the ST7735R-based displays."""

    _INIT = (
        (_SWRESET, None),
        (_SLPOUT, None),
        (_MADCTL, b"\xc8"),
        (_COLMOD, b"\x05"),  # 16-bit (565) color
        (_INVCTR, b"\x07"),
        # Frame rate control for the three drive modes.
        (_FRMCTR1, b"\x01\x2c\x2d"),
        (_FRMCTR2, b"\x01\x2c\x2d"),
        (_FRMCTR3, b"\x01\x2c\x2d\x01\x2c\x2d"),
        # Power control.
        (_PWCTR1, b"\x02\x02\x84"),
        (_PWCTR2, b"\xc5"),
        (_PWCTR3, b"\x0a\x00"),
        (_PWCTR4, b"\x8a\x2a"),
        (_PWCTR5, b"\x8a\xee"),
        (_VMCTR1, b"\x0e"),
        (_INVOFF, None),
        # Positive, then negative, gamma correction tables.
        (
            _GMCTRP1,
            b"\x02\x1c\x07\x12\x37\x32\x29\x2d" b"\x29\x25\x2B\x39\x00\x01\x03\x10",
        ),
        (
            _GMCTRN1,
            b"\x03\x1d\x07\x06\x2E\x2C\x29\x2D" b"\x2E\x2E\x37\x3F\x00\x00\x02\x10",
        ),
    )

    # pylint: disable-msg=useless-super-delegation, too-many-arguments
    def __init__(
        self,
        port,
        dc,
        rst=None,
        width=128,
        height=160,
        *,
        x_offset=0,
        y_offset=0,
        rotation=0,
        bgr=False
    ):
        """Remember the BGR flag, then defer setup to the base class."""
        # init() runs from the base __init__ and reads self._bgr, so the
        # flag must be stored before delegating.
        self._bgr = bgr
        super().__init__(
            port,
            dc,
            rst,
            width,
            height,
            x_offset=x_offset,
            y_offset=y_offset,
            rotation=rotation,
        )

    def init(self):
        """Send the shared init table, then the R-variant epilogue."""
        super().init()
        epilogue = (
            (_CASET, struct.pack(">HH", 0, self.width - 1)),
            (_RASET, struct.pack(">HH", 0, self.height - 1)),
            (_NORON, None),
            (_DISPON, None),
        )
        for opcode, payload in epilogue:
            self.write(opcode, payload)
        if self._bgr:
            # Re-issue MADCTL with 0xc0 (instead of 0xc8) for BGR panels.
            self.write(_MADCTL, b"\xc0")
class ST7735S(ST7735):
    """A simple driver for the ST7735S-based displays."""

    _INIT = (
        # Frame rate control for the three drive modes.
        (_FRMCTR1, b"\x01\x2c\x2d"),
        (_FRMCTR2, b"\x01\x2c\x2d"),
        (_FRMCTR3, b"\x01\x2c\x2d\x01\x2c\x2d"),
        # Column inversion.
        (_INVCTR, b"\x07"),
        # Power sequence.
        (_PWCTR1, b"\xa2\x02\x84"),
        (_PWCTR2, b"\xc5"),
        (_PWCTR3, b"\x0a\x00"),
        (_PWCTR4, b"\x8a\x2a"),
        (_PWCTR5, b"\x8a\xee"),
        # VCOM voltage.
        (_VMCTR1, b"\x0e"),
        # Positive, then negative, gamma correction tables.
        (
            _GMCTRP1,
            b"\x0f\x1a\x0f\x18\x2f\x28\x20\x22" b"\x1f\x1b\x23\x37\x00\x07\x02\x10",
        ),
        (
            _GMCTRN1,
            b"\x0f\x1b\x0f\x17\x33\x2c\x29\x2e" b"\x30\x30\x39\x3f\x00\x07\x03\x10",
        ),
        # 65k-color mode.
        (_COLMOD, b"\x05"),
        # Scan direction: up to down, right to left.
        (_MADCTL, b"\x60"),
        (_SLPOUT, None),
        (_DISPON, None),
    )

    # pylint: disable-msg=useless-super-delegation, too-many-arguments
    def __init__(
        self,
        port,
        dc,
        bl,
        rst=None,
        width=128,
        height=160,
        *,
        x_offset=2,
        y_offset=1,
        rotation=0
    ):
        """Store the backlight pin, switch it on, then delegate setup."""
        self._bl = bl
        # Light the backlight before the init sequence runs.
        self._bl.on()
        super().__init__(
            port,
            dc,
            rst,
            width,
            height,
            x_offset=x_offset,
            y_offset=y_offset,
            rotation=rotation,
        )
import time
try:
import numpy
except ImportError:
numpy = None
try:
import struct
except ImportError:
import ustruct as struct
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/jrmoserbaltimore/RGB_Display.git"
# This is the size of the buffer to be used for fill operations, in 16-bit
# units.
_BUFFER_SIZE = 256
# On desktop CPython (as opposed to MicroPython, where `platform` is
# absent), memory is plentiful -- buffer an entire 320x240 frame instead.
try:
    import platform
    if "CPython" in platform.python_implementation():
        _BUFFER_SIZE = 320 * 240  # blit the whole thing
except ImportError:
    pass
def color565(r, g=0, b=0):
    """Pack 8-bit red, green and blue components into one RGB565 value.

    ``r`` may alternatively be a single 3-item sequence holding
    (r, g, b); in that case the ``g`` and ``b`` arguments are ignored.
    Also re-exported from the parent adafruit_rgb_display namespace.
    """
    try:
        # Accept a single (r, g, b) sequence as the first argument.
        r, g, b = r
    except TypeError:
        pass
    red = (r & 0xF8) << 8
    green = (g & 0xFC) << 3
    blue = b >> 3
    return red | green | blue
def image_to_data(image):
    """Convert a PIL image to a flat list of 16-bit 565 RGB bytes.

    Returns a list of alternating high/low bytes, one pair per pixel,
    row-major.  Requires numpy (callers guard on the module-level flag).
    """
    # NumPy is much faster at doing this. NumPy code provided by:
    # Keith (https://www.blogger.com/profile/02555547344016007163)
    rgb = numpy.array(image.convert("RGB")).astype("uint16")
    red = (rgb[:, :, 0] & 0xF8) << 8
    green = (rgb[:, :, 1] & 0xFC) << 3
    blue = rgb[:, :, 2] >> 3
    packed = red | green | blue
    # Interleave high and low bytes, then flatten to a plain list.
    return numpy.dstack(((packed >> 8) & 0xFF, packed & 0xFF)).flatten().tolist()
class DummyPin:
    """Stand-in for a ``DigitalInOut()`` when no real pin is wired up.

    Every method is a no-op, every property reads back as None, and
    every property write is silently discarded.
    """

    def deinit(self):
        """No-op deinit."""

    def switch_to_output(self, *args, **kwargs):
        """No-op switch_to_output."""

    def switch_to_input(self, *args, **kwargs):
        """No-op switch_to_input."""

    @property
    def value(self):
        """Pin value; always None."""

    @value.setter
    def value(self, val):
        pass

    @property
    def direction(self):
        """Pin direction; always None."""

    @direction.setter
    def direction(self, val):
        pass

    @property
    def pull(self):
        """Pin pull; always None."""

    @pull.setter
    def pull(self, val):
        pass
class Display: # pylint: disable-msg=no-member
    """Base class for all RGB display devices
    :param width: number of pixels wide
    :param height: number of pixels high
    """
    # Controller-specific command opcodes; subclasses must fill these in.
    _PAGE_SET = None
    _COLUMN_SET = None
    _RAM_WRITE = None
    _RAM_READ = None
    # Offsets added to every x/y coordinate, for panels whose visible area
    # does not start at the controller's (0, 0).
    _X_START = 0  # pylint: disable=invalid-name
    _Y_START = 0  # pylint: disable=invalid-name
    _INIT = ()
    # struct formats used to encode pixels/positions and decode read-back
    # pixel data.
    _ENCODE_PIXEL = ">H"
    _ENCODE_POS = ">HH"
    _DECODE_PIXEL = ">BBB"
    def __init__(self, width, height, rotation):
        self.width = width
        self.height = height
        if rotation not in (0, 90, 180, 270):
            raise ValueError("Rotation must be 0/90/180/270")
        self._rotation = rotation
        self.init()
    def init(self):
        """Run the initialization commands."""
        for command, data in self._INIT:
            self.write(command, data)
    # pylint: disable-msg=invalid-name,too-many-arguments
    def _block(self, x0, y0, x1, y1, data=None):
        """Read or write a block of data.

        Sets the column/page address window to the inclusive rectangle
        (x0, y0)-(x1, y1), then either writes ``data`` into display RAM
        or, when ``data`` is None, reads the window back and returns the
        raw bytes.
        """
        self.write(
            self._COLUMN_SET, self._encode_pos(x0 + self._X_START, x1 + self._X_START)
        )
        self.write(
            self._PAGE_SET, self._encode_pos(y0 + self._Y_START, y1 + self._Y_START)
        )
        if data is None:
            size = struct.calcsize(self._DECODE_PIXEL)
            return self.read(self._RAM_READ, (x1 - x0 + 1) * (y1 - y0 + 1) * size)
        self.write(self._RAM_WRITE, data)
        return None
    # pylint: enable-msg=invalid-name,too-many-arguments
    def _encode_pos(self, x, y):
        """Encode a position into bytes."""
        return struct.pack(self._ENCODE_POS, x, y)
    def _encode_pixel(self, color):
        """Encode a pixel color into bytes."""
        return struct.pack(self._ENCODE_PIXEL, color)
    def _decode_pixel(self, data):
        """Decode bytes into a pixel color."""
        return color565(*struct.unpack(self._DECODE_PIXEL, data))
    def pixel(self, x, y, color=None):
        """Read or write a pixel at a given position.

        With ``color`` None, reads the pixel back and returns it as a
        565 value.  Otherwise writes ``color``; out-of-bounds writes are
        silently ignored.
        """
        if color is None:
            return self._decode_pixel(self._block(x, y, x, y))
        if 0 <= x < self.width and 0 <= y < self.height:
            self._block(x, y, x, y, self._encode_pixel(color))
        return None
    def image(self, img, rotation=None, x=0, y=0):
        """Write a PIL image to the display.  The image must be in RGB or
        RGBA mode and its size (after optional rotation) must not exceed
        the display size when drawn at the supplied origin."""
        if rotation is None:
            rotation = self.rotation
        if not img.mode in ("RGB", "RGBA"):
            raise ValueError("Image must be in mode RGB or RGBA")
        if rotation not in (0, 90, 180, 270):
            raise ValueError("Rotation must be 0/90/180/270")
        if rotation != 0:
            img = img.rotate(rotation, expand=True)
        imwidth, imheight = img.size
        if x + imwidth > self.width or y + imheight > self.height:
            raise ValueError(
                "Image must not exceed dimensions of display ({0}x{1}).".format(
                    self.width, self.height
                )
            )
        if numpy:
            pixels = list(image_to_data(img))
        else:
            # Slower but doesn't require numpy
            pixels = bytearray(imwidth * imheight * 2)
            for i in range(imwidth):
                for j in range(imheight):
                    pix = color565(img.getpixel((i, j)))
                    # Big-endian 565: high byte first.
                    pixels[2 * (j * imwidth + i)] = pix >> 8
                    pixels[2 * (j * imwidth + i) + 1] = pix & 0xFF
        self._block(x, y, x + imwidth - 1, y + imheight - 1, pixels)
    # pylint: disable-msg=too-many-arguments
    def fill_rectangle(self, x, y, width, height, color):
        """Draw a rectangle at specified position with specified width and
        height, and fill it with the specified color."""
        # Clamp the rectangle to the display bounds.
        x = min(self.width - 1, max(0, x))
        y = min(self.height - 1, max(0, y))
        width = min(self.width - x, max(1, width))
        height = min(self.height - y, max(1, height))
        # Set the address window with an empty write, then stream the fill
        # color in fixed-size chunks to bound memory use.
        self._block(x, y, x + width - 1, y + height - 1, b"")
        chunks, rest = divmod(width * height, _BUFFER_SIZE)
        pixel = self._encode_pixel(color)
        if chunks:
            data = pixel * _BUFFER_SIZE
            for _ in range(chunks):
                self.write(None, data)
        self.write(None, pixel * rest)
    # pylint: enable-msg=too-many-arguments
    def fill(self, color=0):
        """Fill the whole display with the specified color."""
        self.fill_rectangle(0, 0, self.width, self.height, color)
    def hline(self, x, y, width, color):
        """Draw a horizontal line."""
        self.fill_rectangle(x, y, width, 1, color)
    def vline(self, x, y, height, color):
        """Draw a vertical line."""
        self.fill_rectangle(x, y, 1, height, color)
    @property
    def rotation(self):
        """The default rotation in degrees (0/90/180/270)."""
        return self._rotation
    @rotation.setter
    def rotation(self, val):
        if val not in (0, 90, 180, 270):
            raise ValueError("Rotation must be 0/90/180/270")
        self._rotation = val
class DisplayDevice(Display):
    """Base class for devices.

    Drives a display controller over ``port`` using a data/command pin
    (``dc``) and an optional hardware-reset pin (``rst``).
    """

    # pylint: disable-msg=too-many-arguments
    def __init__(
        self,
        port,
        dc,
        rst=None,
        width=1,
        height=1,
        *,
        x_offset=0,
        y_offset=0,
        rotation=0
    ):
        self.port = port
        self.dc_pin = dc
        self.rst = rst
        self.dc_pin.off()
        if self.rst:
            self.rst.off()
        self.reset()
        self._X_START = x_offset  # pylint: disable=invalid-name
        self._Y_START = y_offset  # pylint: disable=invalid-name
        super().__init__(width, height, rotation)

    # pylint: enable-msg=too-many-arguments
    def reset(self):
        """Hardware-reset the device via the RST pin.

        No-op when no reset pin was supplied.  BUGFIX: the original
        dereferenced ``self.rst`` unconditionally, so constructing a
        device with the default ``rst=None`` raised AttributeError.
        """
        if self.rst is None:
            return
        self.rst.off()
        time.sleep(0.050)  # 50 milliseconds
        self.rst.on()
        time.sleep(0.050)  # 50 milliseconds

    # pylint: disable=no-member
    def write(self, command=None, data=None):
        """write to the device: commands and data"""
        if command is not None:
            self.dc_pin.off()  # command byte is sent with the DC pin low
            self.port.send(bytearray([command]))
        if data is not None:
            self.dc_pin.on()  # data bytes are sent with the DC pin high
            self.port.send(data)

    def read(self, command=None, count=0):
        """read from device with optional command"""
        data = bytearray(count)
        self.dc_pin.off()
        with self.port as port:
            if command is not None:
                port.send(bytearray([command]))
            if count:
                port.readinto(data)
        return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.