Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/category_encoding.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/discretization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/feature_space.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/hashed_crossing.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/hashing.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/index_lookup.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/integer_lookup.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/mel_spectrogram.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/normalization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/pipeline.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/rescaling.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/stft_spectrogram.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/string_lookup.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/text_vectorization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/tf_data_layer.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/category_encoding.py +166 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/discretization.py +337 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/feature_space.py +815 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/hashed_crossing.py +227 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/hashing.py +287 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_contrast.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_crop.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_flip.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_grayscale.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_hue.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_posterization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_rotation.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_saturation.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_sharpness.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_shear.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_translation.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_zoom.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/resizing.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/solarization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py +109 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py +385 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/center_crop.py +273 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/equalization.py +224 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py +89 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/mix_up.py +180 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/rand_augment.py +235 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_brightness.py +158 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py +132 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py +210 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_contrast.py +149 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_crop.py +276 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_flip.py +236 -0
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (206 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/category_encoding.cpython-310.pyc
ADDED
|
Binary file (6.51 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/discretization.cpython-310.pyc
ADDED
|
Binary file (12.5 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/feature_space.cpython-310.pyc
ADDED
|
Binary file (24.8 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/hashed_crossing.cpython-310.pyc
ADDED
|
Binary file (7.74 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/hashing.cpython-310.pyc
ADDED
|
Binary file (9.44 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/index_lookup.cpython-310.pyc
ADDED
|
Binary file (28.9 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/integer_lookup.cpython-310.pyc
ADDED
|
Binary file (18.2 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/mel_spectrogram.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/normalization.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/pipeline.cpython-310.pyc
ADDED
|
Binary file (3.16 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/rescaling.cpython-310.pyc
ADDED
|
Binary file (3.01 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/stft_spectrogram.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/string_lookup.cpython-310.pyc
ADDED
|
Binary file (17.8 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/text_vectorization.cpython-310.pyc
ADDED
|
Binary file (23 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/__pycache__/tf_data_layer.cpython-310.pyc
ADDED
|
Binary file (2.62 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/category_encoding.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.backend import KerasTensor
|
| 3 |
+
from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
|
| 4 |
+
from keras.src.utils import backend_utils
|
| 5 |
+
from keras.src.utils import numerical_utils
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.layers.CategoryEncoding")
|
| 9 |
+
class CategoryEncoding(TFDataLayer):
|
| 10 |
+
"""A preprocessing layer which encodes integer features.
|
| 11 |
+
|
| 12 |
+
This layer provides options for condensing data into a categorical encoding
|
| 13 |
+
when the total number of tokens are known in advance. It accepts integer
|
| 14 |
+
values as inputs, and it outputs a dense or sparse representation of those
|
| 15 |
+
inputs. For integer inputs where the total number of tokens is not known,
|
| 16 |
+
use `keras.layers.IntegerLookup` instead.
|
| 17 |
+
|
| 18 |
+
**Note:** This layer is safe to use inside a `tf.data` pipeline
|
| 19 |
+
(independently of which backend you're using).
|
| 20 |
+
|
| 21 |
+
Examples:
|
| 22 |
+
|
| 23 |
+
**One-hot encoding data**
|
| 24 |
+
|
| 25 |
+
>>> layer = keras.layers.CategoryEncoding(
|
| 26 |
+
... num_tokens=4, output_mode="one_hot")
|
| 27 |
+
>>> layer([3, 2, 0, 1])
|
| 28 |
+
array([[0., 0., 0., 1.],
|
| 29 |
+
[0., 0., 1., 0.],
|
| 30 |
+
[1., 0., 0., 0.],
|
| 31 |
+
[0., 1., 0., 0.]]>
|
| 32 |
+
|
| 33 |
+
**Multi-hot encoding data**
|
| 34 |
+
|
| 35 |
+
>>> layer = keras.layers.CategoryEncoding(
|
| 36 |
+
... num_tokens=4, output_mode="multi_hot")
|
| 37 |
+
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
|
| 38 |
+
array([[1., 1., 0., 0.],
|
| 39 |
+
[1., 0., 0., 0.],
|
| 40 |
+
[0., 1., 1., 0.],
|
| 41 |
+
[0., 1., 0., 1.]]>
|
| 42 |
+
|
| 43 |
+
**Using weighted inputs in `"count"` mode**
|
| 44 |
+
|
| 45 |
+
>>> layer = keras.layers.CategoryEncoding(
|
| 46 |
+
... num_tokens=4, output_mode="count")
|
| 47 |
+
>>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
|
| 48 |
+
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
|
| 49 |
+
array([[0.1, 0.2, 0. , 0. ],
|
| 50 |
+
[0.2, 0. , 0. , 0. ],
|
| 51 |
+
[0. , 0.2, 0.3, 0. ],
|
| 52 |
+
[0. , 0.2, 0. , 0.4]]>
|
| 53 |
+
|
| 54 |
+
Args:
|
| 55 |
+
num_tokens: The total number of tokens the layer should support. All
|
| 56 |
+
inputs to the layer must integers in the range `0 <= value <
|
| 57 |
+
num_tokens`, or an error will be thrown.
|
| 58 |
+
output_mode: Specification for the output of the layer.
|
| 59 |
+
Values can be `"one_hot"`, `"multi_hot"` or `"count"`,
|
| 60 |
+
configuring the layer as follows:
|
| 61 |
+
- `"one_hot"`: Encodes each individual element in the input
|
| 62 |
+
into an array of `num_tokens` size, containing a 1 at the
|
| 63 |
+
element index. If the last dimension is size 1, will encode
|
| 64 |
+
on that dimension. If the last dimension is not size 1,
|
| 65 |
+
will append a new dimension for the encoded output.
|
| 66 |
+
- `"multi_hot"`: Encodes each sample in the input into a single
|
| 67 |
+
array of `num_tokens` size, containing a 1 for each
|
| 68 |
+
vocabulary term present in the sample. Treats the last
|
| 69 |
+
dimension as the sample dimension, if input shape is
|
| 70 |
+
`(..., sample_length)`, output shape will be
|
| 71 |
+
`(..., num_tokens)`.
|
| 72 |
+
- `"count"`: Like `"multi_hot"`, but the int array contains a
|
| 73 |
+
count of the number of times the token at that index
|
| 74 |
+
appeared in the sample.
|
| 75 |
+
For all output modes, currently only output up to rank 2 is
|
| 76 |
+
supported.
|
| 77 |
+
Defaults to `"multi_hot"`.
|
| 78 |
+
sparse: Whether to return a sparse tensor; for backends that support
|
| 79 |
+
sparse tensors.
|
| 80 |
+
|
| 81 |
+
Call arguments:
|
| 82 |
+
inputs: A 1D or 2D tensor of integer inputs.
|
| 83 |
+
count_weights: A tensor in the same shape as `inputs` indicating the
|
| 84 |
+
weight for each sample value when summing up in `count` mode.
|
| 85 |
+
Not used in `"multi_hot"` or `"one_hot"` modes.
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
def __init__(
|
| 89 |
+
self, num_tokens=None, output_mode="multi_hot", sparse=False, **kwargs
|
| 90 |
+
):
|
| 91 |
+
super().__init__(**kwargs)
|
| 92 |
+
|
| 93 |
+
# Support deprecated names for output_modes.
|
| 94 |
+
if output_mode == "binary":
|
| 95 |
+
output_mode = "multi_hot"
|
| 96 |
+
|
| 97 |
+
# 'output_mode' must be one of ("count", "one_hot", "multi_hot")
|
| 98 |
+
if output_mode not in ("count", "one_hot", "multi_hot"):
|
| 99 |
+
raise ValueError(f"Unknown arg for output_mode: {output_mode}")
|
| 100 |
+
|
| 101 |
+
if num_tokens is None:
|
| 102 |
+
raise ValueError(
|
| 103 |
+
"num_tokens must be set to use this layer. If the "
|
| 104 |
+
"number of tokens is not known beforehand, use the "
|
| 105 |
+
"IntegerLookup layer instead."
|
| 106 |
+
)
|
| 107 |
+
if num_tokens < 1:
|
| 108 |
+
raise ValueError(
|
| 109 |
+
f"`num_tokens` must be >= 1. Received: num_tokens={num_tokens}."
|
| 110 |
+
)
|
| 111 |
+
self.num_tokens = num_tokens
|
| 112 |
+
self.output_mode = output_mode
|
| 113 |
+
self.sparse = sparse
|
| 114 |
+
self._allow_non_tensor_positional_args = True
|
| 115 |
+
self._convert_input_args = False
|
| 116 |
+
|
| 117 |
+
def _encode(self, inputs, count_weights=None):
|
| 118 |
+
inputs = self.backend.core.convert_to_tensor(inputs)
|
| 119 |
+
return numerical_utils.encode_categorical_inputs(
|
| 120 |
+
inputs,
|
| 121 |
+
output_mode=self.output_mode,
|
| 122 |
+
depth=self.num_tokens,
|
| 123 |
+
dtype=self.dtype,
|
| 124 |
+
sparse=self.sparse,
|
| 125 |
+
count_weights=count_weights,
|
| 126 |
+
backend_module=self.backend,
|
| 127 |
+
)
|
| 128 |
+
|
| 129 |
+
def compute_output_shape(self, input_shape):
|
| 130 |
+
if (input_shape is not None) & (len(input_shape) == 0):
|
| 131 |
+
return (self.num_tokens,)
|
| 132 |
+
if self.output_mode == "one_hot":
|
| 133 |
+
if input_shape[-1] != 1:
|
| 134 |
+
return tuple(input_shape) + (self.num_tokens,)
|
| 135 |
+
elif len(input_shape) == 1:
|
| 136 |
+
return tuple(input_shape) + (self.num_tokens,)
|
| 137 |
+
else:
|
| 138 |
+
return tuple(input_shape[:-1]) + (self.num_tokens,)
|
| 139 |
+
return tuple(input_shape[:-1]) + (self.num_tokens,)
|
| 140 |
+
|
| 141 |
+
def compute_output_spec(self, inputs, count_weights=None):
|
| 142 |
+
output_shape = self.compute_output_shape(inputs.shape)
|
| 143 |
+
return KerasTensor(
|
| 144 |
+
output_shape, dtype=self.compute_dtype, sparse=self.sparse
|
| 145 |
+
)
|
| 146 |
+
|
| 147 |
+
def get_config(self):
|
| 148 |
+
config = {
|
| 149 |
+
"num_tokens": self.num_tokens,
|
| 150 |
+
"output_mode": self.output_mode,
|
| 151 |
+
}
|
| 152 |
+
base_config = super().get_config()
|
| 153 |
+
return {**base_config, **config}
|
| 154 |
+
|
| 155 |
+
def call(self, inputs, count_weights=None):
|
| 156 |
+
if count_weights is not None:
|
| 157 |
+
if self.output_mode != "count":
|
| 158 |
+
raise ValueError(
|
| 159 |
+
"`count_weights` is not used when `output_mode` is not "
|
| 160 |
+
"`'count'`. Received `count_weights={count_weights}`."
|
| 161 |
+
)
|
| 162 |
+
count_weights = self.backend.convert_to_tensor(
|
| 163 |
+
count_weights, dtype=self.compute_dtype
|
| 164 |
+
)
|
| 165 |
+
outputs = self._encode(inputs, count_weights)
|
| 166 |
+
return backend_utils.convert_tf_tensor(outputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/discretization.py
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from keras.src import backend
|
| 4 |
+
from keras.src.api_export import keras_export
|
| 5 |
+
from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
|
| 6 |
+
from keras.src.utils import argument_validation
|
| 7 |
+
from keras.src.utils import numerical_utils
|
| 8 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@keras_export("keras.layers.Discretization")
|
| 12 |
+
class Discretization(TFDataLayer):
|
| 13 |
+
"""A preprocessing layer which buckets continuous features by ranges.
|
| 14 |
+
|
| 15 |
+
This layer will place each element of its input data into one of several
|
| 16 |
+
contiguous ranges and output an integer index indicating which range each
|
| 17 |
+
element was placed in.
|
| 18 |
+
|
| 19 |
+
**Note:** This layer is safe to use inside a `tf.data` pipeline
|
| 20 |
+
(independently of which backend you're using).
|
| 21 |
+
|
| 22 |
+
Input shape:
|
| 23 |
+
Any array of dimension 2 or higher.
|
| 24 |
+
|
| 25 |
+
Output shape:
|
| 26 |
+
Same as input shape.
|
| 27 |
+
|
| 28 |
+
Arguments:
|
| 29 |
+
bin_boundaries: A list of bin boundaries.
|
| 30 |
+
The leftmost and rightmost bins
|
| 31 |
+
will always extend to `-inf` and `inf`,
|
| 32 |
+
so `bin_boundaries=[0., 1., 2.]`
|
| 33 |
+
generates bins `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`,
|
| 34 |
+
and `[2., +inf)`.
|
| 35 |
+
If this option is set, `adapt()` should not be called.
|
| 36 |
+
num_bins: The integer number of bins to compute.
|
| 37 |
+
If this option is set,
|
| 38 |
+
`adapt()` should be called to learn the bin boundaries.
|
| 39 |
+
epsilon: Error tolerance, typically a small fraction
|
| 40 |
+
close to zero (e.g. 0.01). Higher values of epsilon increase
|
| 41 |
+
the quantile approximation, and hence result in more
|
| 42 |
+
unequal buckets, but could improve performance
|
| 43 |
+
and resource consumption.
|
| 44 |
+
output_mode: Specification for the output of the layer.
|
| 45 |
+
Values can be `"int"`, `"one_hot"`, `"multi_hot"`, or
|
| 46 |
+
`"count"` configuring the layer as follows:
|
| 47 |
+
- `"int"`: Return the discretized bin indices directly.
|
| 48 |
+
- `"one_hot"`: Encodes each individual element in the
|
| 49 |
+
input into an array the same size as `num_bins`,
|
| 50 |
+
containing a 1 at the input's bin
|
| 51 |
+
index. If the last dimension is size 1, will encode on that
|
| 52 |
+
dimension. If the last dimension is not size 1,
|
| 53 |
+
will append a new dimension for the encoded output.
|
| 54 |
+
- `"multi_hot"`: Encodes each sample in the input into a
|
| 55 |
+
single array the same size as `num_bins`,
|
| 56 |
+
containing a 1 for each bin index
|
| 57 |
+
index present in the sample.
|
| 58 |
+
Treats the last dimension as the sample
|
| 59 |
+
dimension, if input shape is `(..., sample_length)`,
|
| 60 |
+
output shape will be `(..., num_tokens)`.
|
| 61 |
+
- `"count"`: As `"multi_hot"`, but the int array contains
|
| 62 |
+
a count of the number of times the bin index appeared
|
| 63 |
+
in the sample.
|
| 64 |
+
Defaults to `"int"`.
|
| 65 |
+
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
|
| 66 |
+
and `"count"` output modes. Only supported with TensorFlow
|
| 67 |
+
backend. If `True`, returns a `SparseTensor` instead of
|
| 68 |
+
a dense `Tensor`. Defaults to `False`.
|
| 69 |
+
|
| 70 |
+
Examples:
|
| 71 |
+
|
| 72 |
+
Discretize float values based on provided buckets.
|
| 73 |
+
>>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
|
| 74 |
+
>>> layer = Discretization(bin_boundaries=[0., 1., 2.])
|
| 75 |
+
>>> layer(input)
|
| 76 |
+
array([[0, 2, 3, 1],
|
| 77 |
+
[1, 3, 2, 1]])
|
| 78 |
+
|
| 79 |
+
Discretize float values based on a number of buckets to compute.
|
| 80 |
+
>>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
|
| 81 |
+
>>> layer = Discretization(num_bins=4, epsilon=0.01)
|
| 82 |
+
>>> layer.adapt(input)
|
| 83 |
+
>>> layer(input)
|
| 84 |
+
array([[0, 2, 3, 2],
|
| 85 |
+
[1, 3, 3, 1]])
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
def __init__(
|
| 89 |
+
self,
|
| 90 |
+
bin_boundaries=None,
|
| 91 |
+
num_bins=None,
|
| 92 |
+
epsilon=0.01,
|
| 93 |
+
output_mode="int",
|
| 94 |
+
sparse=False,
|
| 95 |
+
dtype=None,
|
| 96 |
+
name=None,
|
| 97 |
+
):
|
| 98 |
+
if dtype is None:
|
| 99 |
+
dtype = "int64" if output_mode == "int" else backend.floatx()
|
| 100 |
+
|
| 101 |
+
super().__init__(name=name, dtype=dtype)
|
| 102 |
+
|
| 103 |
+
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
|
| 104 |
+
raise ValueError(
|
| 105 |
+
f"`sparse=True` cannot be used with backend {backend.backend()}"
|
| 106 |
+
)
|
| 107 |
+
if sparse and output_mode == "int":
|
| 108 |
+
raise ValueError(
|
| 109 |
+
"`sparse=True` may only be used if `output_mode` is "
|
| 110 |
+
"`'one_hot'`, `'multi_hot'`, or `'count'`. "
|
| 111 |
+
f"Received: sparse={sparse} and "
|
| 112 |
+
f"output_mode={output_mode}"
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
argument_validation.validate_string_arg(
|
| 116 |
+
output_mode,
|
| 117 |
+
allowable_strings=(
|
| 118 |
+
"int",
|
| 119 |
+
"one_hot",
|
| 120 |
+
"multi_hot",
|
| 121 |
+
"count",
|
| 122 |
+
),
|
| 123 |
+
caller_name=self.__class__.__name__,
|
| 124 |
+
arg_name="output_mode",
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
if num_bins is not None and num_bins < 0:
|
| 128 |
+
raise ValueError(
|
| 129 |
+
"`num_bins` must be greater than or equal to 0. "
|
| 130 |
+
f"Received: `num_bins={num_bins}`"
|
| 131 |
+
)
|
| 132 |
+
if num_bins is not None and bin_boundaries is not None:
|
| 133 |
+
if len(bin_boundaries) != num_bins - 1:
|
| 134 |
+
raise ValueError(
|
| 135 |
+
"Both `num_bins` and `bin_boundaries` should not be "
|
| 136 |
+
f"set. Received: `num_bins={num_bins}` and "
|
| 137 |
+
f"`bin_boundaries={bin_boundaries}`"
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
self.input_bin_boundaries = bin_boundaries
|
| 141 |
+
self.bin_boundaries = (
|
| 142 |
+
bin_boundaries if bin_boundaries is not None else []
|
| 143 |
+
)
|
| 144 |
+
self.num_bins = num_bins
|
| 145 |
+
self.epsilon = epsilon
|
| 146 |
+
self.output_mode = output_mode
|
| 147 |
+
self.sparse = sparse
|
| 148 |
+
|
| 149 |
+
if self.bin_boundaries:
|
| 150 |
+
self.summary = None
|
| 151 |
+
else:
|
| 152 |
+
self.summary = np.array([[], []], dtype="float32")
|
| 153 |
+
|
| 154 |
+
def build(self, input_shape=None):
|
| 155 |
+
self.built = True
|
| 156 |
+
|
| 157 |
+
@property
|
| 158 |
+
def input_dtype(self):
|
| 159 |
+
return backend.floatx()
|
| 160 |
+
|
| 161 |
+
def adapt(self, data, steps=None):
|
| 162 |
+
"""Computes bin boundaries from quantiles in a input dataset.
|
| 163 |
+
|
| 164 |
+
Calling `adapt()` on a `Discretization` layer is an alternative to
|
| 165 |
+
passing in a `bin_boundaries` argument during construction. A
|
| 166 |
+
`Discretization` layer should always be either adapted over a dataset or
|
| 167 |
+
passed `bin_boundaries`.
|
| 168 |
+
|
| 169 |
+
During `adapt()`, the layer will estimate the quantile boundaries of the
|
| 170 |
+
input dataset. The number of quantiles can be controlled via the
|
| 171 |
+
`num_bins` argument, and the error tolerance for quantile boundaries can
|
| 172 |
+
be controlled via the `epsilon` argument.
|
| 173 |
+
|
| 174 |
+
Arguments:
|
| 175 |
+
data: The data to train on. It can be passed either as a
|
| 176 |
+
batched `tf.data.Dataset`,
|
| 177 |
+
or as a NumPy array.
|
| 178 |
+
steps: Integer or `None`.
|
| 179 |
+
Total number of steps (batches of samples) to process.
|
| 180 |
+
If `data` is a `tf.data.Dataset`, and `steps` is `None`,
|
| 181 |
+
`adapt()` will run until the input dataset is exhausted.
|
| 182 |
+
When passing an infinitely
|
| 183 |
+
repeating dataset, you must specify the `steps` argument. This
|
| 184 |
+
argument is not supported with array inputs or list inputs.
|
| 185 |
+
"""
|
| 186 |
+
if self.input_bin_boundaries is not None:
|
| 187 |
+
raise ValueError(
|
| 188 |
+
"Cannot adapt a Discretization layer that has been initialized "
|
| 189 |
+
"with `bin_boundaries`, use `num_bins` instead."
|
| 190 |
+
)
|
| 191 |
+
self.reset_state()
|
| 192 |
+
if isinstance(data, tf.data.Dataset):
|
| 193 |
+
if steps is not None:
|
| 194 |
+
data = data.take(steps)
|
| 195 |
+
for batch in data:
|
| 196 |
+
self.update_state(batch)
|
| 197 |
+
else:
|
| 198 |
+
self.update_state(data)
|
| 199 |
+
self.finalize_state()
|
| 200 |
+
|
| 201 |
+
def update_state(self, data):
|
| 202 |
+
data = np.array(data).astype("float32")
|
| 203 |
+
summary = summarize(data, self.epsilon)
|
| 204 |
+
self.summary = merge_summaries(summary, self.summary, self.epsilon)
|
| 205 |
+
|
| 206 |
+
def finalize_state(self):
|
| 207 |
+
if self.input_bin_boundaries is not None:
|
| 208 |
+
return
|
| 209 |
+
self.bin_boundaries = get_bin_boundaries(
|
| 210 |
+
self.summary, self.num_bins
|
| 211 |
+
).tolist()
|
| 212 |
+
|
| 213 |
+
def reset_state(self):
|
| 214 |
+
if self.input_bin_boundaries is not None:
|
| 215 |
+
return
|
| 216 |
+
self.summary = np.array([[], []], dtype="float32")
|
| 217 |
+
|
| 218 |
+
def compute_output_spec(self, inputs):
|
| 219 |
+
return backend.KerasTensor(shape=inputs.shape, dtype=self.compute_dtype)
|
| 220 |
+
|
| 221 |
+
def load_own_variables(self, store):
|
| 222 |
+
if len(store) == 1:
|
| 223 |
+
# Legacy format case
|
| 224 |
+
self.summary = store["0"]
|
| 225 |
+
return
|
| 226 |
+
|
| 227 |
+
def call(self, inputs):
|
| 228 |
+
indices = self.backend.numpy.digitize(inputs, self.bin_boundaries)
|
| 229 |
+
return numerical_utils.encode_categorical_inputs(
|
| 230 |
+
indices,
|
| 231 |
+
output_mode=self.output_mode,
|
| 232 |
+
depth=len(self.bin_boundaries) + 1,
|
| 233 |
+
dtype=self.compute_dtype,
|
| 234 |
+
sparse=self.sparse,
|
| 235 |
+
backend_module=self.backend,
|
| 236 |
+
)
|
| 237 |
+
|
| 238 |
+
def get_config(self):
|
| 239 |
+
return {
|
| 240 |
+
"bin_boundaries": self.bin_boundaries,
|
| 241 |
+
"num_bins": self.num_bins,
|
| 242 |
+
"epsilon": self.epsilon,
|
| 243 |
+
"output_mode": self.output_mode,
|
| 244 |
+
"sparse": self.sparse,
|
| 245 |
+
"name": self.name,
|
| 246 |
+
"dtype": self.dtype,
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def summarize(values, epsilon):
    """Reduce a 1D sequence of values to a compact quantile summary.

    Based on the numpy quantile machinery, but structured so partial
    summaries from multiple data sets can later be merged. The target
    number of bins is the reciprocal of `epsilon`; individual values are
    sampled at the matching stride, and each sampled value carries the
    stride as its weight (count). If the target bin count exceeds the
    number of values, every value is kept with weight 1.

    Args:
        values: 1D `np.ndarray` to be summarized.
        epsilon: A `'float32'` that determines the approximate desired
            precision.

    Returns:
        A 2D `np.ndarray`: row 0 holds the sampled partition values,
        row 1 holds the corresponding weights (counts).
    """
    flat = np.sort(np.reshape(values, [-1]))
    n = np.size(flat)
    target_bins = 1.0 / epsilon
    increment = n / target_bins
    # Never sample with a stride below 1 element.
    stride = max(increment, 1)
    boundaries = flat[int(increment) :: int(stride)]
    weights = np.full_like(boundaries, stride)
    return np.stack([boundaries, weights])
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def merge_summaries(prev_summary, next_summary, epsilon):
    """Weighted merge of two summaries.

    Concatenates the two summaries of distinct data, re-sorts the merged
    columns by partition value, and compresses the result back to within
    the `epsilon` error tolerance.

    Args:
        prev_summary: 2D `np.ndarray` summary to be merged with `next_summary`.
        next_summary: 2D `np.ndarray` summary to be merged with `prev_summary`.
        epsilon: A float that determines the approximate desired precision.

    Returns:
        A 2D `np.ndarray` merged summary: row 0 holds the partition
        values, row 1 holds the weights (counts).
    """
    combined = np.concatenate((prev_summary, next_summary), axis=1)
    order = np.argsort(combined[0])
    combined = np.take(combined, order, axis=1)
    return compress_summary(combined, epsilon)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def get_bin_boundaries(summary, num_bins):
    """Compress `summary` down to `num_bins` bins and return the inner
    boundary values (the last compressed value is dropped)."""
    compressed = compress_summary(summary, 1.0 / num_bins)
    return compressed[0, :-1]
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def compress_summary(summary, epsilon):
    """Compress a summary to within `epsilon` accuracy.

    Keeps summary sizes small after merging and also produces the final
    target boundaries. New bin values are found by interpolating the
    cumulative weight percentages of the large summary; differencing the
    interpolated cumulative weights yields each new bin's weight.

    Args:
        summary: 2D `np.ndarray` summary to be compressed.
        epsilon: A `'float32'` that determines the approximate desired
            precision.

    Returns:
        A 2D `np.ndarray` compressed summary: row 0 holds the
        interpolated partition values, row 1 the weights (counts).
    """
    # Already at or below the target size: return unchanged.
    if summary.shape[1] * epsilon < 1:
        return summary

    target_percents = epsilon + np.arange(0.0, 1.0, epsilon)
    cumulative = summary[1].cumsum()
    percent_positions = cumulative / cumulative[-1]
    bins = np.interp(target_percents, percent_positions, summary[0])
    cumulative = np.interp(target_percents, percent_positions, cumulative)
    # Per-bin weight = difference of consecutive cumulative weights.
    weights = np.diff(cumulative, prepend=0.0)
    return np.stack((bins, weights)).astype("float32")
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/feature_space.py
ADDED
|
@@ -0,0 +1,815 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import layers
|
| 3 |
+
from keras.src import tree
|
| 4 |
+
from keras.src.api_export import keras_export
|
| 5 |
+
from keras.src.layers.layer import Layer
|
| 6 |
+
from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
|
| 7 |
+
from keras.src.saving import saving_lib
|
| 8 |
+
from keras.src.saving import serialization_lib
|
| 9 |
+
from keras.src.utils import backend_utils
|
| 10 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 11 |
+
from keras.src.utils.naming import auto_name
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Cross:
    """Configuration object describing a hashed cross of several features.

    Holds the names of the features being crossed, the dimensionality of
    the hashing space, and how the crossed value is encoded.
    """

    def __init__(self, feature_names, crossing_dim, output_mode="one_hot"):
        allowed_modes = {"int", "one_hot"}
        if output_mode not in allowed_modes:
            raise ValueError(
                "Invalid value for argument `output_mode`. "
                "Expected one of {'int', 'one_hot'}. "
                f"Received: output_mode={output_mode}"
            )
        self.feature_names = tuple(feature_names)
        self.crossing_dim = crossing_dim
        self.output_mode = output_mode

    @property
    def name(self):
        # The cross is named after its constituent features.
        return "_X_".join(self.feature_names)

    def get_config(self):
        """Return the constructor arguments for serialization."""
        return {
            "feature_names": self.feature_names,
            "crossing_dim": self.crossing_dim,
            "output_mode": self.output_mode,
        }

    @classmethod
    def from_config(cls, config):
        """Re-create a `Cross` from its `get_config()` output."""
        return cls(**config)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class Feature:
    """Configuration object for a single preprocessed feature.

    Bundles the feature's input dtype, its preprocessing layer, and how
    the preprocessed value is encoded ("int", "one_hot" or "float").
    """

    def __init__(self, dtype, preprocessor, output_mode):
        valid_modes = {"int", "one_hot", "float"}
        if output_mode not in valid_modes:
            raise ValueError(
                "Invalid value for argument `output_mode`. "
                "Expected one of {'int', 'one_hot', 'float'}. "
                f"Received: output_mode={output_mode}"
            )
        self.dtype = dtype
        # A serialized preprocessor (dict config) is revived into a layer.
        if isinstance(preprocessor, dict):
            preprocessor = serialization_lib.deserialize_keras_object(
                preprocessor
            )
        self.preprocessor = preprocessor
        self.output_mode = output_mode

    def get_config(self):
        """Return the constructor arguments for serialization."""
        return {
            "dtype": self.dtype,
            "preprocessor": serialization_lib.serialize_keras_object(
                self.preprocessor
            ),
            "output_mode": self.output_mode,
        }

    @classmethod
    def from_config(cls, config):
        """Re-create a `Feature` from its `get_config()` output."""
        return cls(**config)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@keras_export("keras.utils.FeatureSpace")
|
| 73 |
+
class FeatureSpace(Layer):
|
| 74 |
+
"""One-stop utility for preprocessing and encoding structured data.
|
| 75 |
+
|
| 76 |
+
Arguments:
|
| 77 |
+
feature_names: Dict mapping the names of your features to their
|
| 78 |
+
type specification, e.g. `{"my_feature": "integer_categorical"}`
|
| 79 |
+
or `{"my_feature": FeatureSpace.integer_categorical()}`.
|
| 80 |
+
For a complete list of all supported types, see
|
| 81 |
+
"Available feature types" paragraph below.
|
| 82 |
+
output_mode: One of `"concat"` or `"dict"`. In concat mode, all
|
| 83 |
+
features get concatenated together into a single vector.
|
| 84 |
+
In dict mode, the FeatureSpace returns a dict of individually
|
| 85 |
+
encoded features (with the same keys as the input dict keys).
|
| 86 |
+
crosses: List of features to be crossed together, e.g.
|
| 87 |
+
`crosses=[("feature_1", "feature_2")]`. The features will be
|
| 88 |
+
"crossed" by hashing their combined value into
|
| 89 |
+
a fixed-length vector.
|
| 90 |
+
crossing_dim: Default vector size for hashing crossed features.
|
| 91 |
+
Defaults to `32`.
|
| 92 |
+
hashing_dim: Default vector size for hashing features of type
|
| 93 |
+
`"integer_hashed"` and `"string_hashed"`. Defaults to `32`.
|
| 94 |
+
num_discretization_bins: Default number of bins to be used for
|
| 95 |
+
discretizing features of type `"float_discretized"`.
|
| 96 |
+
Defaults to `32`.
|
| 97 |
+
|
| 98 |
+
**Available feature types:**
|
| 99 |
+
|
| 100 |
+
Note that all features can be referred to by their string name,
|
| 101 |
+
e.g. `"integer_categorical"`. When using the string name, the default
|
| 102 |
+
argument values are used.
|
| 103 |
+
|
| 104 |
+
```python
|
| 105 |
+
# Plain float values.
|
| 106 |
+
FeatureSpace.float(name=None)
|
| 107 |
+
|
| 108 |
+
# Float values to be preprocessed via featurewise standardization
|
| 109 |
+
# (i.e. via a `keras.layers.Normalization` layer).
|
| 110 |
+
FeatureSpace.float_normalized(name=None)
|
| 111 |
+
|
| 112 |
+
# Float values to be preprocessed via linear rescaling
|
| 113 |
+
# (i.e. via a `keras.layers.Rescaling` layer).
|
| 114 |
+
FeatureSpace.float_rescaled(scale=1., offset=0., name=None)
|
| 115 |
+
|
| 116 |
+
# Float values to be discretized. By default, the discrete
|
| 117 |
+
# representation will then be one-hot encoded.
|
| 118 |
+
FeatureSpace.float_discretized(
|
| 119 |
+
num_bins, bin_boundaries=None, output_mode="one_hot", name=None)
|
| 120 |
+
|
| 121 |
+
# Integer values to be indexed. By default, the discrete
|
| 122 |
+
# representation will then be one-hot encoded.
|
| 123 |
+
FeatureSpace.integer_categorical(
|
| 124 |
+
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
|
| 125 |
+
|
| 126 |
+
# String values to be indexed. By default, the discrete
|
| 127 |
+
# representation will then be one-hot encoded.
|
| 128 |
+
FeatureSpace.string_categorical(
|
| 129 |
+
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
|
| 130 |
+
|
| 131 |
+
# Integer values to be hashed into a fixed number of bins.
|
| 132 |
+
# By default, the discrete representation will then be one-hot encoded.
|
| 133 |
+
FeatureSpace.integer_hashed(num_bins, output_mode="one_hot", name=None)
|
| 134 |
+
|
| 135 |
+
# String values to be hashed into a fixed number of bins.
|
| 136 |
+
# By default, the discrete representation will then be one-hot encoded.
|
| 137 |
+
FeatureSpace.string_hashed(num_bins, output_mode="one_hot", name=None)
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
Examples:
|
| 141 |
+
|
| 142 |
+
**Basic usage with a dict of input data:**
|
| 143 |
+
|
| 144 |
+
```python
|
| 145 |
+
raw_data = {
|
| 146 |
+
"float_values": [0.0, 0.1, 0.2, 0.3],
|
| 147 |
+
"string_values": ["zero", "one", "two", "three"],
|
| 148 |
+
"int_values": [0, 1, 2, 3],
|
| 149 |
+
}
|
| 150 |
+
dataset = tf.data.Dataset.from_tensor_slices(raw_data)
|
| 151 |
+
|
| 152 |
+
feature_space = FeatureSpace(
|
| 153 |
+
features={
|
| 154 |
+
"float_values": "float_normalized",
|
| 155 |
+
"string_values": "string_categorical",
|
| 156 |
+
"int_values": "integer_categorical",
|
| 157 |
+
},
|
| 158 |
+
crosses=[("string_values", "int_values")],
|
| 159 |
+
output_mode="concat",
|
| 160 |
+
)
|
| 161 |
+
# Before you start using the FeatureSpace,
|
| 162 |
+
# you must `adapt()` it on some data.
|
| 163 |
+
feature_space.adapt(dataset)
|
| 164 |
+
|
| 165 |
+
# You can call the FeatureSpace on a dict of data (batched or unbatched).
|
| 166 |
+
output_vector = feature_space(raw_data)
|
| 167 |
+
```
|
| 168 |
+
|
| 169 |
+
**Basic usage with `tf.data`:**
|
| 170 |
+
|
| 171 |
+
```python
|
| 172 |
+
# Unlabeled data
|
| 173 |
+
preprocessed_ds = unlabeled_dataset.map(feature_space)
|
| 174 |
+
|
| 175 |
+
# Labeled data
|
| 176 |
+
preprocessed_ds = labeled_dataset.map(lambda x, y: (feature_space(x), y))
|
| 177 |
+
```
|
| 178 |
+
|
| 179 |
+
**Basic usage with the Keras Functional API:**
|
| 180 |
+
|
| 181 |
+
```python
|
| 182 |
+
# Retrieve a dict Keras Input objects
|
| 183 |
+
inputs = feature_space.get_inputs()
|
| 184 |
+
# Retrieve the corresponding encoded Keras tensors
|
| 185 |
+
encoded_features = feature_space.get_encoded_features()
|
| 186 |
+
# Build a Functional model
|
| 187 |
+
outputs = keras.layers.Dense(1, activation="sigmoid")(encoded_features)
|
| 188 |
+
model = keras.Model(inputs, outputs)
|
| 189 |
+
```
|
| 190 |
+
|
| 191 |
+
**Customizing each feature or feature cross:**
|
| 192 |
+
|
| 193 |
+
```python
|
| 194 |
+
feature_space = FeatureSpace(
|
| 195 |
+
features={
|
| 196 |
+
"float_values": FeatureSpace.float_normalized(),
|
| 197 |
+
"string_values": FeatureSpace.string_categorical(max_tokens=10),
|
| 198 |
+
"int_values": FeatureSpace.integer_categorical(max_tokens=10),
|
| 199 |
+
},
|
| 200 |
+
crosses=[
|
| 201 |
+
FeatureSpace.cross(("string_values", "int_values"), crossing_dim=32)
|
| 202 |
+
],
|
| 203 |
+
output_mode="concat",
|
| 204 |
+
)
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
**Returning a dict of integer-encoded features:**
|
| 208 |
+
|
| 209 |
+
```python
|
| 210 |
+
feature_space = FeatureSpace(
|
| 211 |
+
features={
|
| 212 |
+
"string_values": FeatureSpace.string_categorical(output_mode="int"),
|
| 213 |
+
"int_values": FeatureSpace.integer_categorical(output_mode="int"),
|
| 214 |
+
},
|
| 215 |
+
crosses=[
|
| 216 |
+
FeatureSpace.cross(
|
| 217 |
+
feature_names=("string_values", "int_values"),
|
| 218 |
+
crossing_dim=32,
|
| 219 |
+
output_mode="int",
|
| 220 |
+
)
|
| 221 |
+
],
|
| 222 |
+
output_mode="dict",
|
| 223 |
+
)
|
| 224 |
+
```
|
| 225 |
+
|
| 226 |
+
**Specifying your own Keras preprocessing layer:**
|
| 227 |
+
|
| 228 |
+
```python
|
| 229 |
+
# Let's say that one of the features is a short text paragraph that
|
| 230 |
+
# we want to encode as a vector (one vector per paragraph) via TF-IDF.
|
| 231 |
+
data = {
|
| 232 |
+
"text": ["1st string", "2nd string", "3rd string"],
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
# There's a Keras layer for this: TextVectorization.
|
| 236 |
+
custom_layer = layers.TextVectorization(output_mode="tf_idf")
|
| 237 |
+
|
| 238 |
+
# We can use FeatureSpace.feature to create a custom feature
|
| 239 |
+
# that will use our preprocessing layer.
|
| 240 |
+
feature_space = FeatureSpace(
|
| 241 |
+
features={
|
| 242 |
+
"text": FeatureSpace.feature(
|
| 243 |
+
preprocessor=custom_layer, dtype="string", output_mode="float"
|
| 244 |
+
),
|
| 245 |
+
},
|
| 246 |
+
output_mode="concat",
|
| 247 |
+
)
|
| 248 |
+
feature_space.adapt(tf.data.Dataset.from_tensor_slices(data))
|
| 249 |
+
output_vector = feature_space(data)
|
| 250 |
+
```
|
| 251 |
+
|
| 252 |
+
**Retrieving the underlying Keras preprocessing layers:**
|
| 253 |
+
|
| 254 |
+
```python
|
| 255 |
+
# The preprocessing layer of each feature is available in `.preprocessors`.
|
| 256 |
+
preprocessing_layer = feature_space.preprocessors["feature1"]
|
| 257 |
+
|
| 258 |
+
# The crossing layer of each feature cross is available in `.crossers`.
|
| 259 |
+
# It's an instance of keras.layers.HashedCrossing.
|
| 260 |
+
crossing_layer = feature_space.crossers["feature1_X_feature2"]
|
| 261 |
+
```
|
| 262 |
+
|
| 263 |
+
**Saving and reloading a FeatureSpace:**
|
| 264 |
+
|
| 265 |
+
```python
|
| 266 |
+
feature_space.save("featurespace.keras")
|
| 267 |
+
reloaded_feature_space = keras.models.load_model("featurespace.keras")
|
| 268 |
+
```
|
| 269 |
+
"""
|
| 270 |
+
|
| 271 |
+
@classmethod
def cross(cls, feature_names, crossing_dim, output_mode="one_hot"):
    """Create a `Cross` configuration for crossing the named features."""
    return Cross(feature_names, crossing_dim, output_mode=output_mode)
|
| 274 |
+
|
| 275 |
+
@classmethod
def feature(cls, dtype, preprocessor, output_mode):
    """Create a fully custom `Feature` from an arbitrary preprocessor."""
    return Feature(dtype, preprocessor, output_mode)
|
| 278 |
+
|
| 279 |
+
@classmethod
def float(cls, name=None):
    """Feature passed through unchanged as a plain float32 value."""
    name = name or auto_name("float")
    # Identity pass-through preprocessor in float32.
    identity = TFDIdentity(dtype="float32", name=f"{name}_preprocessor")
    return Feature(
        dtype="float32", preprocessor=identity, output_mode="float"
    )
|
| 286 |
+
|
| 287 |
+
@classmethod
def float_rescaled(cls, scale=1.0, offset=0.0, name=None):
    """Feature linearly rescaled as `value * scale + offset`."""
    name = name or auto_name("float_rescaled")
    rescale = layers.Rescaling(
        scale=scale, offset=offset, name=f"{name}_preprocessor"
    )
    return Feature(
        dtype="float32", preprocessor=rescale, output_mode="float"
    )
|
| 296 |
+
|
| 297 |
+
@classmethod
def float_normalized(cls, name=None):
    """Feature standardized featurewise via a `Normalization` layer."""
    name = name or auto_name("float_normalized")
    normalizer = layers.Normalization(
        axis=-1, name=f"{name}_preprocessor"
    )
    return Feature(
        dtype="float32", preprocessor=normalizer, output_mode="float"
    )
|
| 306 |
+
|
| 307 |
+
@classmethod
def float_discretized(
    cls, num_bins, bin_boundaries=None, output_mode="one_hot", name=None
):
    """Feature bucketized into `num_bins` bins, then encoded."""
    name = name or auto_name("float_discretized")
    discretizer = layers.Discretization(
        num_bins=num_bins,
        bin_boundaries=bin_boundaries,
        name=f"{name}_preprocessor",
    )
    return Feature(
        dtype="float32", preprocessor=discretizer, output_mode=output_mode
    )
|
| 320 |
+
|
| 321 |
+
@classmethod
def integer_categorical(
    cls,
    max_tokens=None,
    num_oov_indices=1,
    output_mode="one_hot",
    name=None,
):
    """Integer feature indexed via an `IntegerLookup`, then encoded."""
    name = name or auto_name("integer_categorical")
    lookup = layers.IntegerLookup(
        name=f"{name}_preprocessor",
        max_tokens=max_tokens,
        num_oov_indices=num_oov_indices,
    )
    return Feature(
        dtype="int32", preprocessor=lookup, output_mode=output_mode
    )
|
| 338 |
+
|
| 339 |
+
@classmethod
def string_categorical(
    cls,
    max_tokens=None,
    num_oov_indices=1,
    output_mode="one_hot",
    name=None,
):
    """String feature indexed via a `StringLookup`, then encoded."""
    name = name or auto_name("string_categorical")
    lookup = layers.StringLookup(
        name=f"{name}_preprocessor",
        max_tokens=max_tokens,
        num_oov_indices=num_oov_indices,
    )
    return Feature(
        dtype="string", preprocessor=lookup, output_mode=output_mode
    )
|
| 356 |
+
|
| 357 |
+
@classmethod
def string_hashed(cls, num_bins, output_mode="one_hot", name=None):
    """String feature hashed into `num_bins` bins, then encoded."""
    name = name or auto_name("string_hashed")
    hasher = layers.Hashing(
        name=f"{name}_preprocessor", num_bins=num_bins
    )
    return Feature(
        dtype="string", preprocessor=hasher, output_mode=output_mode
    )
|
| 366 |
+
|
| 367 |
+
@classmethod
def integer_hashed(cls, num_bins, output_mode="one_hot", name=None):
    """Integer feature hashed into `num_bins` bins, then encoded."""
    name = name or auto_name("integer_hashed")
    hasher = layers.Hashing(
        name=f"{name}_preprocessor", num_bins=num_bins
    )
    return Feature(
        dtype="int32", preprocessor=hasher, output_mode=output_mode
    )
|
| 376 |
+
|
| 377 |
+
def __init__(
    self,
    features,
    output_mode="concat",
    crosses=None,
    crossing_dim=32,
    hashing_dim=32,
    num_discretization_bins=32,
    name=None,
):
    """Build the FeatureSpace from feature specs and optional crosses.

    Validates the feature/cross configuration, standardizes every
    feature spec into a `Feature` object, and creates the per-feature
    inputs, preprocessors and crossers. Encoders and the concat layer
    are created lazily after `adapt()`.
    """
    super().__init__(name=name)
    if not features:
        raise ValueError("The `features` argument cannot be None or empty.")
    self.crossing_dim = crossing_dim
    self.hashing_dim = hashing_dim
    self.num_discretization_bins = num_discretization_bins
    # Coerce every spec (string / dict / Feature) into a Feature object.
    self.features = {
        name: self._standardize_feature(name, value)
        for name, value in features.items()
    }
    self.crosses = []
    if crosses:
        feature_set = set(features.keys())
        for cross in crosses:
            # A dict is a serialized Cross config: revive it first.
            if isinstance(cross, dict):
                cross = serialization_lib.deserialize_keras_object(cross)
            if isinstance(cross, Cross):
                self.crosses.append(cross)
            else:
                # Otherwise `cross` is a plain tuple/list of feature names;
                # validate it before wrapping in a Cross object.
                if not crossing_dim:
                    raise ValueError(
                        "When specifying `crosses`, the argument "
                        "`crossing_dim` "
                        "(dimensionality of the crossing space) "
                        "should be specified as well."
                    )
                for key in cross:
                    if key not in feature_set:
                        raise ValueError(
                            "All features referenced "
                            "in the `crosses` argument "
                            "should be present in the `features` dict. "
                            f"Received unknown features: {cross}"
                        )
                self.crosses.append(Cross(cross, crossing_dim=crossing_dim))
    self.crosses_by_name = {cross.name: cross for cross in self.crosses}

    if output_mode not in {"dict", "concat"}:
        raise ValueError(
            "Invalid value for argument `output_mode`. "
            "Expected one of {'dict', 'concat'}. "
            f"Received: output_mode={output_mode}"
        )
    self.output_mode = output_mode

    # One symbolic Input per feature, keyed by feature name.
    self.inputs = {
        name: self._feature_to_input(name, value)
        for name, value in self.features.items()
    }
    self.preprocessors = {
        name: value.preprocessor for name, value in self.features.items()
    }
    self.encoded_features = None
    self.crossers = {
        cross.name: self._cross_to_crosser(cross) for cross in self.crosses
    }
    # Populated lazily while building the encoded features.
    self.one_hot_encoders = {}
    self._is_adapted = False
    self.concat = None
    self._preprocessed_features_names = None
    self._crossed_features_names = None
    self._sublayers_built = False
|
| 449 |
+
|
| 450 |
+
def _feature_to_input(self, name, feature):
    """Create the scalar `Input` placeholder for a single feature."""
    return layers.Input(shape=(1,), dtype=feature.dtype, name=name)
|
| 452 |
+
|
| 453 |
+
def _standardize_feature(self, name, feature):
    """Coerce a user-provided feature spec into a `Feature` instance.

    Accepts a `Feature` (returned as-is), a serialized `Feature` config
    dict (deserialized), or one of the supported string shorthands.
    """
    if isinstance(feature, Feature):
        return feature

    if isinstance(feature, dict):
        # Serialized Feature config: revive it.
        return serialization_lib.deserialize_keras_object(feature)

    if feature == "float":
        return self.float(name=name)
    if feature == "float_normalized":
        return self.float_normalized(name=name)
    if feature == "float_rescaled":
        return self.float_rescaled(name=name)
    if feature == "float_discretized":
        return self.float_discretized(
            name=name, num_bins=self.num_discretization_bins
        )
    if feature == "integer_categorical":
        return self.integer_categorical(name=name)
    if feature == "string_categorical":
        return self.string_categorical(name=name)
    if feature == "integer_hashed":
        return self.integer_hashed(self.hashing_dim, name=name)
    if feature == "string_hashed":
        return self.string_hashed(self.hashing_dim, name=name)
    raise ValueError(f"Invalid feature type: {feature}")
|
| 480 |
+
|
| 481 |
+
def _cross_to_crosser(self, cross):
    """Create the `HashedCrossing` layer that implements a `Cross`."""
    return layers.HashedCrossing(cross.crossing_dim, name=cross.name)
|
| 483 |
+
|
| 484 |
+
def _list_adaptable_preprocessors(self):
    """Return the names of features whose preprocessors need `adapt()`."""
    adaptable = []
    for name in self.features.keys():
        preprocessor = self.preprocessors[name]
        # A Normalization layer with preset mean/variance is not adaptable.
        if (
            isinstance(preprocessor, layers.Normalization)
            and preprocessor.input_mean is not None
        ):
            continue
        # A TextVectorization layer with a provided vocabulary likewise.
        if (
            isinstance(preprocessor, layers.TextVectorization)
            and preprocessor._has_input_vocabulary
        ):
            continue
        if hasattr(preprocessor, "adapt"):
            adaptable.append(name)
    return adaptable
|
| 500 |
+
|
| 501 |
+
def adapt(self, dataset):
    """Fit all adaptable preprocessors on a `tf.data.Dataset` of dicts.

    Each adaptable layer is adapted on its own per-feature view of the
    dataset; afterwards the encoded-feature graph is built so the layer
    is fully usable.
    """
    if not isinstance(dataset, tf.data.Dataset):
        raise ValueError(
            "`adapt()` can only be called on a tf.data.Dataset. "
            f"Received instead: {dataset} (of type {type(dataset)})"
        )

    for name in self._list_adaptable_preprocessors():
        # Call adapt() on each individual adaptable layer.

        # TODO: consider rewriting this to instead iterate on the
        # dataset once, split each batch into individual features,
        # and call the layer's `_adapt_function` on each batch
        # to simulate the behavior of adapt() in a more performant fashion.

        feature_dataset = dataset.map(lambda x: x[name])
        preprocessor = self.preprocessors[name]
        # TODO: consider adding an adapt progress bar.
        # Sample 1 element to check the rank
        x = next(iter(feature_dataset))
        if len(x.shape) == 0:
            # The dataset yields unbatched scalars; batch it.
            feature_dataset = feature_dataset.batch(32)
        if len(x.shape) in {0, 1}:
            # If the rank is 1, add a dimension
            # so we can reduce on axis=-1.
            # Note: if rank was previously 0, it is now 1.
            feature_dataset = feature_dataset.map(
                lambda x: tf.expand_dims(x, -1)
            )
        preprocessor.adapt(feature_dataset)
    self._is_adapted = True
    self.get_encoded_features()  # Finish building the layer
    self.built = True
    self._sublayers_built = True
|
| 536 |
+
|
| 537 |
+
def get_inputs(self):
    """Return the dict of per-feature `Input` placeholders."""
    self._check_if_built()
    return self.inputs
|
| 540 |
+
|
| 541 |
+
def get_encoded_features(self):
    """Return the encoded output tensors, building them on first call."""
    self._check_if_adapted()

    if self.encoded_features is None:
        # Preprocess -> cross -> merge, then cache the result.
        preprocessed = self._preprocess_features(self.inputs)
        crossed = self._cross_features(preprocessed)
        self.encoded_features = self._merge_features(preprocessed, crossed)
    return self.encoded_features
|
| 552 |
+
|
| 553 |
+
def _preprocess_features(self, features):
    """Apply each feature's preprocessor to its corresponding input."""
    return {
        name: self.preprocessors[name](value)
        for name, value in features.items()
    }
|
| 558 |
+
|
| 559 |
+
def _cross_features(self, features):
|
| 560 |
+
all_outputs = {}
|
| 561 |
+
for cross in self.crosses:
|
| 562 |
+
inputs = [features[name] for name in cross.feature_names]
|
| 563 |
+
outputs = self.crossers[cross.name](inputs)
|
| 564 |
+
all_outputs[cross.name] = outputs
|
| 565 |
+
return all_outputs
|
| 566 |
+
|
| 567 |
+
def _merge_features(self, preprocessed_features, crossed_features):
|
| 568 |
+
if not self._preprocessed_features_names:
|
| 569 |
+
self._preprocessed_features_names = sorted(
|
| 570 |
+
preprocessed_features.keys()
|
| 571 |
+
)
|
| 572 |
+
self._crossed_features_names = sorted(crossed_features.keys())
|
| 573 |
+
|
| 574 |
+
all_names = (
|
| 575 |
+
self._preprocessed_features_names + self._crossed_features_names
|
| 576 |
+
)
|
| 577 |
+
all_features = [
|
| 578 |
+
preprocessed_features[name]
|
| 579 |
+
for name in self._preprocessed_features_names
|
| 580 |
+
] + [crossed_features[name] for name in self._crossed_features_names]
|
| 581 |
+
|
| 582 |
+
if self.output_mode == "dict":
|
| 583 |
+
output_dict = {}
|
| 584 |
+
else:
|
| 585 |
+
features_to_concat = []
|
| 586 |
+
|
| 587 |
+
if self._sublayers_built:
|
| 588 |
+
# Fast mode.
|
| 589 |
+
for name, feature in zip(all_names, all_features):
|
| 590 |
+
encoder = self.one_hot_encoders.get(name, None)
|
| 591 |
+
if encoder:
|
| 592 |
+
feature = encoder(feature)
|
| 593 |
+
if self.output_mode == "dict":
|
| 594 |
+
output_dict[name] = feature
|
| 595 |
+
else:
|
| 596 |
+
features_to_concat.append(feature)
|
| 597 |
+
if self.output_mode == "dict":
|
| 598 |
+
return output_dict
|
| 599 |
+
else:
|
| 600 |
+
return self.concat(features_to_concat)
|
| 601 |
+
|
| 602 |
+
# If the object isn't built,
|
| 603 |
+
# we create the encoder and concat layers below
|
| 604 |
+
all_specs = [
|
| 605 |
+
self.features[name] for name in self._preprocessed_features_names
|
| 606 |
+
] + [
|
| 607 |
+
self.crosses_by_name[name] for name in self._crossed_features_names
|
| 608 |
+
]
|
| 609 |
+
|
| 610 |
+
for name, feature, spec in zip(all_names, all_features, all_specs):
|
| 611 |
+
if tree.is_nested(feature):
|
| 612 |
+
dtype = tree.flatten(feature)[0].dtype
|
| 613 |
+
else:
|
| 614 |
+
dtype = feature.dtype
|
| 615 |
+
dtype = backend.standardize_dtype(dtype)
|
| 616 |
+
|
| 617 |
+
if spec.output_mode == "one_hot":
|
| 618 |
+
preprocessor = self.preprocessors.get(
|
| 619 |
+
name
|
| 620 |
+
) or self.crossers.get(name)
|
| 621 |
+
|
| 622 |
+
cardinality = None
|
| 623 |
+
if not dtype.startswith("int"):
|
| 624 |
+
raise ValueError(
|
| 625 |
+
f"Feature '{name}' has `output_mode='one_hot'`. "
|
| 626 |
+
"Thus its preprocessor should return an integer dtype. "
|
| 627 |
+
f"Instead it returns a {dtype} dtype."
|
| 628 |
+
)
|
| 629 |
+
|
| 630 |
+
if isinstance(
|
| 631 |
+
preprocessor, (layers.IntegerLookup, layers.StringLookup)
|
| 632 |
+
):
|
| 633 |
+
cardinality = preprocessor.vocabulary_size()
|
| 634 |
+
elif isinstance(preprocessor, layers.CategoryEncoding):
|
| 635 |
+
cardinality = preprocessor.num_tokens
|
| 636 |
+
elif isinstance(preprocessor, layers.Discretization):
|
| 637 |
+
cardinality = preprocessor.num_bins
|
| 638 |
+
elif isinstance(
|
| 639 |
+
preprocessor, (layers.HashedCrossing, layers.Hashing)
|
| 640 |
+
):
|
| 641 |
+
cardinality = preprocessor.num_bins
|
| 642 |
+
else:
|
| 643 |
+
raise ValueError(
|
| 644 |
+
f"Feature '{name}' has `output_mode='one_hot'`. "
|
| 645 |
+
"However it isn't a standard feature and the "
|
| 646 |
+
"dimensionality of its output space is not known, "
|
| 647 |
+
"thus it cannot be one-hot encoded. "
|
| 648 |
+
"Try using `output_mode='int'`."
|
| 649 |
+
)
|
| 650 |
+
if cardinality is not None:
|
| 651 |
+
encoder = layers.CategoryEncoding(
|
| 652 |
+
num_tokens=cardinality, output_mode="multi_hot"
|
| 653 |
+
)
|
| 654 |
+
self.one_hot_encoders[name] = encoder
|
| 655 |
+
feature = encoder(feature)
|
| 656 |
+
|
| 657 |
+
if self.output_mode == "concat":
|
| 658 |
+
dtype = feature.dtype
|
| 659 |
+
if dtype.startswith("int") or dtype == "string":
|
| 660 |
+
raise ValueError(
|
| 661 |
+
f"Cannot concatenate features because feature '{name}' "
|
| 662 |
+
f"has not been encoded (it has dtype {dtype}). "
|
| 663 |
+
"Consider using `output_mode='dict'`."
|
| 664 |
+
)
|
| 665 |
+
features_to_concat.append(feature)
|
| 666 |
+
else:
|
| 667 |
+
output_dict[name] = feature
|
| 668 |
+
|
| 669 |
+
if self.output_mode == "concat":
|
| 670 |
+
self.concat = TFDConcat(axis=-1)
|
| 671 |
+
return self.concat(features_to_concat)
|
| 672 |
+
else:
|
| 673 |
+
return output_dict
|
| 674 |
+
|
| 675 |
+
def _check_if_adapted(self):
|
| 676 |
+
if not self._is_adapted:
|
| 677 |
+
if not self._list_adaptable_preprocessors():
|
| 678 |
+
self._is_adapted = True
|
| 679 |
+
else:
|
| 680 |
+
raise ValueError(
|
| 681 |
+
"You need to call `.adapt(dataset)` on the FeatureSpace "
|
| 682 |
+
"before you can start using it."
|
| 683 |
+
)
|
| 684 |
+
|
| 685 |
+
def _check_if_built(self):
|
| 686 |
+
if not self._sublayers_built:
|
| 687 |
+
self._check_if_adapted()
|
| 688 |
+
# Finishes building
|
| 689 |
+
self.get_encoded_features()
|
| 690 |
+
self._sublayers_built = True
|
| 691 |
+
|
| 692 |
+
def _convert_input(self, x):
|
| 693 |
+
if not isinstance(x, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
|
| 694 |
+
if not isinstance(x, (list, tuple, int, float)):
|
| 695 |
+
x = backend.convert_to_numpy(x)
|
| 696 |
+
x = tf.convert_to_tensor(x)
|
| 697 |
+
return x
|
| 698 |
+
|
| 699 |
+
def __call__(self, data):
|
| 700 |
+
self._check_if_built()
|
| 701 |
+
if not isinstance(data, dict):
|
| 702 |
+
raise ValueError(
|
| 703 |
+
"A FeatureSpace can only be called with a dict. "
|
| 704 |
+
f"Received: data={data} (of type {type(data)}"
|
| 705 |
+
)
|
| 706 |
+
|
| 707 |
+
# Many preprocessing layers support all backends but many do not.
|
| 708 |
+
# Switch to TF to make FeatureSpace work universally.
|
| 709 |
+
data = {key: self._convert_input(value) for key, value in data.items()}
|
| 710 |
+
rebatched = False
|
| 711 |
+
for name, x in data.items():
|
| 712 |
+
if len(x.shape) == 0:
|
| 713 |
+
data[name] = tf.reshape(x, (1, 1))
|
| 714 |
+
rebatched = True
|
| 715 |
+
elif len(x.shape) == 1:
|
| 716 |
+
data[name] = tf.expand_dims(x, -1)
|
| 717 |
+
|
| 718 |
+
with backend_utils.TFGraphScope():
|
| 719 |
+
# This scope is to make sure that inner TFDataLayers
|
| 720 |
+
# will not convert outputs back to backend-native --
|
| 721 |
+
# they should be TF tensors throughout
|
| 722 |
+
preprocessed_data = self._preprocess_features(data)
|
| 723 |
+
preprocessed_data = tree.map_structure(
|
| 724 |
+
lambda x: self._convert_input(x), preprocessed_data
|
| 725 |
+
)
|
| 726 |
+
|
| 727 |
+
crossed_data = self._cross_features(preprocessed_data)
|
| 728 |
+
crossed_data = tree.map_structure(
|
| 729 |
+
lambda x: self._convert_input(x), crossed_data
|
| 730 |
+
)
|
| 731 |
+
|
| 732 |
+
merged_data = self._merge_features(preprocessed_data, crossed_data)
|
| 733 |
+
|
| 734 |
+
if rebatched:
|
| 735 |
+
if self.output_mode == "concat":
|
| 736 |
+
assert merged_data.shape[0] == 1
|
| 737 |
+
if (
|
| 738 |
+
backend.backend() != "tensorflow"
|
| 739 |
+
and not backend_utils.in_tf_graph()
|
| 740 |
+
):
|
| 741 |
+
merged_data = backend.convert_to_numpy(merged_data)
|
| 742 |
+
merged_data = tf.squeeze(merged_data, axis=0)
|
| 743 |
+
else:
|
| 744 |
+
for name, x in merged_data.items():
|
| 745 |
+
if len(x.shape) == 2 and x.shape[0] == 1:
|
| 746 |
+
merged_data[name] = tf.squeeze(x, axis=0)
|
| 747 |
+
|
| 748 |
+
if (
|
| 749 |
+
backend.backend() != "tensorflow"
|
| 750 |
+
and not backend_utils.in_tf_graph()
|
| 751 |
+
):
|
| 752 |
+
merged_data = tree.map_structure(
|
| 753 |
+
lambda x: backend.convert_to_tensor(x, dtype=x.dtype),
|
| 754 |
+
merged_data,
|
| 755 |
+
)
|
| 756 |
+
return merged_data
|
| 757 |
+
|
| 758 |
+
def get_config(self):
|
| 759 |
+
return {
|
| 760 |
+
"features": serialization_lib.serialize_keras_object(self.features),
|
| 761 |
+
"output_mode": self.output_mode,
|
| 762 |
+
"crosses": serialization_lib.serialize_keras_object(self.crosses),
|
| 763 |
+
"crossing_dim": self.crossing_dim,
|
| 764 |
+
"hashing_dim": self.hashing_dim,
|
| 765 |
+
"num_discretization_bins": self.num_discretization_bins,
|
| 766 |
+
}
|
| 767 |
+
|
| 768 |
+
@classmethod
|
| 769 |
+
def from_config(cls, config):
|
| 770 |
+
return cls(**config)
|
| 771 |
+
|
| 772 |
+
def get_build_config(self):
|
| 773 |
+
return {
|
| 774 |
+
name: feature.preprocessor.get_build_config()
|
| 775 |
+
for name, feature in self.features.items()
|
| 776 |
+
}
|
| 777 |
+
|
| 778 |
+
def build_from_config(self, config):
|
| 779 |
+
for name in config.keys():
|
| 780 |
+
preprocessor = self.features[name].preprocessor
|
| 781 |
+
if not preprocessor.built:
|
| 782 |
+
preprocessor.build_from_config(config[name])
|
| 783 |
+
self._is_adapted = True
|
| 784 |
+
|
| 785 |
+
def save(self, filepath):
|
| 786 |
+
"""Save the `FeatureSpace` instance to a `.keras` file.
|
| 787 |
+
|
| 788 |
+
You can reload it via `keras.models.load_model()`:
|
| 789 |
+
|
| 790 |
+
```python
|
| 791 |
+
feature_space.save("featurespace.keras")
|
| 792 |
+
reloaded_fs = keras.models.load_model("featurespace.keras")
|
| 793 |
+
```
|
| 794 |
+
"""
|
| 795 |
+
saving_lib.save_model(self, filepath)
|
| 796 |
+
|
| 797 |
+
def save_own_variables(self, store):
|
| 798 |
+
return
|
| 799 |
+
|
| 800 |
+
def load_own_variables(self, store):
|
| 801 |
+
return
|
| 802 |
+
|
| 803 |
+
|
| 804 |
+
class TFDConcat(TFDataLayer):
|
| 805 |
+
def __init__(self, axis, **kwargs):
|
| 806 |
+
super().__init__(**kwargs)
|
| 807 |
+
self.axis = axis
|
| 808 |
+
|
| 809 |
+
def call(self, xs):
|
| 810 |
+
return self.backend.numpy.concatenate(xs, axis=self.axis)
|
| 811 |
+
|
| 812 |
+
|
| 813 |
+
class TFDIdentity(TFDataLayer):
|
| 814 |
+
def call(self, x):
|
| 815 |
+
return x
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/hashed_crossing.py
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.layer import Layer
|
| 4 |
+
from keras.src.utils import argument_validation
|
| 5 |
+
from keras.src.utils import backend_utils
|
| 6 |
+
from keras.src.utils import numerical_utils
|
| 7 |
+
from keras.src.utils import tf_utils
|
| 8 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@keras_export("keras.layers.HashedCrossing")
|
| 12 |
+
class HashedCrossing(Layer):
|
| 13 |
+
"""A preprocessing layer which crosses features using the "hashing trick".
|
| 14 |
+
|
| 15 |
+
This layer performs crosses of categorical features using the "hashing
|
| 16 |
+
trick". Conceptually, the transformation can be thought of as:
|
| 17 |
+
`hash(concatenate(features)) % num_bins`.
|
| 18 |
+
|
| 19 |
+
This layer currently only performs crosses of scalar inputs and batches of
|
| 20 |
+
scalar inputs. Valid input shapes are `(batch_size, 1)`, `(batch_size,)` and
|
| 21 |
+
`()`.
|
| 22 |
+
|
| 23 |
+
**Note:** This layer wraps `tf.keras.layers.HashedCrossing`. It cannot
|
| 24 |
+
be used as part of the compiled computation graph of a model with
|
| 25 |
+
any backend other than TensorFlow.
|
| 26 |
+
It can however be used with any backend when running eagerly.
|
| 27 |
+
It can also always be used as part of an input preprocessing pipeline
|
| 28 |
+
with any backend (outside the model itself), which is how we recommend
|
| 29 |
+
to use this layer.
|
| 30 |
+
|
| 31 |
+
**Note:** This layer is safe to use inside a `tf.data` pipeline
|
| 32 |
+
(independently of which backend you're using).
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
num_bins: Number of hash bins.
|
| 36 |
+
output_mode: Specification for the output of the layer. Values can be
|
| 37 |
+
`"int"`, or `"one_hot"` configuring the layer as follows:
|
| 38 |
+
- `"int"`: Return the integer bin indices directly.
|
| 39 |
+
- `"one_hot"`: Encodes each individual element in the input into an
|
| 40 |
+
array the same size as `num_bins`, containing a 1 at the input's
|
| 41 |
+
bin index. Defaults to `"int"`.
|
| 42 |
+
sparse: Boolean. Only applicable to `"one_hot"` mode and only valid
|
| 43 |
+
when using the TensorFlow backend. If `True`, returns
|
| 44 |
+
a `SparseTensor` instead of a dense `Tensor`. Defaults to `False`.
|
| 45 |
+
**kwargs: Keyword arguments to construct a layer.
|
| 46 |
+
|
| 47 |
+
Examples:
|
| 48 |
+
|
| 49 |
+
**Crossing two scalar features.**
|
| 50 |
+
|
| 51 |
+
>>> layer = keras.layers.HashedCrossing(
|
| 52 |
+
... num_bins=5)
|
| 53 |
+
>>> feat1 = np.array(['A', 'B', 'A', 'B', 'A'])
|
| 54 |
+
>>> feat2 = np.array([101, 101, 101, 102, 102])
|
| 55 |
+
>>> layer((feat1, feat2))
|
| 56 |
+
array([1, 4, 1, 1, 3])
|
| 57 |
+
|
| 58 |
+
**Crossing and one-hotting two scalar features.**
|
| 59 |
+
|
| 60 |
+
>>> layer = keras.layers.HashedCrossing(
|
| 61 |
+
... num_bins=5, output_mode='one_hot')
|
| 62 |
+
>>> feat1 = np.array(['A', 'B', 'A', 'B', 'A'])
|
| 63 |
+
>>> feat2 = np.array([101, 101, 101, 102, 102])
|
| 64 |
+
>>> layer((feat1, feat2))
|
| 65 |
+
array([[0., 1., 0., 0., 0.],
|
| 66 |
+
[0., 0., 0., 0., 1.],
|
| 67 |
+
[0., 1., 0., 0., 0.],
|
| 68 |
+
[0., 1., 0., 0., 0.],
|
| 69 |
+
[0., 0., 0., 1., 0.]], dtype=float32)
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
def __init__(
|
| 73 |
+
self,
|
| 74 |
+
num_bins,
|
| 75 |
+
output_mode="int",
|
| 76 |
+
sparse=False,
|
| 77 |
+
name=None,
|
| 78 |
+
dtype=None,
|
| 79 |
+
**kwargs,
|
| 80 |
+
):
|
| 81 |
+
if not tf.available:
|
| 82 |
+
raise ImportError(
|
| 83 |
+
"Layer HashedCrossing requires TensorFlow. "
|
| 84 |
+
"Install it via `pip install tensorflow`."
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
if output_mode == "int" and dtype is None:
|
| 88 |
+
dtype = "int64"
|
| 89 |
+
|
| 90 |
+
super().__init__(name=name, dtype=dtype)
|
| 91 |
+
if sparse and backend.backend() != "tensorflow":
|
| 92 |
+
raise ValueError(
|
| 93 |
+
"`sparse=True` can only be used with the " "TensorFlow backend."
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
argument_validation.validate_string_arg(
|
| 97 |
+
output_mode,
|
| 98 |
+
allowable_strings=("int", "one_hot"),
|
| 99 |
+
caller_name=self.__class__.__name__,
|
| 100 |
+
arg_name="output_mode",
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
self.num_bins = num_bins
|
| 104 |
+
self.output_mode = output_mode
|
| 105 |
+
self.sparse = sparse
|
| 106 |
+
self._allow_non_tensor_positional_args = True
|
| 107 |
+
self._convert_input_args = False
|
| 108 |
+
self.supports_jit = False
|
| 109 |
+
|
| 110 |
+
def compute_output_shape(self, input_shape):
|
| 111 |
+
if (
|
| 112 |
+
not len(input_shape) == 2
|
| 113 |
+
or not isinstance(input_shape[0], tuple)
|
| 114 |
+
or not isinstance(input_shape[1], tuple)
|
| 115 |
+
):
|
| 116 |
+
raise ValueError(
|
| 117 |
+
"Expected as input a list/tuple of 2 tensors. "
|
| 118 |
+
f"Received input_shape={input_shape}"
|
| 119 |
+
)
|
| 120 |
+
if input_shape[0][-1] != input_shape[1][-1]:
|
| 121 |
+
raise ValueError(
|
| 122 |
+
"Expected the two input tensors to have identical shapes. "
|
| 123 |
+
f"Received input_shape={input_shape}"
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
if not input_shape:
|
| 127 |
+
if self.output_mode == "int":
|
| 128 |
+
return ()
|
| 129 |
+
return (self.num_bins,)
|
| 130 |
+
if self.output_mode == "int":
|
| 131 |
+
return input_shape[0]
|
| 132 |
+
|
| 133 |
+
if self.output_mode == "one_hot" and input_shape[0][-1] != 1:
|
| 134 |
+
return tuple(input_shape[0]) + (self.num_bins,)
|
| 135 |
+
|
| 136 |
+
return tuple(input_shape[0])[:-1] + (self.num_bins,)
|
| 137 |
+
|
| 138 |
+
def call(self, inputs):
|
| 139 |
+
from keras.src.backend import tensorflow as tf_backend
|
| 140 |
+
|
| 141 |
+
self._check_at_least_two_inputs(inputs)
|
| 142 |
+
inputs = [tf_utils.ensure_tensor(x) for x in inputs]
|
| 143 |
+
self._check_input_shape_and_type(inputs)
|
| 144 |
+
|
| 145 |
+
# Uprank to rank 2 for the cross_hashed op.
|
| 146 |
+
rank = len(inputs[0].shape)
|
| 147 |
+
if rank < 2:
|
| 148 |
+
inputs = [tf_backend.numpy.expand_dims(x, -1) for x in inputs]
|
| 149 |
+
if rank < 1:
|
| 150 |
+
inputs = [tf_backend.numpy.expand_dims(x, -1) for x in inputs]
|
| 151 |
+
|
| 152 |
+
# Perform the cross and convert to dense
|
| 153 |
+
outputs = tf.sparse.cross_hashed(inputs, self.num_bins)
|
| 154 |
+
outputs = tf.sparse.to_dense(outputs)
|
| 155 |
+
|
| 156 |
+
# Fix output shape and downrank to match input rank.
|
| 157 |
+
if rank == 2:
|
| 158 |
+
# tf.sparse.cross_hashed output shape will always be None on the
|
| 159 |
+
# last dimension. Given our input shape restrictions, we want to
|
| 160 |
+
# force shape 1 instead.
|
| 161 |
+
outputs = tf.reshape(outputs, [-1, 1])
|
| 162 |
+
elif rank == 1:
|
| 163 |
+
outputs = tf.reshape(outputs, [-1])
|
| 164 |
+
elif rank == 0:
|
| 165 |
+
outputs = tf.reshape(outputs, [])
|
| 166 |
+
|
| 167 |
+
# Encode outputs.
|
| 168 |
+
outputs = numerical_utils.encode_categorical_inputs(
|
| 169 |
+
outputs,
|
| 170 |
+
output_mode=self.output_mode,
|
| 171 |
+
depth=self.num_bins,
|
| 172 |
+
sparse=self.sparse,
|
| 173 |
+
dtype=self.compute_dtype,
|
| 174 |
+
backend_module=tf_backend,
|
| 175 |
+
)
|
| 176 |
+
return backend_utils.convert_tf_tensor(outputs, dtype=self.dtype)
|
| 177 |
+
|
| 178 |
+
def get_config(self):
|
| 179 |
+
return {
|
| 180 |
+
"num_bins": self.num_bins,
|
| 181 |
+
"output_mode": self.output_mode,
|
| 182 |
+
"sparse": self.sparse,
|
| 183 |
+
"name": self.name,
|
| 184 |
+
"dtype": self.dtype,
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
def _check_at_least_two_inputs(self, inputs):
|
| 188 |
+
if not isinstance(inputs, (list, tuple)):
|
| 189 |
+
raise ValueError(
|
| 190 |
+
"`HashedCrossing` should be called on a list or tuple of "
|
| 191 |
+
f"inputs. Received: inputs={inputs}"
|
| 192 |
+
)
|
| 193 |
+
if len(inputs) < 2:
|
| 194 |
+
raise ValueError(
|
| 195 |
+
"`HashedCrossing` should be called on at least two inputs. "
|
| 196 |
+
f"Received: inputs={inputs}"
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
def _check_input_shape_and_type(self, inputs):
|
| 200 |
+
first_shape = tuple(inputs[0].shape)
|
| 201 |
+
rank = len(first_shape)
|
| 202 |
+
if rank > 2 or (rank == 2 and first_shape[-1] != 1):
|
| 203 |
+
raise ValueError(
|
| 204 |
+
"All `HashedCrossing` inputs should have shape `()`, "
|
| 205 |
+
"`(batch_size)` or `(batch_size, 1)`. "
|
| 206 |
+
f"Received: inputs={inputs}"
|
| 207 |
+
)
|
| 208 |
+
if not all(tuple(x.shape) == first_shape for x in inputs[1:]):
|
| 209 |
+
raise ValueError(
|
| 210 |
+
"All `HashedCrossing` inputs should have equal shape. "
|
| 211 |
+
f"Received: inputs={inputs}"
|
| 212 |
+
)
|
| 213 |
+
if any(
|
| 214 |
+
isinstance(x, (tf.RaggedTensor, tf.SparseTensor)) for x in inputs
|
| 215 |
+
):
|
| 216 |
+
raise ValueError(
|
| 217 |
+
"All `HashedCrossing` inputs should be dense tensors. "
|
| 218 |
+
f"Received: inputs={inputs}"
|
| 219 |
+
)
|
| 220 |
+
if not all(
|
| 221 |
+
tf.as_dtype(x.dtype).is_integer or x.dtype == tf.string
|
| 222 |
+
for x in inputs
|
| 223 |
+
):
|
| 224 |
+
raise ValueError(
|
| 225 |
+
"All `HashedCrossing` inputs should have an integer or "
|
| 226 |
+
f"string dtype. Received: inputs={inputs}"
|
| 227 |
+
)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/hashing.py
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.layer import Layer
|
| 4 |
+
from keras.src.utils import backend_utils
|
| 5 |
+
from keras.src.utils import numerical_utils
|
| 6 |
+
from keras.src.utils import tf_utils
|
| 7 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@keras_export("keras.layers.Hashing")
|
| 11 |
+
class Hashing(Layer):
|
| 12 |
+
"""A preprocessing layer which hashes and bins categorical features.
|
| 13 |
+
|
| 14 |
+
This layer transforms categorical inputs to hashed output. It element-wise
|
| 15 |
+
converts a ints or strings to ints in a fixed range. The stable hash
|
| 16 |
+
function uses `tensorflow::ops::Fingerprint` to produce the same output
|
| 17 |
+
consistently across all platforms.
|
| 18 |
+
|
| 19 |
+
This layer uses [FarmHash64](https://github.com/google/farmhash) by default,
|
| 20 |
+
which provides a consistent hashed output across different platforms and is
|
| 21 |
+
stable across invocations, regardless of device and context, by mixing the
|
| 22 |
+
input bits thoroughly.
|
| 23 |
+
|
| 24 |
+
If you want to obfuscate the hashed output, you can also pass a random
|
| 25 |
+
`salt` argument in the constructor. In that case, the layer will use the
|
| 26 |
+
[SipHash64](https://github.com/google/highwayhash) hash function, with
|
| 27 |
+
the `salt` value serving as additional input to the hash function.
|
| 28 |
+
|
| 29 |
+
**Note:** This layer internally uses TensorFlow. It cannot
|
| 30 |
+
be used as part of the compiled computation graph of a model with
|
| 31 |
+
any backend other than TensorFlow.
|
| 32 |
+
It can however be used with any backend when running eagerly.
|
| 33 |
+
It can also always be used as part of an input preprocessing pipeline
|
| 34 |
+
with any backend (outside the model itself), which is how we recommend
|
| 35 |
+
to use this layer.
|
| 36 |
+
|
| 37 |
+
**Note:** This layer is safe to use inside a `tf.data` pipeline
|
| 38 |
+
(independently of which backend you're using).
|
| 39 |
+
|
| 40 |
+
**Example (FarmHash64)**
|
| 41 |
+
|
| 42 |
+
>>> layer = keras.layers.Hashing(num_bins=3)
|
| 43 |
+
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
|
| 44 |
+
>>> layer(inp)
|
| 45 |
+
array([[1],
|
| 46 |
+
[0],
|
| 47 |
+
[1],
|
| 48 |
+
[1],
|
| 49 |
+
[2]])>
|
| 50 |
+
|
| 51 |
+
**Example (FarmHash64) with a mask value**
|
| 52 |
+
|
| 53 |
+
>>> layer = keras.layers.Hashing(num_bins=3, mask_value='')
|
| 54 |
+
>>> inp = [['A'], ['B'], [''], ['C'], ['D']]
|
| 55 |
+
>>> layer(inp)
|
| 56 |
+
array([[1],
|
| 57 |
+
[1],
|
| 58 |
+
[0],
|
| 59 |
+
[2],
|
| 60 |
+
[2]])
|
| 61 |
+
|
| 62 |
+
**Example (SipHash64)**
|
| 63 |
+
|
| 64 |
+
>>> layer = keras.layers.Hashing(num_bins=3, salt=[133, 137])
|
| 65 |
+
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
|
| 66 |
+
>>> layer(inp)
|
| 67 |
+
array([[1],
|
| 68 |
+
[2],
|
| 69 |
+
[1],
|
| 70 |
+
[0],
|
| 71 |
+
[2]])
|
| 72 |
+
|
| 73 |
+
**Example (Siphash64 with a single integer, same as `salt=[133, 133]`)**
|
| 74 |
+
|
| 75 |
+
>>> layer = keras.layers.Hashing(num_bins=3, salt=133)
|
| 76 |
+
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
|
| 77 |
+
>>> layer(inp)
|
| 78 |
+
array([[0],
|
| 79 |
+
[0],
|
| 80 |
+
[2],
|
| 81 |
+
[1],
|
| 82 |
+
[0]])
|
| 83 |
+
|
| 84 |
+
Args:
|
| 85 |
+
num_bins: Number of hash bins. Note that this includes the `mask_value`
|
| 86 |
+
bin, so the effective number of bins is `(num_bins - 1)`
|
| 87 |
+
if `mask_value` is set.
|
| 88 |
+
mask_value: A value that represents masked inputs, which are mapped to
|
| 89 |
+
index 0. `None` means no mask term will be added and the
|
| 90 |
+
hashing will start at index 0. Defaults to `None`.
|
| 91 |
+
salt: A single unsigned integer or None.
|
| 92 |
+
If passed, the hash function used will be SipHash64,
|
| 93 |
+
with these values used as an additional input
|
| 94 |
+
(known as a "salt" in cryptography).
|
| 95 |
+
These should be non-zero. If `None`, uses the FarmHash64 hash
|
| 96 |
+
function. It also supports tuple/list of 2 unsigned
|
| 97 |
+
integer numbers, see reference paper for details.
|
| 98 |
+
Defaults to `None`.
|
| 99 |
+
output_mode: Specification for the output of the layer. Values can be
|
| 100 |
+
`"int"`, `"one_hot"`, `"multi_hot"`, or
|
| 101 |
+
`"count"` configuring the layer as follows:
|
| 102 |
+
- `"int"`: Return the integer bin indices directly.
|
| 103 |
+
- `"one_hot"`: Encodes each individual element in the input into an
|
| 104 |
+
array the same size as `num_bins`, containing a 1
|
| 105 |
+
at the input's bin index. If the last dimension is size 1,
|
| 106 |
+
will encode on that dimension.
|
| 107 |
+
If the last dimension is not size 1, will append a new
|
| 108 |
+
dimension for the encoded output.
|
| 109 |
+
- `"multi_hot"`: Encodes each sample in the input into a
|
| 110 |
+
single array the same size as `num_bins`,
|
| 111 |
+
containing a 1 for each bin index
|
| 112 |
+
index present in the sample. Treats the last dimension
|
| 113 |
+
as the sample dimension, if input shape is
|
| 114 |
+
`(..., sample_length)`, output shape will be
|
| 115 |
+
`(..., num_tokens)`.
|
| 116 |
+
- `"count"`: As `"multi_hot"`, but the int array contains a count of
|
| 117 |
+
the number of times the bin index appeared in the sample.
|
| 118 |
+
Defaults to `"int"`.
|
| 119 |
+
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
|
| 120 |
+
and `"count"` output modes. Only supported with TensorFlow
|
| 121 |
+
backend. If `True`, returns a `SparseTensor` instead of
|
| 122 |
+
a dense `Tensor`. Defaults to `False`.
|
| 123 |
+
**kwargs: Keyword arguments to construct a layer.
|
| 124 |
+
|
| 125 |
+
Input shape:
|
| 126 |
+
A single string, a list of strings, or an `int32` or `int64` tensor
|
| 127 |
+
of shape `(batch_size, ...,)`.
|
| 128 |
+
|
| 129 |
+
Output shape:
|
| 130 |
+
An `int32` tensor of shape `(batch_size, ...)`.
|
| 131 |
+
|
| 132 |
+
Reference:
|
| 133 |
+
|
| 134 |
+
- [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)
|
| 135 |
+
"""
|
| 136 |
+
|
| 137 |
+
def __init__(
|
| 138 |
+
self,
|
| 139 |
+
num_bins,
|
| 140 |
+
mask_value=None,
|
| 141 |
+
salt=None,
|
| 142 |
+
output_mode="int",
|
| 143 |
+
sparse=False,
|
| 144 |
+
**kwargs,
|
| 145 |
+
):
|
| 146 |
+
if not tf.available:
|
| 147 |
+
raise ImportError(
|
| 148 |
+
"Layer Hashing requires TensorFlow. "
|
| 149 |
+
"Install it via `pip install tensorflow`."
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
# By default, output int32 when output_mode='int' and floats otherwise.
|
| 153 |
+
if "dtype" not in kwargs or kwargs["dtype"] is None:
|
| 154 |
+
kwargs["dtype"] = (
|
| 155 |
+
"int64" if output_mode == "int" else backend.floatx()
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
super().__init__(**kwargs)
|
| 159 |
+
|
| 160 |
+
if num_bins is None or num_bins <= 0:
|
| 161 |
+
raise ValueError(
|
| 162 |
+
"The `num_bins` for `Hashing` cannot be `None` or "
|
| 163 |
+
f"non-positive values. Received: num_bins={num_bins}."
|
| 164 |
+
)
|
| 165 |
+
|
| 166 |
+
if output_mode == "int" and (
|
| 167 |
+
self.dtype_policy.name not in ("int32", "int64")
|
| 168 |
+
):
|
| 169 |
+
raise ValueError(
|
| 170 |
+
'When `output_mode="int"`, `dtype` should be an integer '
|
| 171 |
+
f"type, 'int32' or 'in64'. Received: dtype={kwargs['dtype']}"
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
# 'output_mode' must be one of (INT, ONE_HOT, MULTI_HOT, COUNT)
|
| 175 |
+
accepted_output_modes = ("int", "one_hot", "multi_hot", "count")
|
| 176 |
+
if output_mode not in accepted_output_modes:
|
| 177 |
+
raise ValueError(
|
| 178 |
+
"Invalid value for argument `output_mode`. "
|
| 179 |
+
f"Expected one of {accepted_output_modes}. "
|
| 180 |
+
f"Received: output_mode={output_mode}"
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
if sparse and output_mode == "int":
|
| 184 |
+
raise ValueError(
|
| 185 |
+
"`sparse` may only be true if `output_mode` is "
|
| 186 |
+
'`"one_hot"`, `"multi_hot"`, or `"count"`. '
|
| 187 |
+
f"Received: sparse={sparse} and "
|
| 188 |
+
f"output_mode={output_mode}"
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
self.num_bins = num_bins
|
| 192 |
+
self.mask_value = mask_value
|
| 193 |
+
self.strong_hash = True if salt is not None else False
|
| 194 |
+
self.output_mode = output_mode
|
| 195 |
+
self.sparse = sparse
|
| 196 |
+
self.salt = None
|
| 197 |
+
if salt is not None:
|
| 198 |
+
if isinstance(salt, (tuple, list)) and len(salt) == 2:
|
| 199 |
+
self.salt = list(salt)
|
| 200 |
+
elif isinstance(salt, int):
|
| 201 |
+
self.salt = [salt, salt]
|
| 202 |
+
else:
|
| 203 |
+
raise ValueError(
|
| 204 |
+
"The `salt` argument for `Hashing` can only be a tuple of "
|
| 205 |
+
"size 2 integers, or a single integer. "
|
| 206 |
+
f"Received: salt={salt}."
|
| 207 |
+
)
|
| 208 |
+
self._convert_input_args = False
|
| 209 |
+
self._allow_non_tensor_positional_args = True
|
| 210 |
+
self.supports_jit = False
|
| 211 |
+
|
| 212 |
+
def call(self, inputs):
|
| 213 |
+
from keras.src.backend import tensorflow as tf_backend
|
| 214 |
+
|
| 215 |
+
inputs = tf_utils.ensure_tensor(inputs)
|
| 216 |
+
if self.output_mode == "one_hot" and inputs.shape[-1] == 1:
|
| 217 |
+
# One hot only unpranks if the final dimension is not 1.
|
| 218 |
+
inputs = tf_backend.numpy.squeeze(inputs, axis=-1)
|
| 219 |
+
if isinstance(inputs, tf.SparseTensor):
|
| 220 |
+
indices = tf.SparseTensor(
|
| 221 |
+
indices=inputs.indices,
|
| 222 |
+
values=self._hash_values_to_bins(inputs.values),
|
| 223 |
+
dense_shape=inputs.dense_shape,
|
| 224 |
+
)
|
| 225 |
+
else:
|
| 226 |
+
indices = self._hash_values_to_bins(inputs)
|
| 227 |
+
outputs = numerical_utils.encode_categorical_inputs(
|
| 228 |
+
indices,
|
| 229 |
+
output_mode=self.output_mode,
|
| 230 |
+
depth=self.num_bins,
|
| 231 |
+
sparse=self.sparse,
|
| 232 |
+
dtype=self.dtype,
|
| 233 |
+
backend_module=tf_backend,
|
| 234 |
+
)
|
| 235 |
+
return backend_utils.convert_tf_tensor(outputs)
|
| 236 |
+
|
| 237 |
+
def _hash_values_to_bins(self, values):
|
| 238 |
+
"""Converts a non-sparse tensor of values to bin indices."""
|
| 239 |
+
hash_bins = self.num_bins
|
| 240 |
+
mask = None
|
| 241 |
+
# If mask_value is set, the zeroth bin is reserved for it.
|
| 242 |
+
if self.mask_value is not None and hash_bins > 1:
|
| 243 |
+
hash_bins -= 1
|
| 244 |
+
mask = tf.equal(values, self.mask_value)
|
| 245 |
+
# Convert all values to strings before hashing.
|
| 246 |
+
# Floats are first normalized to int64.
|
| 247 |
+
if values.dtype.is_floating:
|
| 248 |
+
values = tf.cast(values, dtype="int64")
|
| 249 |
+
if values.dtype != tf.string:
|
| 250 |
+
values = tf.as_string(values)
|
| 251 |
+
# Hash the strings.
|
| 252 |
+
if self.strong_hash:
|
| 253 |
+
values = tf.strings.to_hash_bucket_strong(
|
| 254 |
+
values, hash_bins, name="hash", key=self.salt
|
| 255 |
+
)
|
| 256 |
+
else:
|
| 257 |
+
values = tf.strings.to_hash_bucket_fast(
|
| 258 |
+
values, hash_bins, name="hash"
|
| 259 |
+
)
|
| 260 |
+
if mask is not None:
|
| 261 |
+
values = tf.add(values, tf.ones_like(values))
|
| 262 |
+
values = tf.where(mask, tf.zeros_like(values), values)
|
| 263 |
+
return values
|
| 264 |
+
|
| 265 |
+
def compute_output_spec(self, inputs):
|
| 266 |
+
if self.output_mode == "int":
|
| 267 |
+
return backend.KerasTensor(shape=inputs.shape, dtype=self.dtype)
|
| 268 |
+
if len(inputs.shape) >= 1:
|
| 269 |
+
base_shape = tuple(inputs.shape)[:-1]
|
| 270 |
+
else:
|
| 271 |
+
base_shape = ()
|
| 272 |
+
return backend.KerasTensor(
|
| 273 |
+
shape=base_shape + (self.num_bins,), dtype=self.dtype
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
def get_config(self):
|
| 277 |
+
config = super().get_config()
|
| 278 |
+
config.update(
|
| 279 |
+
{
|
| 280 |
+
"num_bins": self.num_bins,
|
| 281 |
+
"salt": self.salt,
|
| 282 |
+
"mask_value": self.mask_value,
|
| 283 |
+
"output_mode": self.output_mode,
|
| 284 |
+
"sparse": self.sparse,
|
| 285 |
+
}
|
| 286 |
+
)
|
| 287 |
+
return config
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_contrast.cpython-310.pyc
ADDED
|
Binary file (5.34 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_crop.cpython-310.pyc
ADDED
|
Binary file (6.99 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_flip.cpython-310.pyc
ADDED
|
Binary file (6.55 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_grayscale.cpython-310.pyc
ADDED
|
Binary file (4.83 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_hue.cpython-310.pyc
ADDED
|
Binary file (5.44 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_posterization.cpython-310.pyc
ADDED
|
Binary file (4.57 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_rotation.cpython-310.pyc
ADDED
|
Binary file (7.88 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_saturation.cpython-310.pyc
ADDED
|
Binary file (5.33 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_sharpness.cpython-310.pyc
ADDED
|
Binary file (5.42 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_shear.cpython-310.pyc
ADDED
|
Binary file (10.7 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_translation.cpython-310.pyc
ADDED
|
Binary file (11.5 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_zoom.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/resizing.cpython-310.pyc
ADDED
|
Binary file (9 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/solarization.cpython-310.pyc
ADDED
|
Binary file (6.71 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 4 |
+
BaseImagePreprocessingLayer,
|
| 5 |
+
)
|
| 6 |
+
from keras.src.ops.core import _saturate_cast
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export("keras.layers.AutoContrast")
|
| 10 |
+
class AutoContrast(BaseImagePreprocessingLayer):
|
| 11 |
+
"""Performs the auto-contrast operation on an image.
|
| 12 |
+
|
| 13 |
+
Auto contrast stretches the values of an image across the entire available
|
| 14 |
+
`value_range`. This makes differences between pixels more obvious. An
|
| 15 |
+
example of this is if an image only has values `[0, 1]` out of the range
|
| 16 |
+
`[0, 255]`, auto contrast will change the `1` values to be `255`.
|
| 17 |
+
|
| 18 |
+
This layer is active at both training and inference time.
|
| 19 |
+
|
| 20 |
+
Args:
|
| 21 |
+
value_range: Range of values the incoming images will have.
|
| 22 |
+
Represented as a two number tuple written `(low, high)`.
|
| 23 |
+
This is typically either `(0, 1)` or `(0, 255)` depending
|
| 24 |
+
on how your preprocessing pipeline is set up.
|
| 25 |
+
Defaults to `(0, 255)`.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
_USE_BASE_FACTOR = False
|
| 29 |
+
_VALUE_RANGE_VALIDATION_ERROR = (
|
| 30 |
+
"The `value_range` argument should be a list of two numbers. "
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
def __init__(
|
| 34 |
+
self,
|
| 35 |
+
value_range=(0, 255),
|
| 36 |
+
**kwargs,
|
| 37 |
+
):
|
| 38 |
+
super().__init__(**kwargs)
|
| 39 |
+
self._set_value_range(value_range)
|
| 40 |
+
|
| 41 |
+
def _set_value_range(self, value_range):
|
| 42 |
+
if not isinstance(value_range, (tuple, list)):
|
| 43 |
+
raise ValueError(
|
| 44 |
+
self._VALUE_RANGE_VALIDATION_ERROR
|
| 45 |
+
+ f"Received: value_range={value_range}"
|
| 46 |
+
)
|
| 47 |
+
if len(value_range) != 2:
|
| 48 |
+
raise ValueError(
|
| 49 |
+
self._VALUE_RANGE_VALIDATION_ERROR
|
| 50 |
+
+ f"Received: value_range={value_range}"
|
| 51 |
+
)
|
| 52 |
+
self.value_range = sorted(value_range)
|
| 53 |
+
|
| 54 |
+
def transform_images(self, images, transformation=None, training=True):
|
| 55 |
+
original_images = images
|
| 56 |
+
images = self._transform_value_range(
|
| 57 |
+
images,
|
| 58 |
+
original_range=self.value_range,
|
| 59 |
+
target_range=(0, 255),
|
| 60 |
+
dtype=self.compute_dtype,
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
images = self.backend.cast(images, self.compute_dtype)
|
| 64 |
+
low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)
|
| 65 |
+
high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)
|
| 66 |
+
scale = 255.0 / (high - low)
|
| 67 |
+
offset = -low * scale
|
| 68 |
+
|
| 69 |
+
images = images * scale + offset
|
| 70 |
+
results = self.backend.numpy.clip(images, 0.0, 255.0)
|
| 71 |
+
results = self._transform_value_range(
|
| 72 |
+
results,
|
| 73 |
+
original_range=(0, 255),
|
| 74 |
+
target_range=self.value_range,
|
| 75 |
+
dtype=self.compute_dtype,
|
| 76 |
+
)
|
| 77 |
+
# don't process NaN channels
|
| 78 |
+
results = self.backend.numpy.where(
|
| 79 |
+
self.backend.numpy.isnan(results), original_images, results
|
| 80 |
+
)
|
| 81 |
+
if results.dtype == images.dtype:
|
| 82 |
+
return results
|
| 83 |
+
if backend.is_int_dtype(images.dtype):
|
| 84 |
+
results = self.backend.numpy.round(results)
|
| 85 |
+
return _saturate_cast(results, images.dtype, self.backend)
|
| 86 |
+
|
| 87 |
+
def transform_labels(self, labels, transformation, training=True):
|
| 88 |
+
return labels
|
| 89 |
+
|
| 90 |
+
def transform_bounding_boxes(
|
| 91 |
+
self,
|
| 92 |
+
bounding_boxes,
|
| 93 |
+
transformation,
|
| 94 |
+
training=True,
|
| 95 |
+
):
|
| 96 |
+
return bounding_boxes
|
| 97 |
+
|
| 98 |
+
def transform_segmentation_masks(
|
| 99 |
+
self, segmentation_masks, transformation, training=True
|
| 100 |
+
):
|
| 101 |
+
return segmentation_masks
|
| 102 |
+
|
| 103 |
+
def get_config(self):
|
| 104 |
+
config = super().get_config()
|
| 105 |
+
config.update({"value_range": self.value_range})
|
| 106 |
+
return config
|
| 107 |
+
|
| 108 |
+
def compute_output_shape(self, input_shape):
|
| 109 |
+
return input_shape
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from keras.src.backend import config as backend_config
|
| 4 |
+
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.validation import ( # noqa: E501
|
| 5 |
+
densify_bounding_boxes,
|
| 6 |
+
)
|
| 7 |
+
from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class BaseImagePreprocessingLayer(TFDataLayer):
|
| 11 |
+
_USE_BASE_FACTOR = True
|
| 12 |
+
_FACTOR_BOUNDS = (-1, 1)
|
| 13 |
+
|
| 14 |
+
def __init__(
|
| 15 |
+
self, factor=None, bounding_box_format=None, data_format=None, **kwargs
|
| 16 |
+
):
|
| 17 |
+
super().__init__(**kwargs)
|
| 18 |
+
self.bounding_box_format = bounding_box_format
|
| 19 |
+
self.data_format = backend_config.standardize_data_format(data_format)
|
| 20 |
+
if self._USE_BASE_FACTOR:
|
| 21 |
+
factor = factor or 0.0
|
| 22 |
+
self._set_factor(factor)
|
| 23 |
+
elif factor is not None:
|
| 24 |
+
raise ValueError(
|
| 25 |
+
f"Layer {self.__class__.__name__} does not take "
|
| 26 |
+
f"a `factor` argument. Received: factor={factor}"
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
def _set_factor(self, factor):
|
| 30 |
+
error_msg = (
|
| 31 |
+
"The `factor` argument should be a number "
|
| 32 |
+
"(or a list of two numbers) "
|
| 33 |
+
"in the range "
|
| 34 |
+
f"[{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. "
|
| 35 |
+
f"Received: factor={factor}"
|
| 36 |
+
)
|
| 37 |
+
if isinstance(factor, (tuple, list)):
|
| 38 |
+
if len(factor) != 2:
|
| 39 |
+
raise ValueError(error_msg)
|
| 40 |
+
if (
|
| 41 |
+
factor[0] > self._FACTOR_BOUNDS[1]
|
| 42 |
+
or factor[1] < self._FACTOR_BOUNDS[0]
|
| 43 |
+
):
|
| 44 |
+
raise ValueError(error_msg)
|
| 45 |
+
lower, upper = sorted(factor)
|
| 46 |
+
elif isinstance(factor, (int, float)):
|
| 47 |
+
if (
|
| 48 |
+
factor < self._FACTOR_BOUNDS[0]
|
| 49 |
+
or factor > self._FACTOR_BOUNDS[1]
|
| 50 |
+
):
|
| 51 |
+
raise ValueError(error_msg)
|
| 52 |
+
factor = abs(factor)
|
| 53 |
+
lower, upper = [max(-factor, self._FACTOR_BOUNDS[0]), factor]
|
| 54 |
+
else:
|
| 55 |
+
raise ValueError(error_msg)
|
| 56 |
+
self.factor = lower, upper
|
| 57 |
+
|
| 58 |
+
def get_random_transformation(self, data, training=True, seed=None):
|
| 59 |
+
return None
|
| 60 |
+
|
| 61 |
+
def transform_images(self, images, transformation, training=True):
|
| 62 |
+
raise NotImplementedError()
|
| 63 |
+
|
| 64 |
+
def transform_labels(self, labels, transformation, training=True):
|
| 65 |
+
raise NotImplementedError()
|
| 66 |
+
|
| 67 |
+
def transform_bounding_boxes(
|
| 68 |
+
self,
|
| 69 |
+
bounding_boxes,
|
| 70 |
+
transformation,
|
| 71 |
+
training=True,
|
| 72 |
+
):
|
| 73 |
+
raise NotImplementedError()
|
| 74 |
+
|
| 75 |
+
def transform_segmentation_masks(
|
| 76 |
+
self, segmentation_masks, transformation, training=True
|
| 77 |
+
):
|
| 78 |
+
raise NotImplementedError()
|
| 79 |
+
|
| 80 |
+
def transform_single_image(self, image, transformation, training=True):
|
| 81 |
+
images = self.backend.numpy.expand_dims(image, axis=0)
|
| 82 |
+
outputs = self.transform_images(
|
| 83 |
+
images, transformation=transformation, training=training
|
| 84 |
+
)
|
| 85 |
+
return self.backend.numpy.squeeze(outputs, axis=0)
|
| 86 |
+
|
| 87 |
+
def transform_single_label(self, label, transformation, training=True):
|
| 88 |
+
labels = self.backend.numpy.expand_dims(label, axis=0)
|
| 89 |
+
outputs = self.transform_labels(
|
| 90 |
+
labels, transformation=transformation, training=training
|
| 91 |
+
)
|
| 92 |
+
return self.backend.numpy.squeeze(outputs, axis=0)
|
| 93 |
+
|
| 94 |
+
def transform_single_bounding_box(
|
| 95 |
+
self,
|
| 96 |
+
bounding_box,
|
| 97 |
+
transformation,
|
| 98 |
+
training=True,
|
| 99 |
+
):
|
| 100 |
+
bounding_boxes = self._format_single_input_bounding_box(bounding_box)
|
| 101 |
+
outputs = self.transform_bounding_boxes(
|
| 102 |
+
bounding_boxes,
|
| 103 |
+
transformation=transformation,
|
| 104 |
+
training=training,
|
| 105 |
+
)
|
| 106 |
+
bounding_box = self._format_single_output_bounding_box(outputs)
|
| 107 |
+
return bounding_box
|
| 108 |
+
|
| 109 |
+
def transform_single_segmentation_mask(
|
| 110 |
+
self, segmentation_mask, transformation, training=True
|
| 111 |
+
):
|
| 112 |
+
segmentation_masks = self.backend.numpy.expand_dims(
|
| 113 |
+
segmentation_mask, axis=0
|
| 114 |
+
)
|
| 115 |
+
outputs = self.transform_segmentation_masks(
|
| 116 |
+
segmentation_masks, transformation=transformation, training=training
|
| 117 |
+
)
|
| 118 |
+
return self.backend.numpy.squeeze(outputs, axis=0)
|
| 119 |
+
|
| 120 |
+
def _is_batched(self, maybe_image_batch):
|
| 121 |
+
shape = self.backend.core.shape(maybe_image_batch)
|
| 122 |
+
if len(shape) == 3:
|
| 123 |
+
return False
|
| 124 |
+
if len(shape) == 4:
|
| 125 |
+
return True
|
| 126 |
+
raise ValueError(
|
| 127 |
+
"Expected image tensor to have rank 3 (single image) "
|
| 128 |
+
f"or 4 (batch of images). Received: data.shape={shape}"
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
def call(self, data, training=True):
|
| 132 |
+
transformation = self.get_random_transformation(data, training=training)
|
| 133 |
+
if isinstance(data, dict):
|
| 134 |
+
is_batched = self._is_batched(data["images"])
|
| 135 |
+
if is_batched:
|
| 136 |
+
data["images"] = self.transform_images(
|
| 137 |
+
self.backend.convert_to_tensor(data["images"]),
|
| 138 |
+
transformation=transformation,
|
| 139 |
+
training=training,
|
| 140 |
+
)
|
| 141 |
+
else:
|
| 142 |
+
data["images"] = self.transform_single_image(
|
| 143 |
+
self.backend.convert_to_tensor(data["images"]),
|
| 144 |
+
transformation=transformation,
|
| 145 |
+
training=training,
|
| 146 |
+
)
|
| 147 |
+
if "bounding_boxes" in data:
|
| 148 |
+
if not self.bounding_box_format:
|
| 149 |
+
raise ValueError(
|
| 150 |
+
"You passed an input with a 'bounding_boxes' key, "
|
| 151 |
+
"but you didn't specify a bounding box format. "
|
| 152 |
+
"Pass a `bounding_box_format` argument to your "
|
| 153 |
+
f"{self.__class__.__name__} layer, e.g. "
|
| 154 |
+
"`bounding_box_format='xyxy'`."
|
| 155 |
+
)
|
| 156 |
+
bounding_boxes = densify_bounding_boxes(
|
| 157 |
+
data["bounding_boxes"],
|
| 158 |
+
is_batched=is_batched,
|
| 159 |
+
backend=self.backend,
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
if is_batched:
|
| 163 |
+
data["bounding_boxes"] = self.transform_bounding_boxes(
|
| 164 |
+
bounding_boxes,
|
| 165 |
+
transformation=transformation,
|
| 166 |
+
training=training,
|
| 167 |
+
)
|
| 168 |
+
else:
|
| 169 |
+
data["bounding_boxes"] = self.transform_single_bounding_box(
|
| 170 |
+
bounding_boxes,
|
| 171 |
+
transformation=transformation,
|
| 172 |
+
training=training,
|
| 173 |
+
)
|
| 174 |
+
if "labels" in data:
|
| 175 |
+
if is_batched:
|
| 176 |
+
data["labels"] = self.transform_labels(
|
| 177 |
+
self.backend.convert_to_tensor(data["labels"]),
|
| 178 |
+
transformation=transformation,
|
| 179 |
+
training=training,
|
| 180 |
+
)
|
| 181 |
+
else:
|
| 182 |
+
data["labels"] = self.transform_single_label(
|
| 183 |
+
self.backend.convert_to_tensor(data["labels"]),
|
| 184 |
+
transformation=transformation,
|
| 185 |
+
training=training,
|
| 186 |
+
)
|
| 187 |
+
if "segmentation_masks" in data:
|
| 188 |
+
if is_batched:
|
| 189 |
+
data["segmentation_masks"] = (
|
| 190 |
+
self.transform_segmentation_masks(
|
| 191 |
+
data["segmentation_masks"],
|
| 192 |
+
transformation=transformation,
|
| 193 |
+
training=training,
|
| 194 |
+
)
|
| 195 |
+
)
|
| 196 |
+
else:
|
| 197 |
+
data["segmentation_masks"] = (
|
| 198 |
+
self.transform_single_segmentation_mask(
|
| 199 |
+
data["segmentation_masks"],
|
| 200 |
+
transformation=transformation,
|
| 201 |
+
training=training,
|
| 202 |
+
)
|
| 203 |
+
)
|
| 204 |
+
return data
|
| 205 |
+
|
| 206 |
+
# `data` is just images.
|
| 207 |
+
if self._is_batched(data):
|
| 208 |
+
return self.transform_images(
|
| 209 |
+
self.backend.convert_to_tensor(data),
|
| 210 |
+
transformation=transformation,
|
| 211 |
+
training=training,
|
| 212 |
+
)
|
| 213 |
+
return self.transform_single_image(
|
| 214 |
+
self.backend.convert_to_tensor(data),
|
| 215 |
+
transformation=transformation,
|
| 216 |
+
training=training,
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
def _format_single_input_bounding_box(self, bounding_box):
|
| 220 |
+
for key in bounding_box:
|
| 221 |
+
if key == "labels":
|
| 222 |
+
bounding_box[key] = self.backend.numpy.expand_dims(
|
| 223 |
+
bounding_box[key], axis=0
|
| 224 |
+
)
|
| 225 |
+
if key == "boxes":
|
| 226 |
+
bounding_box[key] = self.backend.numpy.expand_dims(
|
| 227 |
+
bounding_box[key], axis=0
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
return bounding_box
|
| 231 |
+
|
| 232 |
+
def _format_single_output_bounding_box(self, bounding_boxes):
|
| 233 |
+
for key in bounding_boxes:
|
| 234 |
+
if key == "labels":
|
| 235 |
+
bounding_boxes[key] = self.backend.numpy.squeeze(
|
| 236 |
+
bounding_boxes[key], axis=0
|
| 237 |
+
)
|
| 238 |
+
if key == "boxes":
|
| 239 |
+
bounding_boxes[key] = self.backend.numpy.squeeze(
|
| 240 |
+
bounding_boxes[key], axis=0
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
return bounding_boxes
|
| 244 |
+
|
| 245 |
+
def get_config(self):
|
| 246 |
+
config = super().get_config()
|
| 247 |
+
if self.bounding_box_format is not None:
|
| 248 |
+
config.update(
|
| 249 |
+
{
|
| 250 |
+
"bounding_box_format": self.bounding_box_format,
|
| 251 |
+
}
|
| 252 |
+
)
|
| 253 |
+
return config
|
| 254 |
+
|
| 255 |
+
def _transform_value_range(
|
| 256 |
+
self, images, original_range, target_range, dtype="float32"
|
| 257 |
+
):
|
| 258 |
+
"""Convert input values from `original_range` to `target_range`.
|
| 259 |
+
|
| 260 |
+
This function is intended to be used in preprocessing layers that
|
| 261 |
+
rely upon color values. This allows us to assume internally that
|
| 262 |
+
the input tensor is always in the range `(0, 255)`.
|
| 263 |
+
|
| 264 |
+
Args:
|
| 265 |
+
images: the set of images to transform to the target range.
|
| 266 |
+
original_range: the value range to transform from.
|
| 267 |
+
target_range: the value range to transform to.
|
| 268 |
+
dtype: the dtype to compute the conversion with,
|
| 269 |
+
defaults to "float32".
|
| 270 |
+
|
| 271 |
+
Returns:
|
| 272 |
+
a new Tensor with values in the target range.
|
| 273 |
+
|
| 274 |
+
Example:
|
| 275 |
+
|
| 276 |
+
```python
|
| 277 |
+
original_range = [0, 1]
|
| 278 |
+
target_range = [0, 255]
|
| 279 |
+
images = layer.preprocessing.transform_value_range(
|
| 280 |
+
images,
|
| 281 |
+
original_range,
|
| 282 |
+
target_range
|
| 283 |
+
)
|
| 284 |
+
images = ops.minimum(images + 10, 255)
|
| 285 |
+
images = layer.preprocessing.transform_value_range(
|
| 286 |
+
images,
|
| 287 |
+
target_range,
|
| 288 |
+
original_range
|
| 289 |
+
)
|
| 290 |
+
```
|
| 291 |
+
"""
|
| 292 |
+
if (
|
| 293 |
+
original_range[0] == target_range[0]
|
| 294 |
+
and original_range[1] == target_range[1]
|
| 295 |
+
):
|
| 296 |
+
return images
|
| 297 |
+
|
| 298 |
+
images = self.backend.cast(images, dtype=dtype)
|
| 299 |
+
original_min_value, original_max_value = self._unwrap_value_range(
|
| 300 |
+
original_range, dtype=dtype
|
| 301 |
+
)
|
| 302 |
+
target_min_value, target_max_value = self._unwrap_value_range(
|
| 303 |
+
target_range, dtype=dtype
|
| 304 |
+
)
|
| 305 |
+
|
| 306 |
+
# images in the [0, 1] scale
|
| 307 |
+
images = (images - original_min_value) / (
|
| 308 |
+
original_max_value - original_min_value
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
scale_factor = target_max_value - target_min_value
|
| 312 |
+
return (images * scale_factor) + target_min_value
|
| 313 |
+
|
| 314 |
+
def _unwrap_value_range(self, value_range, dtype="float32"):
|
| 315 |
+
min_value, max_value = value_range
|
| 316 |
+
min_value = self.backend.cast(min_value, dtype=dtype)
|
| 317 |
+
max_value = self.backend.cast(max_value, dtype=dtype)
|
| 318 |
+
return min_value, max_value
|
| 319 |
+
|
| 320 |
+
def _compute_affine_matrix(
|
| 321 |
+
self,
|
| 322 |
+
center_x,
|
| 323 |
+
center_y,
|
| 324 |
+
angle,
|
| 325 |
+
translate_x,
|
| 326 |
+
translate_y,
|
| 327 |
+
scale,
|
| 328 |
+
shear_x,
|
| 329 |
+
shear_y,
|
| 330 |
+
height,
|
| 331 |
+
width,
|
| 332 |
+
):
|
| 333 |
+
"""
|
| 334 |
+
# Scaling Shear Rotation
|
| 335 |
+
# [sx 0 0] [1 shx 0] [cos(θ) -sin(θ) 0]
|
| 336 |
+
# M = [0 sy 0] * [shy 1 0] * [sin(θ) cos(θ) 0]
|
| 337 |
+
# [0 0 1] [0 0 1] [0 0 1]
|
| 338 |
+
|
| 339 |
+
# a0 = sx * (cos(θ) + shx * sin(θ))
|
| 340 |
+
# a1 = sx * (-sin(θ) + shx * cos(θ))
|
| 341 |
+
# a2 = tx + cx - cx * a0 - cy * a1
|
| 342 |
+
# b0 = sy * (shy * cos(θ) + sin(θ))
|
| 343 |
+
# b1 = sy * (shy * -sin(θ) + cos(θ))
|
| 344 |
+
# b2 = ty + cy - cx * b0 - cy * b1
|
| 345 |
+
"""
|
| 346 |
+
ops = self.backend
|
| 347 |
+
|
| 348 |
+
degree_to_radian_factor = ops.convert_to_tensor(math.pi / 180.0)
|
| 349 |
+
|
| 350 |
+
angle = angle * degree_to_radian_factor
|
| 351 |
+
shear_x = shear_x * degree_to_radian_factor
|
| 352 |
+
shear_y = shear_y * degree_to_radian_factor
|
| 353 |
+
|
| 354 |
+
batch_size = ops.shape(angle)[0]
|
| 355 |
+
dtype = angle.dtype
|
| 356 |
+
width = ops.cast(width, dtype)
|
| 357 |
+
height = ops.cast(height, dtype)
|
| 358 |
+
cx = center_x * (width - 1)
|
| 359 |
+
cy = center_y * (height - 1)
|
| 360 |
+
|
| 361 |
+
cos_theta = ops.numpy.cos(angle)
|
| 362 |
+
sin_theta = ops.numpy.sin(angle)
|
| 363 |
+
shear_x = ops.numpy.tan(shear_x)
|
| 364 |
+
shear_y = ops.numpy.tan(shear_y)
|
| 365 |
+
|
| 366 |
+
a0 = scale * (cos_theta + shear_x * sin_theta)
|
| 367 |
+
a1 = scale * (-sin_theta + shear_x * cos_theta)
|
| 368 |
+
a2 = translate_x + cx - cx * a0 - cy * a1
|
| 369 |
+
b0 = scale * (shear_y * cos_theta + sin_theta)
|
| 370 |
+
b1 = scale * (shear_y * -sin_theta + cos_theta)
|
| 371 |
+
b2 = translate_y + cy - cx * b0 - cy * b1
|
| 372 |
+
affine_matrix = ops.numpy.concatenate(
|
| 373 |
+
[
|
| 374 |
+
a0[:, None],
|
| 375 |
+
a1[:, None],
|
| 376 |
+
a2[:, None],
|
| 377 |
+
b0[:, None],
|
| 378 |
+
b1[:, None],
|
| 379 |
+
b2[:, None],
|
| 380 |
+
ops.numpy.zeros((batch_size, 2)),
|
| 381 |
+
],
|
| 382 |
+
axis=1,
|
| 383 |
+
)
|
| 384 |
+
|
| 385 |
+
return affine_matrix
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/center_crop.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 3 |
+
BaseImagePreprocessingLayer,
|
| 4 |
+
)
|
| 5 |
+
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
|
| 6 |
+
clip_to_image_size,
|
| 7 |
+
)
|
| 8 |
+
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
|
| 9 |
+
convert_format,
|
| 10 |
+
)
|
| 11 |
+
from keras.src.utils import image_utils
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@keras_export("keras.layers.CenterCrop")
|
| 15 |
+
class CenterCrop(BaseImagePreprocessingLayer):
|
| 16 |
+
"""A preprocessing layer which crops images.
|
| 17 |
+
|
| 18 |
+
This layers crops the central portion of the images to a target size. If an
|
| 19 |
+
image is smaller than the target size, it will be resized and cropped
|
| 20 |
+
so as to return the largest possible window in the image that matches
|
| 21 |
+
the target aspect ratio.
|
| 22 |
+
|
| 23 |
+
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).
|
| 24 |
+
|
| 25 |
+
Input shape:
|
| 26 |
+
3D (unbatched) or 4D (batched) tensor with shape:
|
| 27 |
+
`(..., height, width, channels)`, in `"channels_last"` format,
|
| 28 |
+
or `(..., channels, height, width)`, in `"channels_first"` format.
|
| 29 |
+
|
| 30 |
+
Output shape:
|
| 31 |
+
3D (unbatched) or 4D (batched) tensor with shape:
|
| 32 |
+
`(..., target_height, target_width, channels)`,
|
| 33 |
+
or `(..., channels, target_height, target_width)`,
|
| 34 |
+
in `"channels_first"` format.
|
| 35 |
+
|
| 36 |
+
If the input height/width is even and the target height/width is odd (or
|
| 37 |
+
inversely), the input image is left-padded by 1 pixel.
|
| 38 |
+
|
| 39 |
+
**Note:** This layer is safe to use inside a `tf.data` pipeline
|
| 40 |
+
(independently of which backend you're using).
|
| 41 |
+
|
| 42 |
+
Args:
|
| 43 |
+
height: Integer, the height of the output shape.
|
| 44 |
+
width: Integer, the width of the output shape.
|
| 45 |
+
data_format: string, either `"channels_last"` or `"channels_first"`.
|
| 46 |
+
The ordering of the dimensions in the inputs. `"channels_last"`
|
| 47 |
+
corresponds to inputs with shape `(batch, height, width, channels)`
|
| 48 |
+
while `"channels_first"` corresponds to inputs with shape
|
| 49 |
+
`(batch, channels, height, width)`. It defaults to the
|
| 50 |
+
`image_data_format` value found in your Keras config file at
|
| 51 |
+
`~/.keras/keras.json`. If you never set it, then it will be
|
| 52 |
+
`"channels_last"`.
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
_USE_BASE_FACTOR = False
|
| 56 |
+
|
| 57 |
+
def __init__(self, height, width, data_format=None, **kwargs):
|
| 58 |
+
super().__init__(data_format=data_format, **kwargs)
|
| 59 |
+
self.height = height
|
| 60 |
+
self.width = width
|
| 61 |
+
|
| 62 |
+
def get_random_transformation(self, data, training=True, seed=None):
|
| 63 |
+
if isinstance(data, dict):
|
| 64 |
+
images = data["images"]
|
| 65 |
+
else:
|
| 66 |
+
images = data
|
| 67 |
+
shape = self.backend.core.shape(images)
|
| 68 |
+
return {"input_shape": shape}
|
| 69 |
+
|
| 70 |
+
def transform_labels(self, labels, transformation, training=True):
|
| 71 |
+
return labels
|
| 72 |
+
|
| 73 |
+
def transform_bounding_boxes(
|
| 74 |
+
self, bounding_boxes, transformation, training=True
|
| 75 |
+
):
|
| 76 |
+
def _get_height_width(input_shape):
|
| 77 |
+
if self.data_format == "channels_first":
|
| 78 |
+
input_height = input_shape[-2]
|
| 79 |
+
input_width = input_shape[-1]
|
| 80 |
+
else:
|
| 81 |
+
input_height = input_shape[-3]
|
| 82 |
+
input_width = input_shape[-2]
|
| 83 |
+
return input_height, input_width
|
| 84 |
+
|
| 85 |
+
def _get_clipped_bbox(bounding_boxes, h_end, h_start, w_end, w_start):
|
| 86 |
+
bboxes = bounding_boxes["boxes"]
|
| 87 |
+
x1, y1, x2, y2 = self.backend.numpy.split(bboxes, 4, axis=-1)
|
| 88 |
+
x1 = self.backend.numpy.clip(x1, w_start, w_end) - w_start
|
| 89 |
+
y1 = self.backend.numpy.clip(y1, h_start, h_end) - h_start
|
| 90 |
+
x2 = self.backend.numpy.clip(x2, w_start, w_end) - w_start
|
| 91 |
+
y2 = self.backend.numpy.clip(y2, h_start, h_end) - h_start
|
| 92 |
+
bounding_boxes["boxes"] = self.backend.numpy.concatenate(
|
| 93 |
+
[x1, y1, x2, y2], axis=-1
|
| 94 |
+
)
|
| 95 |
+
return bounding_boxes
|
| 96 |
+
|
| 97 |
+
input_shape = transformation["input_shape"]
|
| 98 |
+
|
| 99 |
+
init_height, init_width = _get_height_width(input_shape)
|
| 100 |
+
|
| 101 |
+
bounding_boxes = convert_format(
|
| 102 |
+
bounding_boxes,
|
| 103 |
+
source=self.bounding_box_format,
|
| 104 |
+
target="xyxy",
|
| 105 |
+
height=init_height,
|
| 106 |
+
width=init_width,
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
h_diff = init_height - self.height
|
| 110 |
+
w_diff = init_width - self.width
|
| 111 |
+
|
| 112 |
+
if h_diff >= 0 and w_diff >= 0:
|
| 113 |
+
h_start = int(h_diff / 2)
|
| 114 |
+
w_start = int(w_diff / 2)
|
| 115 |
+
|
| 116 |
+
h_end = h_start + self.height
|
| 117 |
+
w_end = w_start + self.width
|
| 118 |
+
|
| 119 |
+
bounding_boxes = _get_clipped_bbox(
|
| 120 |
+
bounding_boxes, h_end, h_start, w_end, w_start
|
| 121 |
+
)
|
| 122 |
+
else:
|
| 123 |
+
width = init_width
|
| 124 |
+
height = init_height
|
| 125 |
+
target_height = self.height
|
| 126 |
+
target_width = self.width
|
| 127 |
+
|
| 128 |
+
crop_height = int(float(width * target_height) / target_width)
|
| 129 |
+
crop_height = max(min(height, crop_height), 1)
|
| 130 |
+
crop_width = int(float(height * target_width) / target_height)
|
| 131 |
+
crop_width = max(min(width, crop_width), 1)
|
| 132 |
+
crop_box_hstart = int(float(height - crop_height) / 2)
|
| 133 |
+
crop_box_wstart = int(float(width - crop_width) / 2)
|
| 134 |
+
|
| 135 |
+
h_start = crop_box_hstart
|
| 136 |
+
w_start = crop_box_wstart
|
| 137 |
+
|
| 138 |
+
h_end = crop_box_hstart + crop_height
|
| 139 |
+
w_end = crop_box_wstart + crop_width
|
| 140 |
+
bounding_boxes = _get_clipped_bbox(
|
| 141 |
+
bounding_boxes, h_end, h_start, w_end, w_start
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
bounding_boxes = convert_format(
|
| 145 |
+
bounding_boxes,
|
| 146 |
+
source="xyxy",
|
| 147 |
+
target="rel_xyxy",
|
| 148 |
+
height=crop_height,
|
| 149 |
+
width=crop_width,
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
bounding_boxes = convert_format(
|
| 153 |
+
bounding_boxes,
|
| 154 |
+
source="rel_xyxy",
|
| 155 |
+
target="xyxy",
|
| 156 |
+
height=self.height,
|
| 157 |
+
width=self.width,
|
| 158 |
+
)
|
| 159 |
+
|
| 160 |
+
bounding_boxes = clip_to_image_size(
|
| 161 |
+
bounding_boxes=bounding_boxes,
|
| 162 |
+
height=self.height,
|
| 163 |
+
width=self.width,
|
| 164 |
+
bounding_box_format="xyxy",
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
bounding_boxes = convert_format(
|
| 168 |
+
bounding_boxes,
|
| 169 |
+
source="xyxy",
|
| 170 |
+
target=self.bounding_box_format,
|
| 171 |
+
height=self.height,
|
| 172 |
+
width=self.width,
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
return bounding_boxes
|
| 176 |
+
|
| 177 |
+
def transform_segmentation_masks(
|
| 178 |
+
self, segmentation_masks, transformation, training=True
|
| 179 |
+
):
|
| 180 |
+
return self.transform_images(
|
| 181 |
+
segmentation_masks, transformation, training=training
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
def transform_images(self, images, transformation=None, training=True):
|
| 185 |
+
inputs = self.backend.cast(images, self.compute_dtype)
|
| 186 |
+
if self.data_format == "channels_first":
|
| 187 |
+
init_height = inputs.shape[-2]
|
| 188 |
+
init_width = inputs.shape[-1]
|
| 189 |
+
else:
|
| 190 |
+
init_height = inputs.shape[-3]
|
| 191 |
+
init_width = inputs.shape[-2]
|
| 192 |
+
|
| 193 |
+
if init_height is None or init_width is None:
|
| 194 |
+
# Dynamic size case. TODO.
|
| 195 |
+
raise ValueError(
|
| 196 |
+
"At this time, CenterCrop can only "
|
| 197 |
+
"process images with a static spatial "
|
| 198 |
+
f"shape. Received: inputs.shape={inputs.shape}"
|
| 199 |
+
)
|
| 200 |
+
|
| 201 |
+
h_diff = init_height - self.height
|
| 202 |
+
w_diff = init_width - self.width
|
| 203 |
+
|
| 204 |
+
h_start = int(h_diff / 2)
|
| 205 |
+
w_start = int(w_diff / 2)
|
| 206 |
+
|
| 207 |
+
if h_diff >= 0 and w_diff >= 0:
|
| 208 |
+
if len(inputs.shape) == 4:
|
| 209 |
+
if self.data_format == "channels_first":
|
| 210 |
+
return inputs[
|
| 211 |
+
:,
|
| 212 |
+
:,
|
| 213 |
+
h_start : h_start + self.height,
|
| 214 |
+
w_start : w_start + self.width,
|
| 215 |
+
]
|
| 216 |
+
return inputs[
|
| 217 |
+
:,
|
| 218 |
+
h_start : h_start + self.height,
|
| 219 |
+
w_start : w_start + self.width,
|
| 220 |
+
:,
|
| 221 |
+
]
|
| 222 |
+
elif len(inputs.shape) == 3:
|
| 223 |
+
if self.data_format == "channels_first":
|
| 224 |
+
return inputs[
|
| 225 |
+
:,
|
| 226 |
+
h_start : h_start + self.height,
|
| 227 |
+
w_start : w_start + self.width,
|
| 228 |
+
]
|
| 229 |
+
return inputs[
|
| 230 |
+
h_start : h_start + self.height,
|
| 231 |
+
w_start : w_start + self.width,
|
| 232 |
+
:,
|
| 233 |
+
]
|
| 234 |
+
return image_utils.smart_resize(
|
| 235 |
+
inputs,
|
| 236 |
+
[self.height, self.width],
|
| 237 |
+
data_format=self.data_format,
|
| 238 |
+
backend_module=self.backend,
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
def compute_output_shape(self, input_shape):
|
| 242 |
+
input_shape = list(input_shape)
|
| 243 |
+
if isinstance(input_shape[0], (list, tuple)) or len(
|
| 244 |
+
input_shape
|
| 245 |
+
) not in (3, 4):
|
| 246 |
+
raise ValueError(
|
| 247 |
+
"`input_shape` must be a non-nested tuple or list "
|
| 248 |
+
"of rank-1 with size 3 (unbatched) or 4 (batched). "
|
| 249 |
+
)
|
| 250 |
+
if len(input_shape) == 4:
|
| 251 |
+
if self.data_format == "channels_last":
|
| 252 |
+
input_shape[1] = self.height
|
| 253 |
+
input_shape[2] = self.width
|
| 254 |
+
else:
|
| 255 |
+
input_shape[2] = self.height
|
| 256 |
+
input_shape[3] = self.width
|
| 257 |
+
else:
|
| 258 |
+
if self.data_format == "channels_last":
|
| 259 |
+
input_shape[0] = self.height
|
| 260 |
+
input_shape[1] = self.width
|
| 261 |
+
else:
|
| 262 |
+
input_shape[1] = self.height
|
| 263 |
+
input_shape[2] = self.width
|
| 264 |
+
return tuple(input_shape)
|
| 265 |
+
|
| 266 |
+
def get_config(self):
|
| 267 |
+
base_config = super().get_config()
|
| 268 |
+
config = {
|
| 269 |
+
"height": self.height,
|
| 270 |
+
"width": self.width,
|
| 271 |
+
"data_format": self.data_format,
|
| 272 |
+
}
|
| 273 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/equalization.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 4 |
+
BaseImagePreprocessingLayer,
|
| 5 |
+
)
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.layers.Equalization")
|
| 9 |
+
class Equalization(BaseImagePreprocessingLayer):
|
| 10 |
+
"""Preprocessing layer for histogram equalization on image channels.
|
| 11 |
+
|
| 12 |
+
Histogram equalization is a technique to adjust image intensities to
|
| 13 |
+
enhance contrast by effectively spreading out the most frequent
|
| 14 |
+
intensity values. This layer applies equalization on a channel-wise
|
| 15 |
+
basis, which can improve the visibility of details in images.
|
| 16 |
+
|
| 17 |
+
This layer works with both grayscale and color images, performing
|
| 18 |
+
equalization independently on each color channel. At inference time,
|
| 19 |
+
the equalization is consistently applied.
|
| 20 |
+
|
| 21 |
+
**Note:** This layer is safe to use inside a `tf.data` pipeline
|
| 22 |
+
(independently of which backend you're using).
|
| 23 |
+
|
| 24 |
+
Args:
|
| 25 |
+
value_range: Optional list/tuple of 2 floats specifying the lower
|
| 26 |
+
and upper limits of the input data values. Defaults to `[0, 255]`.
|
| 27 |
+
If the input image has been scaled, use the appropriate range
|
| 28 |
+
(e.g., `[0.0, 1.0]`). The equalization will be scaled to this
|
| 29 |
+
range, and output values will be clipped accordingly.
|
| 30 |
+
bins: Integer specifying the number of histogram bins to use for
|
| 31 |
+
equalization. Defaults to 256, which is suitable for 8-bit images.
|
| 32 |
+
Larger values can provide more granular intensity redistribution.
|
| 33 |
+
|
| 34 |
+
Input shape:
|
| 35 |
+
3D (unbatched) or 4D (batched) tensor with shape:
|
| 36 |
+
`(..., height, width, channels)`, in `"channels_last"` format,
|
| 37 |
+
or `(..., channels, height, width)`, in `"channels_first"` format.
|
| 38 |
+
|
| 39 |
+
Output shape:
|
| 40 |
+
3D (unbatched) or 4D (batched) tensor with shape:
|
| 41 |
+
`(..., target_height, target_width, channels)`,
|
| 42 |
+
or `(..., channels, target_height, target_width)`,
|
| 43 |
+
in `"channels_first"` format.
|
| 44 |
+
|
| 45 |
+
Example:
|
| 46 |
+
|
| 47 |
+
```python
|
| 48 |
+
# Create an equalization layer for standard 8-bit images
|
| 49 |
+
equalizer = keras.layers.Equalization()
|
| 50 |
+
|
| 51 |
+
# An image with uneven intensity distribution
|
| 52 |
+
image = [...] # your input image
|
| 53 |
+
|
| 54 |
+
# Apply histogram equalization
|
| 55 |
+
equalized_image = equalizer(image)
|
| 56 |
+
|
| 57 |
+
# For images with custom value range
|
| 58 |
+
custom_equalizer = keras.layers.Equalization(
|
| 59 |
+
value_range=[0.0, 1.0], # for normalized images
|
| 60 |
+
bins=128 # fewer bins for more subtle equalization
|
| 61 |
+
)
|
| 62 |
+
custom_equalized = custom_equalizer(normalized_image)
|
| 63 |
+
```
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
def __init__(
|
| 67 |
+
self, value_range=(0, 255), bins=256, data_format=None, **kwargs
|
| 68 |
+
):
|
| 69 |
+
super().__init__(**kwargs)
|
| 70 |
+
self.bins = bins
|
| 71 |
+
self._set_value_range(value_range)
|
| 72 |
+
self.data_format = backend.standardize_data_format(data_format)
|
| 73 |
+
|
| 74 |
+
def _set_value_range(self, value_range):
|
| 75 |
+
if not isinstance(value_range, (tuple, list)):
|
| 76 |
+
raise ValueError(
|
| 77 |
+
self._VALUE_RANGE_VALIDATION_ERROR
|
| 78 |
+
+ f"Received: value_range={value_range}"
|
| 79 |
+
)
|
| 80 |
+
if len(value_range) != 2:
|
| 81 |
+
raise ValueError(
|
| 82 |
+
self._VALUE_RANGE_VALIDATION_ERROR
|
| 83 |
+
+ f"Received: value_range={value_range}"
|
| 84 |
+
)
|
| 85 |
+
self.value_range = sorted(value_range)
|
| 86 |
+
|
| 87 |
+
def _custom_histogram_fixed_width(self, values, value_range, nbins):
|
| 88 |
+
values = self.backend.cast(values, "float32")
|
| 89 |
+
value_min, value_max = value_range
|
| 90 |
+
value_min = self.backend.cast(value_min, "float32")
|
| 91 |
+
value_max = self.backend.cast(value_max, "float32")
|
| 92 |
+
|
| 93 |
+
scaled = (values - value_min) * (nbins - 1) / (value_max - value_min)
|
| 94 |
+
indices = self.backend.cast(scaled, "int32")
|
| 95 |
+
indices = self.backend.numpy.clip(indices, 0, nbins - 1)
|
| 96 |
+
flat_indices = self.backend.numpy.reshape(indices, [-1])
|
| 97 |
+
|
| 98 |
+
if backend.backend() == "jax":
|
| 99 |
+
# for JAX bincount is never jittable because of output shape
|
| 100 |
+
histogram = self.backend.numpy.zeros(nbins, dtype="int32")
|
| 101 |
+
for i in range(nbins):
|
| 102 |
+
matches = self.backend.cast(
|
| 103 |
+
self.backend.numpy.equal(flat_indices, i), "int32"
|
| 104 |
+
)
|
| 105 |
+
bin_count = self.backend.numpy.sum(matches)
|
| 106 |
+
one_hot = self.backend.cast(
|
| 107 |
+
self.backend.numpy.arange(nbins) == i, "int32"
|
| 108 |
+
)
|
| 109 |
+
histogram = histogram + (bin_count * one_hot)
|
| 110 |
+
return histogram
|
| 111 |
+
else:
|
| 112 |
+
# TensorFlow/PyTorch/NumPy implementation using bincount
|
| 113 |
+
return self.backend.numpy.bincount(
|
| 114 |
+
flat_indices,
|
| 115 |
+
minlength=nbins,
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
def _scale_values(self, values, source_range, target_range):
|
| 119 |
+
source_min, source_max = source_range
|
| 120 |
+
target_min, target_max = target_range
|
| 121 |
+
scale = (target_max - target_min) / (source_max - source_min)
|
| 122 |
+
offset = target_min - source_min * scale
|
| 123 |
+
return values * scale + offset
|
| 124 |
+
|
| 125 |
+
def _equalize_channel(self, channel, value_range):
|
| 126 |
+
if value_range != (0, 255):
|
| 127 |
+
channel = self._scale_values(channel, value_range, (0, 255))
|
| 128 |
+
|
| 129 |
+
hist = self._custom_histogram_fixed_width(
|
| 130 |
+
channel, value_range=(0, 255), nbins=self.bins
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
nonzero_bins = self.backend.numpy.count_nonzero(hist)
|
| 134 |
+
equalized = self.backend.numpy.where(
|
| 135 |
+
nonzero_bins <= 1, channel, self._apply_equalization(channel, hist)
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
if value_range != (0, 255):
|
| 139 |
+
equalized = self._scale_values(equalized, (0, 255), value_range)
|
| 140 |
+
|
| 141 |
+
return equalized
|
| 142 |
+
|
| 143 |
+
def _apply_equalization(self, channel, hist):
|
| 144 |
+
cdf = self.backend.numpy.cumsum(hist)
|
| 145 |
+
|
| 146 |
+
if self.backend.name == "jax":
|
| 147 |
+
mask = cdf > 0
|
| 148 |
+
first_nonzero_idx = self.backend.numpy.argmax(mask)
|
| 149 |
+
cdf_min = self.backend.numpy.take(cdf, first_nonzero_idx)
|
| 150 |
+
else:
|
| 151 |
+
cdf_min = self.backend.numpy.take(
|
| 152 |
+
cdf, self.backend.numpy.nonzero(cdf)[0][0]
|
| 153 |
+
)
|
| 154 |
+
|
| 155 |
+
denominator = cdf[-1] - cdf_min
|
| 156 |
+
denominator = self.backend.numpy.where(
|
| 157 |
+
denominator == 0,
|
| 158 |
+
self.backend.numpy.ones_like(1, dtype=denominator.dtype),
|
| 159 |
+
denominator,
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
lookup_table = ((cdf - cdf_min) * 255) / denominator
|
| 163 |
+
lookup_table = self.backend.numpy.clip(
|
| 164 |
+
self.backend.numpy.round(lookup_table), 0, 255
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
scaled_channel = (channel / 255.0) * (self.bins - 1)
|
| 168 |
+
indices = self.backend.cast(
|
| 169 |
+
self.backend.numpy.clip(scaled_channel, 0, self.bins - 1), "int32"
|
| 170 |
+
)
|
| 171 |
+
return self.backend.numpy.take(lookup_table, indices)
|
| 172 |
+
|
| 173 |
+
def transform_images(self, images, transformation, training=True):
|
| 174 |
+
if training:
|
| 175 |
+
images = self.backend.cast(images, self.compute_dtype)
|
| 176 |
+
|
| 177 |
+
if self.data_format == "channels_first":
|
| 178 |
+
channels = []
|
| 179 |
+
for i in range(self.backend.core.shape(images)[-3]):
|
| 180 |
+
channel = images[..., i, :, :]
|
| 181 |
+
equalized = self._equalize_channel(
|
| 182 |
+
channel, self.value_range
|
| 183 |
+
)
|
| 184 |
+
channels.append(equalized)
|
| 185 |
+
equalized_images = self.backend.numpy.stack(channels, axis=-3)
|
| 186 |
+
else:
|
| 187 |
+
channels = []
|
| 188 |
+
for i in range(self.backend.core.shape(images)[-1]):
|
| 189 |
+
channel = images[..., i]
|
| 190 |
+
equalized = self._equalize_channel(
|
| 191 |
+
channel, self.value_range
|
| 192 |
+
)
|
| 193 |
+
channels.append(equalized)
|
| 194 |
+
equalized_images = self.backend.numpy.stack(channels, axis=-1)
|
| 195 |
+
|
| 196 |
+
return self.backend.cast(equalized_images, self.compute_dtype)
|
| 197 |
+
return images
|
| 198 |
+
|
| 199 |
+
def compute_output_shape(self, input_shape):
|
| 200 |
+
return input_shape
|
| 201 |
+
|
| 202 |
+
def compute_output_spec(self, inputs, **kwargs):
|
| 203 |
+
return inputs
|
| 204 |
+
|
| 205 |
+
def transform_bounding_boxes(
|
| 206 |
+
self,
|
| 207 |
+
bounding_boxes,
|
| 208 |
+
transformation,
|
| 209 |
+
training=True,
|
| 210 |
+
):
|
| 211 |
+
return bounding_boxes
|
| 212 |
+
|
| 213 |
+
def transform_labels(self, labels, transformation, training=True):
|
| 214 |
+
return labels
|
| 215 |
+
|
| 216 |
+
def transform_segmentation_masks(
|
| 217 |
+
self, segmentation_masks, transformation, training=True
|
| 218 |
+
):
|
| 219 |
+
return segmentation_masks
|
| 220 |
+
|
| 221 |
+
def get_config(self):
|
| 222 |
+
config = super().get_config()
|
| 223 |
+
config.update({"bins": self.bins, "value_range": self.value_range})
|
| 224 |
+
return config
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 3 |
+
BaseImagePreprocessingLayer,
|
| 4 |
+
)
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@keras_export("keras.layers.MaxNumBoundingBoxes")
|
| 8 |
+
class MaxNumBoundingBoxes(BaseImagePreprocessingLayer):
|
| 9 |
+
"""Ensure the maximum number of bounding boxes.
|
| 10 |
+
|
| 11 |
+
Args:
|
| 12 |
+
max_number: Desired output number of bounding boxes.
|
| 13 |
+
padding_value: The padding value of the `boxes` and `labels` in
|
| 14 |
+
`bounding_boxes`. Defaults to `-1`.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
def __init__(self, max_number, fill_value=-1, **kwargs):
|
| 18 |
+
super().__init__(**kwargs)
|
| 19 |
+
self.max_number = int(max_number)
|
| 20 |
+
self.fill_value = int(fill_value)
|
| 21 |
+
|
| 22 |
+
def transform_images(self, images, transformation=None, training=True):
|
| 23 |
+
return images
|
| 24 |
+
|
| 25 |
+
def transform_labels(self, labels, transformation=None, training=True):
|
| 26 |
+
return labels
|
| 27 |
+
|
| 28 |
+
def transform_bounding_boxes(
|
| 29 |
+
self, bounding_boxes, transformation, training=True
|
| 30 |
+
):
|
| 31 |
+
ops = self.backend
|
| 32 |
+
boxes = bounding_boxes["boxes"]
|
| 33 |
+
labels = bounding_boxes["labels"]
|
| 34 |
+
boxes_shape = ops.shape(boxes)
|
| 35 |
+
batch_size = boxes_shape[0]
|
| 36 |
+
num_boxes = boxes_shape[1]
|
| 37 |
+
|
| 38 |
+
# Get pad size
|
| 39 |
+
pad_size = ops.numpy.maximum(
|
| 40 |
+
ops.numpy.subtract(self.max_number, num_boxes), 0
|
| 41 |
+
)
|
| 42 |
+
boxes = boxes[:, : self.max_number, ...]
|
| 43 |
+
boxes = ops.numpy.pad(
|
| 44 |
+
boxes,
|
| 45 |
+
[[0, 0], [0, pad_size], [0, 0]],
|
| 46 |
+
constant_values=self.fill_value,
|
| 47 |
+
)
|
| 48 |
+
labels = labels[:, : self.max_number]
|
| 49 |
+
labels = ops.numpy.pad(
|
| 50 |
+
labels, [[0, 0], [0, pad_size]], constant_values=self.fill_value
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
# Ensure shape
|
| 54 |
+
boxes = ops.numpy.reshape(boxes, [batch_size, self.max_number, 4])
|
| 55 |
+
labels = ops.numpy.reshape(labels, [batch_size, self.max_number])
|
| 56 |
+
|
| 57 |
+
bounding_boxes = bounding_boxes.copy()
|
| 58 |
+
bounding_boxes["boxes"] = boxes
|
| 59 |
+
bounding_boxes["labels"] = labels
|
| 60 |
+
return bounding_boxes
|
| 61 |
+
|
| 62 |
+
def transform_segmentation_masks(
|
| 63 |
+
self, segmentation_masks, transformation=None, training=True
|
| 64 |
+
):
|
| 65 |
+
return self.transform_images(segmentation_masks)
|
| 66 |
+
|
| 67 |
+
def compute_output_shape(self, input_shape):
|
| 68 |
+
if isinstance(input_shape, dict) and "bounding_boxes" in input_shape:
|
| 69 |
+
input_keys = set(input_shape["bounding_boxes"].keys())
|
| 70 |
+
extra_keys = input_keys - set(("boxes", "labels"))
|
| 71 |
+
if extra_keys:
|
| 72 |
+
raise KeyError(
|
| 73 |
+
"There are unsupported keys in `bounding_boxes`: "
|
| 74 |
+
f"{list(extra_keys)}. "
|
| 75 |
+
"Only `boxes` and `labels` are supported."
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
boxes_shape = list(input_shape["bounding_boxes"]["boxes"])
|
| 79 |
+
boxes_shape[1] = self.max_number
|
| 80 |
+
labels_shape = list(input_shape["bounding_boxes"]["labels"])
|
| 81 |
+
labels_shape[1] = self.max_number
|
| 82 |
+
input_shape["bounding_boxes"]["boxes"] = boxes_shape
|
| 83 |
+
input_shape["bounding_boxes"]["labels"] = labels_shape
|
| 84 |
+
return input_shape
|
| 85 |
+
|
| 86 |
+
def get_config(self):
|
| 87 |
+
config = super().get_config()
|
| 88 |
+
config.update({"max_number": self.max_number})
|
| 89 |
+
return config
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/mix_up.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 4 |
+
BaseImagePreprocessingLayer,
|
| 5 |
+
)
|
| 6 |
+
from keras.src.random import SeedGenerator
|
| 7 |
+
from keras.src.utils import backend_utils
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@keras_export("keras.layers.MixUp")
|
| 11 |
+
class MixUp(BaseImagePreprocessingLayer):
|
| 12 |
+
"""MixUp implements the MixUp data augmentation technique.
|
| 13 |
+
|
| 14 |
+
Args:
|
| 15 |
+
alpha: Float between 0 and 1. Controls the blending strength.
|
| 16 |
+
Smaller values mean less mixing, while larger values allow
|
| 17 |
+
for more blending between images. Defaults to 0.2,
|
| 18 |
+
recommended for ImageNet1k classification.
|
| 19 |
+
seed: Integer. Used to create a random seed.
|
| 20 |
+
|
| 21 |
+
References:
|
| 22 |
+
- [MixUp paper](https://arxiv.org/abs/1710.09412).
|
| 23 |
+
- [MixUp for Object Detection paper](https://arxiv.org/pdf/1902.04103).
|
| 24 |
+
|
| 25 |
+
Example:
|
| 26 |
+
```python
|
| 27 |
+
(images, labels), _ = keras.datasets.cifar10.load_data()
|
| 28 |
+
images, labels = images[:8], labels[:8]
|
| 29 |
+
labels = keras.ops.cast(keras.ops.one_hot(labels.flatten(), 10), "float32")
|
| 30 |
+
mix_up = keras.layers.MixUp(alpha=0.2)
|
| 31 |
+
output = mix_up({"images": images, "labels": labels})
|
| 32 |
+
```
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def __init__(self, alpha=0.2, data_format=None, seed=None, **kwargs):
|
| 36 |
+
super().__init__(data_format=data_format, **kwargs)
|
| 37 |
+
self.alpha = alpha
|
| 38 |
+
self.seed = seed
|
| 39 |
+
self.generator = SeedGenerator(seed)
|
| 40 |
+
|
| 41 |
+
def get_random_transformation(self, data, training=True, seed=None):
|
| 42 |
+
if isinstance(data, dict):
|
| 43 |
+
images = data["images"]
|
| 44 |
+
else:
|
| 45 |
+
images = data
|
| 46 |
+
|
| 47 |
+
images_shape = self.backend.shape(images)
|
| 48 |
+
|
| 49 |
+
if len(images_shape) == 3:
|
| 50 |
+
batch_size = 1
|
| 51 |
+
else:
|
| 52 |
+
batch_size = self.backend.shape(images)[0]
|
| 53 |
+
|
| 54 |
+
if seed is None:
|
| 55 |
+
seed = self._get_seed_generator(self.backend._backend)
|
| 56 |
+
|
| 57 |
+
permutation_order = self.backend.random.shuffle(
|
| 58 |
+
self.backend.numpy.arange(0, batch_size, dtype="int64"),
|
| 59 |
+
seed=seed,
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
mix_weight = self.backend.random.beta(
|
| 63 |
+
(batch_size,), self.alpha, self.alpha, seed=seed
|
| 64 |
+
)
|
| 65 |
+
return {
|
| 66 |
+
"mix_weight": mix_weight,
|
| 67 |
+
"permutation_order": permutation_order,
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
def transform_images(self, images, transformation=None, training=True):
|
| 71 |
+
def _mix_up_input(images, transformation):
|
| 72 |
+
images = self.backend.cast(images, self.compute_dtype)
|
| 73 |
+
mix_weight = transformation["mix_weight"]
|
| 74 |
+
permutation_order = transformation["permutation_order"]
|
| 75 |
+
mix_weight = self.backend.cast(
|
| 76 |
+
self.backend.numpy.reshape(mix_weight, [-1, 1, 1, 1]),
|
| 77 |
+
dtype=self.compute_dtype,
|
| 78 |
+
)
|
| 79 |
+
mix_up_images = self.backend.cast(
|
| 80 |
+
self.backend.numpy.take(images, permutation_order, axis=0),
|
| 81 |
+
dtype=self.compute_dtype,
|
| 82 |
+
)
|
| 83 |
+
images = mix_weight * images + (1.0 - mix_weight) * mix_up_images
|
| 84 |
+
return images
|
| 85 |
+
|
| 86 |
+
if training:
|
| 87 |
+
images = _mix_up_input(images, transformation)
|
| 88 |
+
return images
|
| 89 |
+
|
| 90 |
+
def transform_labels(self, labels, transformation, training=True):
|
| 91 |
+
def _mix_up_labels(labels, transformation):
|
| 92 |
+
mix_weight = transformation["mix_weight"]
|
| 93 |
+
permutation_order = transformation["permutation_order"]
|
| 94 |
+
labels_for_mix_up = self.backend.numpy.take(
|
| 95 |
+
labels, permutation_order, axis=0
|
| 96 |
+
)
|
| 97 |
+
mix_weight = self.backend.numpy.reshape(mix_weight, [-1, 1])
|
| 98 |
+
labels = (
|
| 99 |
+
mix_weight * labels + (1.0 - mix_weight) * labels_for_mix_up
|
| 100 |
+
)
|
| 101 |
+
return labels
|
| 102 |
+
|
| 103 |
+
if training:
|
| 104 |
+
labels = _mix_up_labels(labels, transformation)
|
| 105 |
+
return labels
|
| 106 |
+
|
| 107 |
+
def transform_bounding_boxes(
|
| 108 |
+
self,
|
| 109 |
+
bounding_boxes,
|
| 110 |
+
transformation,
|
| 111 |
+
training=True,
|
| 112 |
+
):
|
| 113 |
+
def _mix_up_bounding_boxes(bounding_boxes, transformation):
|
| 114 |
+
if backend_utils.in_tf_graph():
|
| 115 |
+
self.backend.set_backend("tensorflow")
|
| 116 |
+
|
| 117 |
+
permutation_order = transformation["permutation_order"]
|
| 118 |
+
# Make sure we are on cpu for torch tensors.
|
| 119 |
+
permutation_order = ops.convert_to_numpy(permutation_order)
|
| 120 |
+
|
| 121 |
+
boxes, labels = bounding_boxes["boxes"], bounding_boxes["labels"]
|
| 122 |
+
boxes_for_mix_up = self.backend.numpy.take(
|
| 123 |
+
boxes, permutation_order, axis=0
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
labels_for_mix_up = self.backend.numpy.take(
|
| 127 |
+
labels, permutation_order, axis=0
|
| 128 |
+
)
|
| 129 |
+
boxes = self.backend.numpy.concatenate(
|
| 130 |
+
[boxes, boxes_for_mix_up], axis=1
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
labels = self.backend.numpy.concatenate(
|
| 134 |
+
[labels, labels_for_mix_up], axis=0
|
| 135 |
+
)
|
| 136 |
+
|
| 137 |
+
self.backend.reset()
|
| 138 |
+
|
| 139 |
+
return {"boxes": boxes, "labels": labels}
|
| 140 |
+
|
| 141 |
+
if training:
|
| 142 |
+
bounding_boxes = _mix_up_bounding_boxes(
|
| 143 |
+
bounding_boxes, transformation
|
| 144 |
+
)
|
| 145 |
+
return bounding_boxes
|
| 146 |
+
|
| 147 |
+
def transform_segmentation_masks(
|
| 148 |
+
self, segmentation_masks, transformation, training=True
|
| 149 |
+
):
|
| 150 |
+
def _mix_up_segmentation_masks(segmentation_masks, transformation):
|
| 151 |
+
mix_weight = transformation["mix_weight"]
|
| 152 |
+
# Make sure we are on cpu for torch tensors.
|
| 153 |
+
mix_weight = ops.convert_to_numpy(mix_weight)
|
| 154 |
+
permutation_order = transformation["permutation_order"]
|
| 155 |
+
mix_weight = self.backend.numpy.reshape(mix_weight, [-1, 1, 1, 1])
|
| 156 |
+
segmentation_masks_for_mix_up = self.backend.numpy.take(
|
| 157 |
+
segmentation_masks, permutation_order
|
| 158 |
+
)
|
| 159 |
+
segmentation_masks = (
|
| 160 |
+
mix_weight * segmentation_masks
|
| 161 |
+
+ (1.0 - mix_weight) * segmentation_masks_for_mix_up
|
| 162 |
+
)
|
| 163 |
+
return segmentation_masks
|
| 164 |
+
|
| 165 |
+
if training:
|
| 166 |
+
segmentation_masks = _mix_up_segmentation_masks(
|
| 167 |
+
segmentation_masks, transformation
|
| 168 |
+
)
|
| 169 |
+
return segmentation_masks
|
| 170 |
+
|
| 171 |
+
def compute_output_shape(self, input_shape):
|
| 172 |
+
return input_shape
|
| 173 |
+
|
| 174 |
+
def get_config(self):
|
| 175 |
+
config = {
|
| 176 |
+
"alpha": self.alpha,
|
| 177 |
+
"seed": self.seed,
|
| 178 |
+
}
|
| 179 |
+
base_config = super().get_config()
|
| 180 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/rand_augment.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
import keras.src.layers as layers
|
| 4 |
+
from keras.src.api_export import keras_export
|
| 5 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 6 |
+
BaseImagePreprocessingLayer,
|
| 7 |
+
)
|
| 8 |
+
from keras.src.random import SeedGenerator
|
| 9 |
+
from keras.src.utils import backend_utils
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@keras_export("keras.layers.RandAugment")
class RandAugment(BaseImagePreprocessingLayer):
    """RandAugment performs the Rand Augment operation on input images.

    This layer can be thought of as an all-in-one image augmentation layer. The
    policy implemented by this layer has been benchmarked extensively and is
    effective on a wide variety of datasets.

    References:
        - [RandAugment](https://arxiv.org/abs/1909.13719)

    Args:
        value_range: The range of values the input image can take.
            Default is `(0, 255)`. Typically, this would be `(0, 1)`
            for normalized images or `(0, 255)` for raw images.
        num_ops: The number of augmentation operations to apply sequentially
            to each image. Default is 2.
        factor: The strength of the augmentation as a normalized value
            between 0 and 1. Default is 0.5.
        interpolation: The interpolation method to use for resizing operations.
            Options include `nearest`, `bilinear`. Default is `bilinear`.
        seed: Integer. Used to create a random seed.

    """

    _USE_BASE_FACTOR = False
    _FACTOR_BOUNDS = (0, 1)

    # Names of the candidate augmentation sub-layers; each name matches an
    # attribute created in `__init__`.
    _AUGMENT_LAYERS = [
        "random_shear",
        "random_translation",
        "random_rotation",
        "random_brightness",
        "random_color_degeneration",
        "random_contrast",
        "random_sharpness",
        "random_posterization",
        "solarization",
        "auto_contrast",
        "equalization",
    ]

    def __init__(
        self,
        value_range=(0, 255),
        num_ops=2,
        factor=0.5,
        interpolation="bilinear",
        seed=None,
        data_format=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)

        self.value_range = value_range
        self.num_ops = num_ops
        self._set_factor(factor)
        self.interpolation = interpolation
        self.seed = seed
        self.generator = SeedGenerator(seed)

        self.random_shear = layers.RandomShear(
            x_factor=self.factor,
            y_factor=self.factor,
            interpolation=interpolation,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.random_translation = layers.RandomTranslation(
            height_factor=self.factor,
            width_factor=self.factor,
            interpolation=interpolation,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.random_rotation = layers.RandomRotation(
            factor=self.factor,
            interpolation=interpolation,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.random_brightness = layers.RandomBrightness(
            factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.random_color_degeneration = layers.RandomColorDegeneration(
            factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.random_contrast = layers.RandomContrast(
            factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.random_sharpness = layers.RandomSharpness(
            factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.solarization = layers.Solarization(
            addition_factor=self.factor,
            threshold_factor=self.factor,
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.random_posterization = layers.RandomPosterization(
            # Posterization wants a bit count; map the normalized factor's
            # upper bound onto [1, 8] bits.
            factor=max(1, int(8 * self.factor[1])),
            value_range=self.value_range,
            seed=self.seed,
            data_format=data_format,
            **kwargs,
        )

        self.auto_contrast = layers.AutoContrast(
            value_range=self.value_range, data_format=data_format, **kwargs
        )

        self.equalization = layers.Equalization(
            value_range=self.value_range, data_format=data_format, **kwargs
        )

    def build(self, input_shape):
        """Build every candidate augmentation sub-layer."""
        for layer_name in self._AUGMENT_LAYERS:
            augmentation_layer = getattr(self, layer_name)
            augmentation_layer.build(input_shape)

    def get_random_transformation(self, data, training=True, seed=None):
        """Pick `num_ops` random sub-layers and sample their transformations.

        Returns a dict mapping each chosen sub-layer's name to its sampled
        transformation, or None when not training.
        """
        if not training:
            return None

        if backend_utils.in_tf_graph():
            self.backend.set_backend("tensorflow")

            for layer_name in self._AUGMENT_LAYERS:
                augmentation_layer = getattr(self, layer_name)
                augmentation_layer.backend.set_backend("tensorflow")

        transformation = {}
        # Shuffle a per-call copy: `random.shuffle(self._AUGMENT_LAYERS)`
        # would mutate the shared class-level list in place, leaking the
        # reordering to every other RandAugment instance.
        # NOTE(review): this uses Python's global `random` RNG, so the op
        # selection is not governed by `self.seed` — confirm if intended.
        layer_names = list(self._AUGMENT_LAYERS)
        random.shuffle(layer_names)
        for layer_name in layer_names[: self.num_ops]:
            augmentation_layer = getattr(self, layer_name)
            transformation[layer_name] = (
                augmentation_layer.get_random_transformation(
                    data,
                    training=training,
                    seed=self._get_seed_generator(self.backend._backend),
                )
            )

        return transformation

    def transform_images(self, images, transformation, training=True):
        """Apply each selected sub-layer's image transform in sequence."""
        if training:
            images = self.backend.cast(images, self.compute_dtype)

            for layer_name, transformation_value in transformation.items():
                augmentation_layer = getattr(self, layer_name)
                images = augmentation_layer.transform_images(
                    images, transformation_value
                )

        images = self.backend.cast(images, self.compute_dtype)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by RandAugment."""
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Forward bounding boxes through each selected sub-layer."""
        if training:
            for layer_name, transformation_value in transformation.items():
                augmentation_layer = getattr(self, layer_name)
                bounding_boxes = augmentation_layer.transform_bounding_boxes(
                    bounding_boxes, transformation_value, training=training
                )
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Masks get the same pixel-level treatment as the images."""
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )

    def compute_output_shape(self, input_shape):
        """All sub-ops are shape-preserving."""
        return input_shape

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = {
            "value_range": self.value_range,
            "num_ops": self.num_ops,
            "factor": self.factor,
            "interpolation": self.interpolation,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_brightness.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 3 |
+
BaseImagePreprocessingLayer,
|
| 4 |
+
)
|
| 5 |
+
from keras.src.random.seed_generator import SeedGenerator
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.layers.RandomBrightness")
class RandomBrightness(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly adjusts brightness during training.

    This layer will randomly increase/reduce the brightness for the input RGB
    images. At inference time, the output will be identical to the input.
    Call the layer with `training=True` to adjust the brightness of the input.

    **Note:** This layer is safe to use inside a `tf.data` pipeline
    (independently of which backend you're using).

    Args:
        factor: Float or a list/tuple of 2 floats between -1.0 and 1.0. The
            factor is used to determine the lower bound and upper bound of the
            brightness adjustment. A float value will be chosen randomly between
            the limits. When -1.0 is chosen, the output image will be black, and
            when 1.0 is chosen, the image will be fully white.
            When only one float is provided, eg, 0.2,
            then -0.2 will be used for lower bound and 0.2
            will be used for upper bound.
        value_range: Optional list/tuple of 2 floats
            for the lower and upper limit
            of the values of the input data.
            To make no change, use `[0.0, 1.0]`, e.g., if the image input
            has been scaled before this layer. Defaults to `[0.0, 255.0]`.
            The brightness adjustment will be scaled to this range, and the
            output values will be clipped to this range.
        seed: optional integer, for fixed RNG behavior.

    Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
        values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)

    Output: 3D (HWC) or 4D (NHWC) tensor with brightness adjusted based on the
        `factor`. By default, the layer will output floats.
        The output value will be clipped to the range `[0, 255]`,
        the valid range of RGB colors, and
        rescaled based on the `value_range` if needed.

    Example:

    ```python
    random_bright = keras.layers.RandomBrightness(factor=0.2)

    # An image with shape [2, 2, 3]
    image = [[[1, 2, 3], [4 ,5 ,6]], [[7, 8, 9], [10, 11, 12]]]

    # Assume we randomly select the factor to be 0.1, then it will apply
    # 0.1 * 255 to all the channel
    output = random_bright(image, training=True)

    # output will be int64 with 25.5 added to each channel and round down.
    >>> array([[[26.5, 27.5, 28.5]
                [29.5, 30.5, 31.5]]
               [[32.5, 33.5, 34.5]
                [35.5, 36.5, 37.5]]],
              shape=(2, 2, 3), dtype=int64)
    ```
    """

    _VALUE_RANGE_VALIDATION_ERROR = (
        "The `value_range` argument should be a list of two numbers. "
    )

    def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
        super().__init__(factor=factor, **kwargs)
        self.seed = seed
        self.generator = SeedGenerator(seed)
        self._set_value_range(value_range)

    def _set_value_range(self, value_range):
        """Validate `value_range` and store it as a sorted [low, high] pair.

        Raises:
            ValueError: if `value_range` is not a list/tuple of length 2.
        """
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        self.value_range = sorted(value_range)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample an additive brightness delta, one scalar per image.

        Returns a dict with key `"rgb_delta"`; the delta is zero when
        `training` is False.

        Raises:
            ValueError: if the input is not rank 3 or rank 4.
        """
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            rgb_delta_shape = (1, 1, 1)
        elif rank == 4:
            # Keep only the batch dim. This will ensure to have same adjustment
            # with in one image, but different across the images.
            rgb_delta_shape = [images_shape[0], 1, 1, 1]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received "
                f"inputs.shape={images_shape}"
            )
        if not training:
            return {"rgb_delta": self.backend.numpy.zeros(rgb_delta_shape)}

        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        rgb_delta = self.backend.random.uniform(
            minval=self.factor[0],
            maxval=self.factor[1],
            shape=rgb_delta_shape,
            seed=seed,
        )
        # Scale the normalized factor to the width of the pixel-value range.
        rgb_delta = rgb_delta * (self.value_range[1] - self.value_range[0])
        return {"rgb_delta": rgb_delta}

    def transform_images(self, images, transformation, training=True):
        """Add the sampled delta and clip back into `value_range`."""
        if training:
            rgb_delta = transformation["rgb_delta"]
            rgb_delta = self.backend.cast(rgb_delta, images.dtype)
            # Out-of-place addition: `images += rgb_delta` would mutate the
            # caller's array in place on the NumPy backend.
            images = images + rgb_delta
            return self.backend.numpy.clip(
                images, self.value_range[0], self.value_range[1]
            )
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by brightness changes."""
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Bounding boxes are unaffected by brightness changes."""
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Segmentation masks are unaffected by brightness changes."""
        return segmentation_masks

    def compute_output_shape(self, input_shape):
        """Brightness adjustment is shape-preserving."""
        return input_shape

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = {
            "factor": self.factor,
            "value_range": self.value_range,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 3 |
+
BaseImagePreprocessingLayer,
|
| 4 |
+
)
|
| 5 |
+
from keras.src.random import SeedGenerator
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.layers.RandomColorDegeneration")
class RandomColorDegeneration(BaseImagePreprocessingLayer):
    """Randomly performs the color degeneration operation on given images.

    The color degeneration operation first converts an image to gray scale,
    then back to color. It then takes a weighted average between the original
    image and the degenerated image. This makes colors appear more dull.

    Args:
        factor: A tuple of two floats or a single float.
            `factor` controls the extent to which the
            image colors are degenerated. `factor=0.0` makes this layer
            perform a no-op operation, while a value of 1.0 uses the
            degenerated result entirely. Values between 0 and 1 result in
            linear interpolation between the original image and the
            degenerated image.
            Values should be between `0.0` and `1.0`. If a tuple is used, a
            `factor` is sampled between the two values for every image
            augmented. If a single float is used, a value between `0.0` and the
            passed float is sampled. In order to ensure the value is always the
            same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
        seed: Integer. Used to create a random seed.
    """

    _VALUE_RANGE_VALIDATION_ERROR = (
        "The `value_range` argument should be a list of two numbers. "
    )

    def __init__(
        self,
        factor,
        value_range=(0, 255),
        data_format=None,
        seed=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self._set_factor(factor)
        self._set_value_range(value_range)
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def _set_value_range(self, value_range):
        """Validate `value_range` and store it as a sorted [low, high] pair.

        Raises:
            ValueError: if `value_range` is not a list/tuple of length 2.
        """
        if not isinstance(value_range, (tuple, list)):
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        if len(value_range) != 2:
            raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
                + f"Received: value_range={value_range}"
            )
        self.value_range = sorted(value_range)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample one degeneration factor per image in the batch.

        Returns a dict with key `"factor"` of shape `(batch, 1, 1, 1)`.

        Raises:
            ValueError: if the input is not rank 3 or rank 4.
        """
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            batch_size = 1
        elif rank == 4:
            batch_size = images_shape[0]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received: "
                f"inputs.shape={images_shape}"
            )

        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)

        # NOTE(review): for rank-3 input the (1, 1, 1, 1) factor broadcasts
        # against an (H, W, C) image, producing a rank-4 result — confirm
        # unbatched inputs are batched upstream before reaching this layer.
        factor = self.backend.random.uniform(
            (batch_size, 1, 1, 1),
            minval=self.factor[0],
            maxval=self.factor[1],
            seed=seed,
        )
        # (Removed a dead `factor = factor` no-op assignment here.)
        return {"factor": factor}

    def transform_images(self, images, transformation=None, training=True):
        """Blend each image with its grayscale version by the sampled factor."""
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            factor = self.backend.cast(
                transformation["factor"], self.compute_dtype
            )
            degenerates = self.backend.image.rgb_to_grayscale(
                images, data_format=self.data_format
            )
            # Linear interpolation: factor=0 keeps the original, factor=1
            # uses the grayscale result entirely.
            images = images + factor * (degenerates - images)
            images = self.backend.numpy.clip(
                images, self.value_range[0], self.value_range[1]
            )
            images = self.backend.cast(images, self.compute_dtype)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by color degeneration."""
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Segmentation masks are unaffected by color degeneration."""
        return segmentation_masks

    def transform_bounding_boxes(
        self, bounding_boxes, transformation, training=True
    ):
        """Bounding boxes are unaffected by color degeneration."""
        return bounding_boxes

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = super().get_config()
        config.update(
            {
                "factor": self.factor,
                "value_range": self.value_range,
                "seed": self.seed,
            }
        )
        return config

    def compute_output_shape(self, input_shape):
        """Color degeneration is shape-preserving."""
        return input_shape
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py
ADDED
|
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import keras.src.layers.preprocessing.image_preprocessing.random_brightness as random_brightness # noqa: E501
|
| 2 |
+
import keras.src.layers.preprocessing.image_preprocessing.random_contrast as random_contrast # noqa: E501
|
| 3 |
+
import keras.src.layers.preprocessing.image_preprocessing.random_hue as random_hue # noqa: E501
|
| 4 |
+
import keras.src.layers.preprocessing.image_preprocessing.random_saturation as random_saturation # noqa: E501
|
| 5 |
+
from keras.src.api_export import keras_export
|
| 6 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 7 |
+
BaseImagePreprocessingLayer,
|
| 8 |
+
)
|
| 9 |
+
from keras.src.random.seed_generator import SeedGenerator
|
| 10 |
+
from keras.src.utils import backend_utils
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@keras_export("keras.layers.RandomColorJitter")
|
| 14 |
+
class RandomColorJitter(BaseImagePreprocessingLayer):
|
| 15 |
+
"""RandomColorJitter class randomly apply brightness, contrast, saturation
|
| 16 |
+
and hue image processing operation sequentially and randomly on the
|
| 17 |
+
input.
|
| 18 |
+
|
| 19 |
+
Args:
|
| 20 |
+
value_range: the range of values the incoming images will have.
|
| 21 |
+
Represented as a two number tuple written [low, high].
|
| 22 |
+
This is typically either `[0, 1]` or `[0, 255]` depending
|
| 23 |
+
on how your preprocessing pipeline is set up.
|
| 24 |
+
brightness_factor: Float or a list/tuple of 2 floats between -1.0
|
| 25 |
+
and 1.0. The factor is used to determine the lower bound and
|
| 26 |
+
upper bound of the brightness adjustment. A float value will
|
| 27 |
+
be chosen randomly between the limits. When -1.0 is chosen,
|
| 28 |
+
the output image will be black, and when 1.0 is chosen, the
|
| 29 |
+
image will be fully white. When only one float is provided,
|
| 30 |
+
eg, 0.2, then -0.2 will be used for lower bound and 0.2 will
|
| 31 |
+
be used for upper bound.
|
| 32 |
+
contrast_factor: a positive float represented as fraction of value,
|
| 33 |
+
or a tuple of size 2 representing lower and upper bound. When
|
| 34 |
+
represented as a single float, lower = upper. The contrast
|
| 35 |
+
factor will be randomly picked between `[1.0 - lower, 1.0 +
|
| 36 |
+
upper]`. For any pixel x in the channel, the output will be
|
| 37 |
+
`(x - mean) * factor + mean` where `mean` is the mean value
|
| 38 |
+
of the channel.
|
| 39 |
+
saturation_factor: A tuple of two floats or a single float. `factor`
|
| 40 |
+
controls the extent to which the image saturation is impacted.
|
| 41 |
+
`factor=0.5` makes this layer perform a no-op operation.
|
| 42 |
+
`factor=0.0` makes the image fully grayscale. `factor=1.0`
|
| 43 |
+
makes the image fully saturated. Values should be between
|
| 44 |
+
`0.0` and `1.0`. If a tuple is used, a `factor` is sampled
|
| 45 |
+
between the two values for every image augmented. If a single
|
| 46 |
+
float is used, a value between `0.0` and the passed float is
|
| 47 |
+
sampled. To ensure the value is always the same, pass a tuple
|
| 48 |
+
with two identical floats: `(0.5, 0.5)`.
|
| 49 |
+
hue_factor: A single float or a tuple of two floats. `factor`
|
| 50 |
+
controls the extent to which the image hue is impacted.
|
| 51 |
+
`factor=0.0` makes this layer perform a no-op operation,
|
| 52 |
+
while a value of `1.0` performs the most aggressive contrast
|
| 53 |
+
adjustment available. If a tuple is used, a `factor` is
|
| 54 |
+
sampled between the two values for every image augmented.
|
| 55 |
+
If a single float is used, a value between `0.0` and the
|
| 56 |
+
passed float is sampled. In order to ensure the value is
|
| 57 |
+
always the same, please pass a tuple with two identical
|
| 58 |
+
floats: `(0.5, 0.5)`.
|
| 59 |
+
seed: Integer. Used to create a random seed.
|
| 60 |
+
"""
|
| 61 |
+
|
| 62 |
+
def __init__(
|
| 63 |
+
self,
|
| 64 |
+
value_range=(0, 255),
|
| 65 |
+
brightness_factor=None,
|
| 66 |
+
contrast_factor=None,
|
| 67 |
+
saturation_factor=None,
|
| 68 |
+
hue_factor=None,
|
| 69 |
+
seed=None,
|
| 70 |
+
data_format=None,
|
| 71 |
+
**kwargs,
|
| 72 |
+
):
|
| 73 |
+
super().__init__(data_format=data_format, **kwargs)
|
| 74 |
+
self.value_range = value_range
|
| 75 |
+
self.brightness_factor = brightness_factor
|
| 76 |
+
self.contrast_factor = contrast_factor
|
| 77 |
+
self.saturation_factor = saturation_factor
|
| 78 |
+
self.hue_factor = hue_factor
|
| 79 |
+
self.seed = seed
|
| 80 |
+
self.generator = SeedGenerator(seed)
|
| 81 |
+
|
| 82 |
+
self.random_brightness = None
|
| 83 |
+
self.random_contrast = None
|
| 84 |
+
self.random_saturation = None
|
| 85 |
+
self.random_hue = None
|
| 86 |
+
|
| 87 |
+
if self.brightness_factor is not None:
|
| 88 |
+
self.random_brightness = random_brightness.RandomBrightness(
|
| 89 |
+
factor=self.brightness_factor,
|
| 90 |
+
value_range=self.value_range,
|
| 91 |
+
seed=self.seed,
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
if self.contrast_factor is not None:
|
| 95 |
+
self.random_contrast = random_contrast.RandomContrast(
|
| 96 |
+
factor=self.contrast_factor,
|
| 97 |
+
value_range=self.value_range,
|
| 98 |
+
seed=self.seed,
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
if self.saturation_factor is not None:
|
| 102 |
+
self.random_saturation = random_saturation.RandomSaturation(
|
| 103 |
+
factor=self.saturation_factor,
|
| 104 |
+
value_range=self.value_range,
|
| 105 |
+
seed=self.seed,
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
if self.hue_factor is not None:
|
| 109 |
+
self.random_hue = random_hue.RandomHue(
|
| 110 |
+
factor=self.hue_factor,
|
| 111 |
+
value_range=self.value_range,
|
| 112 |
+
seed=self.seed,
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
def build(self, input_shape):
|
| 116 |
+
if self.brightness_factor is not None:
|
| 117 |
+
self.random_brightness.build(input_shape)
|
| 118 |
+
|
| 119 |
+
if self.contrast_factor is not None:
|
| 120 |
+
self.random_contrast.build(input_shape)
|
| 121 |
+
|
| 122 |
+
if self.saturation_factor is not None:
|
| 123 |
+
self.random_saturation.build(input_shape)
|
| 124 |
+
|
| 125 |
+
if self.hue_factor is not None:
|
| 126 |
+
self.random_hue.build(input_shape)
|
| 127 |
+
|
| 128 |
+
    def transform_images(self, images, transformation, training=True):
        """Apply the enabled color adjustments to `images`, in a fixed order.

        Order is brightness -> contrast -> saturation -> hue; each enabled
        sub-layer samples its own fresh transformation here (the incoming
        `transformation` argument is overwritten and effectively unused).
        When `training` is False the images pass through unchanged.
        """
        if training:
            # When tracing inside a tf.data pipeline, force the dynamic
            # backend to TensorFlow so the ops below are graph-compatible.
            if backend_utils.in_tf_graph():
                self.backend.set_backend("tensorflow")
            images = self.backend.cast(images, self.compute_dtype)
            if self.brightness_factor is not None:
                if backend_utils.in_tf_graph():
                    self.random_brightness.backend.set_backend("tensorflow")
                transformation = (
                    self.random_brightness.get_random_transformation(
                        images,
                        seed=self._get_seed_generator(self.backend._backend),
                    )
                )
                images = self.random_brightness.transform_images(
                    images, transformation
                )
            if self.contrast_factor is not None:
                if backend_utils.in_tf_graph():
                    self.random_contrast.backend.set_backend("tensorflow")
                transformation = self.random_contrast.get_random_transformation(
                    images, seed=self._get_seed_generator(self.backend._backend)
                )
                # Keep the sampled factor in the layer's compute dtype so the
                # arithmetic inside the contrast sub-layer does not upcast.
                transformation["contrast_factor"] = self.backend.cast(
                    transformation["contrast_factor"], dtype=self.compute_dtype
                )
                images = self.random_contrast.transform_images(
                    images, transformation
                )
            if self.saturation_factor is not None:
                if backend_utils.in_tf_graph():
                    self.random_saturation.backend.set_backend("tensorflow")
                transformation = (
                    self.random_saturation.get_random_transformation(
                        images,
                        seed=self._get_seed_generator(self.backend._backend),
                    )
                )
                images = self.random_saturation.transform_images(
                    images, transformation
                )
            if self.hue_factor is not None:
                if backend_utils.in_tf_graph():
                    self.random_hue.backend.set_backend("tensorflow")
                transformation = self.random_hue.get_random_transformation(
                    images, seed=self._get_seed_generator(self.backend._backend)
                )
                images = self.random_hue.transform_images(
                    images, transformation
                )
            # Sub-layers may have changed dtype; cast back before returning.
            images = self.backend.cast(images, self.compute_dtype)
        return images
|
| 180 |
+
|
| 181 |
+
    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by color jitter; returned unchanged."""
        return labels
|
| 183 |
+
|
| 184 |
+
    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Bounding boxes are unaffected by color jitter; returned unchanged."""
        return bounding_boxes
|
| 191 |
+
|
| 192 |
+
    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Segmentation masks are unaffected by color jitter; unchanged."""
        return segmentation_masks
|
| 196 |
+
|
| 197 |
+
    def compute_output_shape(self, input_shape):
        """Color jitter is pointwise; the input shape is preserved."""
        return input_shape
|
| 199 |
+
|
| 200 |
+
def get_config(self):
|
| 201 |
+
config = {
|
| 202 |
+
"value_range": self.value_range,
|
| 203 |
+
"brightness_factor": self.brightness_factor,
|
| 204 |
+
"contrast_factor": self.contrast_factor,
|
| 205 |
+
"saturation_factor": self.saturation_factor,
|
| 206 |
+
"hue_factor": self.hue_factor,
|
| 207 |
+
"seed": self.seed,
|
| 208 |
+
}
|
| 209 |
+
base_config = super().get_config()
|
| 210 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_contrast.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 3 |
+
BaseImagePreprocessingLayer,
|
| 4 |
+
)
|
| 5 |
+
from keras.src.random.seed_generator import SeedGenerator
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.layers.RandomContrast")
class RandomContrast(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly adjusts contrast during training.

    This layer will randomly adjust the contrast of an image or images
    by a random factor. Contrast is adjusted independently
    for each channel of each image during training.

    For each channel, this layer computes the mean of the image pixels in the
    channel and then adjusts each component `x` of each pixel to
    `(x - mean) * contrast_factor + mean`.

    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
    in integer or floating point dtype.
    By default, the layer will output floats.

    **Note:** This layer is safe to use inside a `tf.data` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Args:
        factor: a positive float represented as fraction of value, or a tuple
            of size 2 representing lower and upper bound.
            When represented as a single float, lower = upper.
            The contrast factor will be randomly picked between
            `[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
            the output will be `(x - mean) * factor + mean`
            where `mean` is the mean value of the channel.
        value_range: the range of values the incoming images will have.
            Represented as a two-number tuple written `[low, high]`. This is
            typically either `[0, 1]` or `[0, 255]` depending on how your
            preprocessing pipeline is set up.
        seed: Integer. Used to create a random seed.
    """

    # Allowed bounds for the (lower, upper) halves of `factor`.
    _FACTOR_BOUNDS = (0, 1)

    def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
        super().__init__(**kwargs)
        self._set_factor(factor)
        self.value_range = value_range
        self.seed = seed
        self.generator = SeedGenerator(seed)

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample one contrast factor per image.

        Returns:
            Dict with key `"contrast_factor"`. The factor is broadcastable
            against the images (shape `(1, 1, 1)` for rank-3 input, or
            `(batch, 1, 1, 1)` for rank-4). All zeros when not training
            (the factor is then unused by `transform_images`).

        Raises:
            ValueError: if the input is not rank 3 or 4.
        """
        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        images_shape = self.backend.shape(images)
        rank = len(images_shape)
        if rank == 3:
            factor_shape = (1, 1, 1)
        elif rank == 4:
            # Keep only the batch dim. This will ensure to have same
            # adjustment within one image, but different across the images.
            factor_shape = [images_shape[0], 1, 1, 1]
        else:
            raise ValueError(
                "Expected the input image to be rank 3 or 4. Received "
                f"inputs.shape={images_shape}"
            )

        if not training:
            return {"contrast_factor": self.backend.numpy.zeros(factor_shape)}

        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)

        factor = self.backend.random.uniform(
            shape=factor_shape,
            minval=1.0 - self.factor[0],
            maxval=1.0 + self.factor[1],
            seed=seed,
            dtype=self.compute_dtype,
        )
        return {"contrast_factor": factor}

    def transform_images(self, images, transformation, training=True):
        """Apply the sampled contrast factor; identity when not training."""
        if training:
            contrast_factor = transformation["contrast_factor"]
            outputs = self._adjust_constrast(images, contrast_factor)
            # Contrast scaling can push values outside the declared range.
            outputs = self.backend.numpy.clip(
                outputs, self.value_range[0], self.value_range[1]
            )
            # Bug fix: the reshape result was previously discarded, making
            # the call a no-op. Assign it so the output is pinned back to
            # the input's shape.
            outputs = self.backend.numpy.reshape(
                outputs, self.backend.shape(images)
            )
            return outputs
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by contrast adjustment."""
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Bounding boxes are unaffected by contrast adjustment."""
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Segmentation masks are unaffected by contrast adjustment."""
        return segmentation_masks

    def _adjust_constrast(self, inputs, contrast_factor):
        """Scale each pixel's deviation from its channel mean.

        NOTE: "constrast" is a historical typo; the name is kept so any
        existing references to this private helper keep working.
        """
        if self.data_format == "channels_first":
            height_axis = -2
            width_axis = -1
        else:
            height_axis = -3
            width_axis = -2
        # Per-channel spatial mean: reduce over height, then over width.
        inp_mean = self.backend.numpy.mean(
            inputs, axis=height_axis, keepdims=True
        )
        inp_mean = self.backend.numpy.mean(
            inp_mean, axis=width_axis, keepdims=True
        )
        outputs = (inputs - inp_mean) * contrast_factor + inp_mean
        return outputs

    def compute_output_shape(self, input_shape):
        """Contrast adjustment is pointwise; shape is preserved."""
        return input_shape

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = {
            "factor": self.factor,
            "value_range": self.value_range,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_crop.py
ADDED
|
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 4 |
+
BaseImagePreprocessingLayer,
|
| 5 |
+
)
|
| 6 |
+
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
|
| 7 |
+
convert_format,
|
| 8 |
+
)
|
| 9 |
+
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.validation import ( # noqa: E501
|
| 10 |
+
densify_bounding_boxes,
|
| 11 |
+
)
|
| 12 |
+
from keras.src.random.seed_generator import SeedGenerator
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@keras_export("keras.layers.RandomCrop")
class RandomCrop(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly crops images during training.

    During training, this layer will randomly choose a location to crop images
    down to a target size. The layer will crop all the images in the same batch
    to the same cropping location.

    At inference time, and during training if an input image is smaller than
    the target size, the input will be resized and cropped so as to return the
    largest possible window in the image that matches the target aspect ratio.
    If you need to apply random cropping at inference time, set `training` to
    True when calling the layer.

    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
    of integer or floating point dtype. By default, the layer will output
    floats.

    **Note:** This layer is safe to use inside a `tf.data` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., target_height, target_width, channels)`.

    Args:
        height: Integer, the height of the output shape.
        width: Integer, the width of the output shape.
        seed: Integer. Used to create a random seed.
        **kwargs: Base layer keyword arguments, such as
            `name` and `dtype`.
    """

    def __init__(
        self, height, width, seed=None, data_format=None, name=None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self.height = height
        self.width = width
        self.seed = (
            seed if seed is not None else backend.random.make_default_seed()
        )
        self.generator = SeedGenerator(seed)
        self.data_format = backend.standardize_data_format(data_format)

        if self.data_format == "channels_first":
            self.height_axis = -2
            self.width_axis = -1
        elif self.data_format == "channels_last":
            self.height_axis = -3
            self.width_axis = -2

        self.supports_masking = False
        # Random crops produce data-dependent slicing; not jittable.
        self.supports_jit = False
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True

    def get_random_transformation(self, data, training=True, seed=None):
        """Return `(h_start, w_start)` crop offsets.

        Training (and input strictly larger than target): offsets are drawn
        uniformly at random. Otherwise: offsets describe a centered crop of
        the largest window matching the target aspect ratio.

        Raises:
            ValueError: if the input height/width is not statically known.
        """
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)

        if isinstance(data, dict):
            input_shape = self.backend.shape(data["images"])
        else:
            input_shape = self.backend.shape(data)

        input_height, input_width = (
            input_shape[self.height_axis],
            input_shape[self.width_axis],
        )
        if input_height is None or input_width is None:
            raise ValueError(
                "RandomCrop requires the input to have a fully defined "
                f"height and width. Received: images.shape={input_shape}"
            )

        if training and input_height > self.height and input_width > self.width:
            # Random top-left corner such that the crop fits in the image.
            h_start = self.backend.cast(
                self.backend.random.uniform(
                    (),
                    0,
                    maxval=float(input_height - self.height + 1),
                    seed=seed,
                ),
                "int32",
            )
            w_start = self.backend.cast(
                self.backend.random.uniform(
                    (),
                    0,
                    maxval=float(input_width - self.width + 1),
                    seed=seed,
                ),
                "int32",
            )
        else:
            # Centered crop of the largest window with the target aspect
            # ratio; clamped to at least 1 pixel.
            crop_height = int(float(input_width * self.height) / self.width)
            crop_height = max(min(input_height, crop_height), 1)
            crop_width = int(float(input_height * self.width) / self.height)
            crop_width = max(min(input_width, crop_width), 1)
            h_start = int(float(input_height - crop_height) / 2)
            w_start = int(float(input_width - crop_width) / 2)

        return h_start, w_start

    def transform_images(self, images, transformation, training=True):
        """Crop (training only), then resize if the result is off-target."""
        if training:
            images = self.backend.cast(images, self.compute_dtype)
            crop_box_hstart, crop_box_wstart = transformation
            crop_height = self.height
            crop_width = self.width

            if self.data_format == "channels_last":
                if len(images.shape) == 4:
                    images = images[
                        :,
                        crop_box_hstart : crop_box_hstart + crop_height,
                        crop_box_wstart : crop_box_wstart + crop_width,
                        :,
                    ]
                else:
                    images = images[
                        crop_box_hstart : crop_box_hstart + crop_height,
                        crop_box_wstart : crop_box_wstart + crop_width,
                        :,
                    ]
            else:
                if len(images.shape) == 4:
                    images = images[
                        :,
                        :,
                        crop_box_hstart : crop_box_hstart + crop_height,
                        crop_box_wstart : crop_box_wstart + crop_width,
                    ]
                else:
                    images = images[
                        :,
                        crop_box_hstart : crop_box_hstart + crop_height,
                        crop_box_wstart : crop_box_wstart + crop_width,
                    ]

        shape = self.backend.shape(images)
        new_height = shape[self.height_axis]
        new_width = shape[self.width_axis]
        if (
            not isinstance(new_height, int)
            or not isinstance(new_width, int)
            or new_height != self.height
            or new_width != self.width
        ):
            # Resize images if size mismatch or
            # if size mismatch cannot be determined
            # (in the case of a TF dynamic shape).
            images = self.backend.image.resize(
                images,
                size=(self.height, self.width),
                data_format=self.data_format,
            )
            # Resize may have upcasted the outputs
            images = self.backend.cast(images, self.compute_dtype)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by cropping."""
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Shift bounding boxes into the cropped image's coordinate frame.

        bounding_boxes = {
            "boxes": (batch, num_boxes, 4), # left-top-right-bottom (xyxy)
            "labels": (batch, num_boxes, num_classes),
        }
        or
        bounding_boxes = {
            "boxes": (num_boxes, 4),
            "labels": (num_boxes, num_classes),
        }
        """
        if training:
            h_start, w_start = transformation
            if not self.backend.is_tensor(bounding_boxes["boxes"]):
                bounding_boxes = densify_bounding_boxes(
                    bounding_boxes, backend=self.backend
                )
            boxes = bounding_boxes["boxes"]
            # Convert to a standard xyxy as operations are done xyxy by
            # default.
            # NOTE(review): height/width here are the *target* crop size —
            # confirm this is intended for relative source formats.
            boxes = convert_format(
                boxes=boxes,
                source=self.bounding_box_format,
                target="xyxy",
                height=self.height,
                width=self.width,
            )
            h_start = self.backend.cast(h_start, boxes.dtype)
            w_start = self.backend.cast(w_start, boxes.dtype)
            # Bug fix: in xyxy, indices 0 and 2 are x-coordinates and must be
            # shifted by the horizontal offset `w_start`; indices 1 and 3 are
            # y-coordinates and shift by `h_start`. The offsets were swapped.
            if len(self.backend.shape(boxes)) == 3:
                boxes = self.backend.numpy.stack(
                    [
                        self.backend.numpy.maximum(boxes[:, :, 0] - w_start, 0),
                        self.backend.numpy.maximum(boxes[:, :, 1] - h_start, 0),
                        self.backend.numpy.maximum(boxes[:, :, 2] - w_start, 0),
                        self.backend.numpy.maximum(boxes[:, :, 3] - h_start, 0),
                    ],
                    axis=-1,
                )
            else:
                boxes = self.backend.numpy.stack(
                    [
                        self.backend.numpy.maximum(boxes[:, 0] - w_start, 0),
                        self.backend.numpy.maximum(boxes[:, 1] - h_start, 0),
                        self.backend.numpy.maximum(boxes[:, 2] - w_start, 0),
                        self.backend.numpy.maximum(boxes[:, 3] - h_start, 0),
                    ],
                    axis=-1,
                )

            # Convert to user defined bounding box format
            boxes = convert_format(
                boxes=boxes,
                source="xyxy",
                target=self.bounding_box_format,
                height=self.height,
                width=self.width,
            )

            return {
                "boxes": boxes,
                "labels": bounding_boxes["labels"],
            }
        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Crop masks exactly like the images (same transformation)."""
        return self.transform_images(segmentation_masks, transformation)

    def compute_output_shape(self, input_shape, *args, **kwargs):
        """Replace the spatial dims with the target height/width."""
        input_shape = list(input_shape)
        input_shape[self.height_axis] = self.height
        input_shape[self.width_axis] = self.width
        return tuple(input_shape)

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = super().get_config()
        config.update(
            {
                "height": self.height,
                "width": self.width,
                "seed": self.seed,
                "data_format": self.data_format,
            }
        )
        return config
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/random_flip.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 3 |
+
BaseImagePreprocessingLayer,
|
| 4 |
+
)
|
| 5 |
+
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
|
| 6 |
+
clip_to_image_size,
|
| 7 |
+
)
|
| 8 |
+
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
|
| 9 |
+
convert_format,
|
| 10 |
+
)
|
| 11 |
+
from keras.src.random.seed_generator import SeedGenerator
|
| 12 |
+
from keras.src.utils import backend_utils
|
| 13 |
+
|
| 14 |
+
HORIZONTAL = "horizontal"
|
| 15 |
+
VERTICAL = "vertical"
|
| 16 |
+
HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical"
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@keras_export("keras.layers.RandomFlip")
class RandomFlip(BaseImagePreprocessingLayer):
    """A preprocessing layer which randomly flips images during training.

    This layer will flip the images horizontally and or vertically based on the
    `mode` attribute. During inference time, the output will be identical to
    input. Call the layer with `training=True` to flip the input.
    Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
    of integer or floating point dtype.
    By default, the layer will output floats.

    **Note:** This layer is safe to use inside a `tf.data` pipeline
    (independently of which backend you're using).

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Args:
        mode: String indicating which flip mode to use. Can be `"horizontal"`,
            `"vertical"`, or `"horizontal_and_vertical"`. `"horizontal"` is a
            left-right flip and `"vertical"` is a top-bottom flip. Defaults to
            `"horizontal_and_vertical"`
        seed: Integer. Used to create a random seed.
        **kwargs: Base layer keyword arguments, such as
            `name` and `dtype`.
    """

    # This layer samples a boolean per image rather than using the base
    # class's generic `factor` machinery.
    _USE_BASE_FACTOR = False

    def __init__(
        self,
        mode=HORIZONTAL_AND_VERTICAL,
        seed=None,
        data_format=None,
        **kwargs,
    ):
        super().__init__(data_format=data_format, **kwargs)
        self.seed = seed
        self.generator = SeedGenerator(seed)
        self.mode = mode
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True

    def get_random_transformation(self, data, training=True, seed=None):
        """Sample a per-image boolean deciding whether to flip.

        Returns `None` when not training; otherwise a dict with `"flips"`
        (boolean tensor broadcastable against the images) and the original
        `"input_shape"`.
        """
        if not training:
            return None

        if isinstance(data, dict):
            images = data["images"]
        else:
            images = data
        shape = self.backend.core.shape(images)
        if len(shape) == 3:
            flips_shape = (1, 1, 1)
        else:
            # One decision per image in the batch.
            flips_shape = (shape[0], 1, 1, 1)

        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)

        # Each image is flipped with probability 0.5.
        flips = self.backend.numpy.less_equal(
            self.backend.random.uniform(shape=flips_shape, seed=seed), 0.5
        )
        return {"flips": flips, "input_shape": shape}

    def transform_images(self, images, transformation, training=True):
        """Flip images per the sampled booleans; identity when not training."""
        images = self.backend.cast(images, self.compute_dtype)
        if training:
            return self._flip_inputs(images, transformation)
        return images

    def transform_labels(self, labels, transformation, training=True):
        """Labels are unaffected by flipping."""
        return labels

    def transform_bounding_boxes(
        self,
        bounding_boxes,
        transformation,
        training=True,
    ):
        """Mirror bounding boxes for the images that were flipped.

        Boxes are converted to relative xyxy so a flip is simply `1 - coord`.
        """
        def _flip_boxes_horizontal(boxes):
            # In xyxy order: x1, y1, x2, y2. Mirroring swaps and reflects
            # the two x coordinates (coords are relative, in [0, 1]).
            x1, x2, x3, x4 = self.backend.numpy.split(boxes, 4, axis=-1)
            outputs = self.backend.numpy.concatenate(
                [1 - x3, x2, 1 - x1, x4], axis=-1
            )
            return outputs

        def _flip_boxes_vertical(boxes):
            # Mirroring swaps and reflects the two y coordinates.
            x1, x2, x3, x4 = self.backend.numpy.split(boxes, 4, axis=-1)
            outputs = self.backend.numpy.concatenate(
                [x1, 1 - x4, x3, 1 - x2], axis=-1
            )
            return outputs

        def _transform_xyxy(boxes, box_flips):
            # Apply each enabled flip only where the per-image boolean is set.
            bboxes = boxes["boxes"]
            if self.mode in {HORIZONTAL, HORIZONTAL_AND_VERTICAL}:
                bboxes = self.backend.numpy.where(
                    box_flips,
                    _flip_boxes_horizontal(bboxes),
                    bboxes,
                )
            if self.mode in {VERTICAL, HORIZONTAL_AND_VERTICAL}:
                bboxes = self.backend.numpy.where(
                    box_flips,
                    _flip_boxes_vertical(bboxes),
                    bboxes,
                )
            return bboxes

        if training:
            if backend_utils.in_tf_graph():
                self.backend.set_backend("tensorflow")

            # Drop the trailing singleton dim so flips broadcast against
            # (batch, num_boxes, 4) boxes.
            flips = self.backend.numpy.squeeze(transformation["flips"], axis=-1)

            if self.data_format == "channels_first":
                height_axis = -2
                width_axis = -1
            else:
                height_axis = -3
                width_axis = -2

            input_height, input_width = (
                transformation["input_shape"][height_axis],
                transformation["input_shape"][width_axis],
            )

            bounding_boxes = convert_format(
                bounding_boxes,
                source=self.bounding_box_format,
                target="rel_xyxy",
                height=input_height,
                width=input_width,
            )

            bounding_boxes["boxes"] = _transform_xyxy(bounding_boxes, flips)

            # NOTE(review): boxes are in "rel_xyxy" here, but the clip is
            # requested in "xyxy" — confirm this is intentional.
            bounding_boxes = clip_to_image_size(
                bounding_boxes=bounding_boxes,
                height=input_height,
                width=input_width,
                bounding_box_format="xyxy",
            )

            bounding_boxes = convert_format(
                bounding_boxes,
                source="rel_xyxy",
                target=self.bounding_box_format,
                height=input_height,
                width=input_width,
            )

            # Undo any temporary backend switch made for tf.data tracing.
            self.backend.reset()

        return bounding_boxes

    def transform_segmentation_masks(
        self, segmentation_masks, transformation, training=True
    ):
        """Flip masks exactly like the images (same transformation)."""
        return self.transform_images(
            segmentation_masks, transformation, training=training
        )

    def _flip_inputs(self, inputs, transformation):
        """Apply the sampled flips to a (possibly unbatched) image tensor."""
        if transformation is None:
            return inputs

        flips = transformation["flips"]
        inputs_shape = self.backend.shape(inputs)
        unbatched = len(inputs_shape) == 3
        if unbatched:
            # Temporarily add a batch dim so `where` broadcasting is uniform.
            inputs = self.backend.numpy.expand_dims(inputs, axis=0)

        flipped_outputs = inputs
        if self.data_format == "channels_last":
            horizontal_axis = -2
            vertical_axis = -3
        else:
            horizontal_axis = -1
            vertical_axis = -2

        if self.mode == HORIZONTAL or self.mode == HORIZONTAL_AND_VERTICAL:
            flipped_outputs = self.backend.numpy.where(
                flips,
                self.backend.numpy.flip(flipped_outputs, axis=horizontal_axis),
                flipped_outputs,
            )
        if self.mode == VERTICAL or self.mode == HORIZONTAL_AND_VERTICAL:
            flipped_outputs = self.backend.numpy.where(
                flips,
                self.backend.numpy.flip(flipped_outputs, axis=vertical_axis),
                flipped_outputs,
            )
        if unbatched:
            flipped_outputs = self.backend.numpy.squeeze(
                flipped_outputs, axis=0
            )
        return flipped_outputs

    def compute_output_shape(self, input_shape):
        """Flipping preserves the input shape."""
        return input_shape

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = super().get_config()
        config.update(
            {
                "seed": self.seed,
                "mode": self.mode,
                "data_format": self.data_format,
            }
        )
        return config
|