Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/add.py +69 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/average.py +70 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/base_merge.py +281 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/concatenate.py +178 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/dot.py +376 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/maximum.py +67 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/minimum.py +67 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/multiply.py +91 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/subtract.py +82 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/batch_normalization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/group_normalization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/layer_normalization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/spectral_normalization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/unit_normalization.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/batch_normalization.py +352 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/group_normalization.py +240 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/layer_normalization.py +265 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/spectral_normalization.py +121 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/unit_normalization.py +63 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling1d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling2d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling3d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_global_pooling.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_pooling.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling1d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling2d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling3d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling1d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling2d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling3d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling1d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling2d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling3d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling1d.py +92 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling2d.py +109 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling3d.py +85 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_global_pooling.py +49 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_pooling.py +81 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling1d.py +86 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling2d.py +68 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling3d.py +69 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling1d.py +66 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling2d.py +68 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling3d.py +69 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling1d.py +93 -0
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/add.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.merging.base_merge import Merge
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export("keras.layers.Add")
|
| 7 |
+
class Add(Merge):
|
| 8 |
+
"""Performs elementwise addition operation.
|
| 9 |
+
|
| 10 |
+
It takes as input a list of tensors, all of the same shape,
|
| 11 |
+
and returns a single tensor (also of the same shape).
|
| 12 |
+
|
| 13 |
+
Examples:
|
| 14 |
+
|
| 15 |
+
>>> input_shape = (2, 3, 4)
|
| 16 |
+
>>> x1 = np.random.rand(*input_shape)
|
| 17 |
+
>>> x2 = np.random.rand(*input_shape)
|
| 18 |
+
>>> y = keras.layers.Add()([x1, x2])
|
| 19 |
+
|
| 20 |
+
Usage in a Keras model:
|
| 21 |
+
|
| 22 |
+
>>> input1 = keras.layers.Input(shape=(16,))
|
| 23 |
+
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
|
| 24 |
+
>>> input2 = keras.layers.Input(shape=(32,))
|
| 25 |
+
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
|
| 26 |
+
>>> # equivalent to `added = keras.layers.add([x1, x2])`
|
| 27 |
+
>>> added = keras.layers.Add()([x1, x2])
|
| 28 |
+
>>> out = keras.layers.Dense(4)(added)
|
| 29 |
+
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
|
| 30 |
+
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
def _merge_function(self, inputs):
|
| 34 |
+
output = inputs[0]
|
| 35 |
+
for i in range(1, len(inputs)):
|
| 36 |
+
output = ops.add(output, inputs[i])
|
| 37 |
+
return output
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@keras_export("keras.layers.add")
|
| 41 |
+
def add(inputs, **kwargs):
|
| 42 |
+
"""Functional interface to the `keras.layers.Add` layer.
|
| 43 |
+
|
| 44 |
+
Args:
|
| 45 |
+
inputs: A list of input tensors with the same shape.
|
| 46 |
+
**kwargs: Standard layer keyword arguments.
|
| 47 |
+
|
| 48 |
+
Returns:
|
| 49 |
+
A tensor as the sum of the inputs. It has the same shape as the inputs.
|
| 50 |
+
|
| 51 |
+
Examples:
|
| 52 |
+
|
| 53 |
+
>>> input_shape = (2, 3, 4)
|
| 54 |
+
>>> x1 = np.random.rand(*input_shape)
|
| 55 |
+
>>> x2 = np.random.rand(*input_shape)
|
| 56 |
+
>>> y = keras.layers.add([x1, x2])
|
| 57 |
+
|
| 58 |
+
Usage in a Keras model:
|
| 59 |
+
|
| 60 |
+
>>> input1 = keras.layers.Input(shape=(16,))
|
| 61 |
+
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
|
| 62 |
+
>>> input2 = keras.layers.Input(shape=(32,))
|
| 63 |
+
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
|
| 64 |
+
>>> added = keras.layers.add([x1, x2])
|
| 65 |
+
>>> out = keras.layers.Dense(4)(added)
|
| 66 |
+
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
|
| 67 |
+
|
| 68 |
+
"""
|
| 69 |
+
return Add(**kwargs)(inputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/average.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.merging.base_merge import Merge
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export("keras.layers.Average")
|
| 7 |
+
class Average(Merge):
|
| 8 |
+
"""Averages a list of inputs element-wise..
|
| 9 |
+
|
| 10 |
+
It takes as input a list of tensors, all of the same shape,
|
| 11 |
+
and returns a single tensor (also of the same shape).
|
| 12 |
+
|
| 13 |
+
Examples:
|
| 14 |
+
|
| 15 |
+
>>> input_shape = (2, 3, 4)
|
| 16 |
+
>>> x1 = np.random.rand(*input_shape)
|
| 17 |
+
>>> x2 = np.random.rand(*input_shape)
|
| 18 |
+
>>> y = keras.layers.Average()([x1, x2])
|
| 19 |
+
|
| 20 |
+
Usage in a Keras model:
|
| 21 |
+
|
| 22 |
+
>>> input1 = keras.layers.Input(shape=(16,))
|
| 23 |
+
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
|
| 24 |
+
>>> input2 = keras.layers.Input(shape=(32,))
|
| 25 |
+
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
|
| 26 |
+
>>> # equivalent to `y = keras.layers.average([x1, x2])`
|
| 27 |
+
>>> y = keras.layers.Average()([x1, x2])
|
| 28 |
+
>>> out = keras.layers.Dense(4)(y)
|
| 29 |
+
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
|
| 30 |
+
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
def _merge_function(self, inputs):
|
| 34 |
+
output = inputs[0]
|
| 35 |
+
for i in range(1, len(inputs)):
|
| 36 |
+
output = ops.add(output, inputs[i])
|
| 37 |
+
return output / len(inputs)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@keras_export("keras.layers.average")
|
| 41 |
+
def average(inputs, **kwargs):
|
| 42 |
+
"""Functional interface to the `keras.layers.Average` layer.
|
| 43 |
+
|
| 44 |
+
Args:
|
| 45 |
+
inputs: A list of input tensors , all of the same shape.
|
| 46 |
+
**kwargs: Standard layer keyword arguments.
|
| 47 |
+
|
| 48 |
+
Returns:
|
| 49 |
+
A tensor as the element-wise product of the inputs with the same
|
| 50 |
+
shape as the inputs.
|
| 51 |
+
|
| 52 |
+
Examples:
|
| 53 |
+
|
| 54 |
+
>>> input_shape = (2, 3, 4)
|
| 55 |
+
>>> x1 = np.random.rand(*input_shape)
|
| 56 |
+
>>> x2 = np.random.rand(*input_shape)
|
| 57 |
+
>>> y = keras.layers.average([x1, x2])
|
| 58 |
+
|
| 59 |
+
Usage in a Keras model:
|
| 60 |
+
|
| 61 |
+
>>> input1 = keras.layers.Input(shape=(16,))
|
| 62 |
+
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
|
| 63 |
+
>>> input2 = keras.layers.Input(shape=(32,))
|
| 64 |
+
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
|
| 65 |
+
>>> y = keras.layers.average([x1, x2])
|
| 66 |
+
>>> out = keras.layers.Dense(4)(y)
|
| 67 |
+
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
|
| 68 |
+
|
| 69 |
+
"""
|
| 70 |
+
return Average(**kwargs)(inputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/base_merge.py
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.backend.common.keras_tensor import KerasTensor
|
| 4 |
+
from keras.src.layers.layer import Layer
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Merge(Layer):
|
| 8 |
+
"""Generic merge layer for elementwise merge functions.
|
| 9 |
+
|
| 10 |
+
Used to implement `Sum`, `Average`, etc.
|
| 11 |
+
|
| 12 |
+
Args:
|
| 13 |
+
**kwargs: standard layer keyword arguments.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
def __init__(self, **kwargs):
|
| 17 |
+
super().__init__(**kwargs)
|
| 18 |
+
self.supports_masking = True
|
| 19 |
+
|
| 20 |
+
def _merge_function(self, inputs):
|
| 21 |
+
raise NotImplementedError
|
| 22 |
+
|
| 23 |
+
def _apply_merge_op_and_or_mask(self, op_fn, inputs):
|
| 24 |
+
"""Merge a set of inputs by applying `op_fn` and ORing the masks.
|
| 25 |
+
|
| 26 |
+
We use this for `Minimum` and `Maximum` as it handles the fact that
|
| 27 |
+
there is no identity element. If applicable, the mask obtained by ORing
|
| 28 |
+
all masks is set on the output.
|
| 29 |
+
|
| 30 |
+
Args:
|
| 31 |
+
op_fn: binary operation to apply to tensor pair.
|
| 32 |
+
inputs: array of tensors to apply operation on.
|
| 33 |
+
"""
|
| 34 |
+
output = None
|
| 35 |
+
output_mask = None
|
| 36 |
+
|
| 37 |
+
for x in inputs:
|
| 38 |
+
mask = backend.get_keras_mask(x)
|
| 39 |
+
if mask is not None:
|
| 40 |
+
mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x))
|
| 41 |
+
if output is None:
|
| 42 |
+
output = x
|
| 43 |
+
output_mask = mask
|
| 44 |
+
continue
|
| 45 |
+
if mask is not None:
|
| 46 |
+
x = ops.where(mask, x, output)
|
| 47 |
+
if output_mask is not None:
|
| 48 |
+
output = ops.where(output_mask, output, x)
|
| 49 |
+
if mask is not None and output_mask is not None:
|
| 50 |
+
output_mask = ops.logical_or(output_mask, mask)
|
| 51 |
+
else:
|
| 52 |
+
output_mask = None
|
| 53 |
+
output = op_fn(output, x)
|
| 54 |
+
|
| 55 |
+
if output_mask is not None:
|
| 56 |
+
output_mask = ops.any(output_mask, axis=-1, keepdims=False)
|
| 57 |
+
backend.set_keras_mask(output, output_mask)
|
| 58 |
+
return output
|
| 59 |
+
|
| 60 |
+
def _compute_elemwise_op_output_shape(self, shape1, shape2):
|
| 61 |
+
"""Computes the shape of the resultant of an elementwise operation.
|
| 62 |
+
|
| 63 |
+
Args:
|
| 64 |
+
shape1: Tuple or None. Shape of the first tensor
|
| 65 |
+
shape2: Tuple or None. Shape of the second tensor
|
| 66 |
+
|
| 67 |
+
Returns:
|
| 68 |
+
Expected output shape when an element-wise operation is
|
| 69 |
+
carried out on 2 tensors with shapes shape1 and shape2.
|
| 70 |
+
tuple or None.
|
| 71 |
+
|
| 72 |
+
Raises:
|
| 73 |
+
ValueError: If shape1 and shape2 are not compatible for
|
| 74 |
+
element-wise operations.
|
| 75 |
+
"""
|
| 76 |
+
|
| 77 |
+
if None in [shape1, shape2]:
|
| 78 |
+
return None
|
| 79 |
+
elif len(shape1) < len(shape2):
|
| 80 |
+
return self._compute_elemwise_op_output_shape(shape2, shape1)
|
| 81 |
+
elif not shape2:
|
| 82 |
+
return shape1
|
| 83 |
+
output_shape = list(shape1[: -len(shape2)])
|
| 84 |
+
for i, j in zip(shape1[-len(shape2) :], shape2):
|
| 85 |
+
if i is None or j is None:
|
| 86 |
+
output_shape.append(None)
|
| 87 |
+
elif i == 1:
|
| 88 |
+
output_shape.append(j)
|
| 89 |
+
elif j == 1:
|
| 90 |
+
output_shape.append(i)
|
| 91 |
+
else:
|
| 92 |
+
if i != j:
|
| 93 |
+
raise ValueError(
|
| 94 |
+
"Inputs have incompatible shapes. "
|
| 95 |
+
f"Received shapes {shape1} and {shape2}"
|
| 96 |
+
)
|
| 97 |
+
output_shape.append(i)
|
| 98 |
+
return tuple(output_shape)
|
| 99 |
+
|
| 100 |
+
def build(self, input_shape):
|
| 101 |
+
# Used purely for shape validation.
|
| 102 |
+
if not isinstance(input_shape[0], (tuple, list)):
|
| 103 |
+
raise ValueError(
|
| 104 |
+
"A merge layer should be called on a list of inputs. "
|
| 105 |
+
f"Received: input_shape={input_shape} (not a list of shapes)"
|
| 106 |
+
)
|
| 107 |
+
if len(input_shape) < 1:
|
| 108 |
+
raise ValueError(
|
| 109 |
+
"A merge layer should be called "
|
| 110 |
+
"on a list of at least 1 input. "
|
| 111 |
+
f"Received {len(input_shape)} inputs. "
|
| 112 |
+
f"Full input_shape received: {input_shape}"
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
batch_sizes = {s[0] for s in input_shape if s} - {None}
|
| 116 |
+
if len(batch_sizes) > 1:
|
| 117 |
+
raise ValueError(
|
| 118 |
+
"Cannot merge tensors with different batch sizes. "
|
| 119 |
+
f"Received tensors with shapes {input_shape}"
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
if input_shape[0] is None:
|
| 123 |
+
output_shape = None
|
| 124 |
+
else:
|
| 125 |
+
output_shape = input_shape[0][1:]
|
| 126 |
+
|
| 127 |
+
for i in range(1, len(input_shape)):
|
| 128 |
+
if input_shape[i] is None:
|
| 129 |
+
shape = None
|
| 130 |
+
else:
|
| 131 |
+
shape = input_shape[i][1:]
|
| 132 |
+
output_shape = self._compute_elemwise_op_output_shape(
|
| 133 |
+
output_shape, shape
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
# If the inputs have different ranks, we have to reshape them
|
| 137 |
+
# to make them broadcastable.
|
| 138 |
+
if None not in input_shape and len(set(map(len, input_shape))) == 1:
|
| 139 |
+
self._reshape_required = False
|
| 140 |
+
else:
|
| 141 |
+
self._reshape_required = True
|
| 142 |
+
self.built = True
|
| 143 |
+
|
| 144 |
+
def call(self, inputs):
|
| 145 |
+
if not isinstance(inputs, (list, tuple)):
|
| 146 |
+
raise ValueError(
|
| 147 |
+
"A merge layer should be called on a list of inputs. "
|
| 148 |
+
f"Received: inputs={inputs} (not a list of tensors)"
|
| 149 |
+
)
|
| 150 |
+
if self._reshape_required:
|
| 151 |
+
reshaped_inputs = []
|
| 152 |
+
input_ndims = list(map(ops.ndim, inputs))
|
| 153 |
+
if None not in input_ndims:
|
| 154 |
+
# If ranks of all inputs are available,
|
| 155 |
+
# we simply expand each of them at axis=1
|
| 156 |
+
# until all of them have the same rank.
|
| 157 |
+
max_ndim = max(input_ndims)
|
| 158 |
+
for x in inputs:
|
| 159 |
+
x_ndim = ops.ndim(x)
|
| 160 |
+
for _ in range(max_ndim - x_ndim):
|
| 161 |
+
x = ops.expand_dims(x, axis=1)
|
| 162 |
+
reshaped_inputs.append(x)
|
| 163 |
+
return self._merge_function(reshaped_inputs)
|
| 164 |
+
else:
|
| 165 |
+
# Transpose all inputs so that batch size is the last dimension.
|
| 166 |
+
# (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... ,
|
| 167 |
+
# batch_size)
|
| 168 |
+
transposed = False
|
| 169 |
+
for x in inputs:
|
| 170 |
+
x_ndim = ops.ndim(x)
|
| 171 |
+
|
| 172 |
+
if x_ndim is None:
|
| 173 |
+
x_shape = ops.shape(x)
|
| 174 |
+
batch_size = x_shape[0]
|
| 175 |
+
|
| 176 |
+
new_shape = backend.concatenate(
|
| 177 |
+
[x_shape[1:], ops.expand_dims(batch_size, axis=-1)]
|
| 178 |
+
)
|
| 179 |
+
x_transposed = ops.reshape(
|
| 180 |
+
x,
|
| 181 |
+
ops.stack(
|
| 182 |
+
[batch_size, ops.prod(x_shape[1:])],
|
| 183 |
+
axis=0,
|
| 184 |
+
),
|
| 185 |
+
)
|
| 186 |
+
x_transposed = ops.transpose(x_transposed, perm=(1, 0))
|
| 187 |
+
x_transposed = ops.reshape(x_transposed, new_shape)
|
| 188 |
+
|
| 189 |
+
reshaped_inputs.append(x_transposed)
|
| 190 |
+
transposed = True
|
| 191 |
+
|
| 192 |
+
elif x_ndim > 1:
|
| 193 |
+
dims = list(range(1, x_ndim)) + [0]
|
| 194 |
+
reshaped_inputs.append(ops.transpose(x, perm=dims))
|
| 195 |
+
print(dims)
|
| 196 |
+
transposed = True
|
| 197 |
+
else:
|
| 198 |
+
# We don't transpose inputs if they are 1D vectors or
|
| 199 |
+
# scalars.
|
| 200 |
+
reshaped_inputs.append(x)
|
| 201 |
+
|
| 202 |
+
y = self._merge_function(reshaped_inputs)
|
| 203 |
+
y_ndim = ops.ndim(y)
|
| 204 |
+
|
| 205 |
+
if transposed:
|
| 206 |
+
# If inputs have been transposed, we have to transpose the
|
| 207 |
+
# output too.
|
| 208 |
+
if y_ndim is None:
|
| 209 |
+
y_shape = ops.shape(y)
|
| 210 |
+
y_ndim = ops.shape(y_shape)[0]
|
| 211 |
+
batch_size = y_shape[y_ndim - 1]
|
| 212 |
+
new_shape = ops.concatenate(
|
| 213 |
+
[
|
| 214 |
+
ops.expand_dims(batch_size, axis=-1),
|
| 215 |
+
y_shape[: y_ndim - 1],
|
| 216 |
+
]
|
| 217 |
+
)
|
| 218 |
+
y = ops.reshape(y, (-1, batch_size))
|
| 219 |
+
y = ops.transpose(y, perm=(1, 0))
|
| 220 |
+
y = ops.reshape(y, new_shape)
|
| 221 |
+
elif y_ndim > 1:
|
| 222 |
+
dims = [y_ndim - 1] + list(range(y_ndim - 1))
|
| 223 |
+
y = ops.transpose(y, perm=dims)
|
| 224 |
+
return y
|
| 225 |
+
else:
|
| 226 |
+
return self._merge_function(inputs)
|
| 227 |
+
|
| 228 |
+
def compute_output_shape(self, input_shape):
|
| 229 |
+
if input_shape[0] is None:
|
| 230 |
+
output_shape = None
|
| 231 |
+
else:
|
| 232 |
+
output_shape = input_shape[0][1:]
|
| 233 |
+
|
| 234 |
+
for i in range(1, len(input_shape)):
|
| 235 |
+
if input_shape[i] is None:
|
| 236 |
+
shape = None
|
| 237 |
+
else:
|
| 238 |
+
shape = input_shape[i][1:]
|
| 239 |
+
output_shape = self._compute_elemwise_op_output_shape(
|
| 240 |
+
output_shape, shape
|
| 241 |
+
)
|
| 242 |
+
batch_sizes = {s[0] for s in input_shape if s is not None} - {None}
|
| 243 |
+
if len(batch_sizes) == 1:
|
| 244 |
+
output_shape = (list(batch_sizes)[0],) + output_shape
|
| 245 |
+
else:
|
| 246 |
+
output_shape = (None,) + output_shape
|
| 247 |
+
return output_shape
|
| 248 |
+
|
| 249 |
+
def compute_output_spec(self, inputs):
|
| 250 |
+
output_shape = self.compute_output_shape([x.shape for x in inputs])
|
| 251 |
+
output_sparse = all(x.sparse for x in inputs)
|
| 252 |
+
return KerasTensor(
|
| 253 |
+
output_shape, dtype=self.compute_dtype, sparse=output_sparse
|
| 254 |
+
)
|
| 255 |
+
|
| 256 |
+
def compute_mask(self, inputs, mask=None):
|
| 257 |
+
if mask is None:
|
| 258 |
+
return None
|
| 259 |
+
if not isinstance(mask, (tuple, list)):
|
| 260 |
+
raise ValueError(f"`mask` should be a list. Received: mask={mask}")
|
| 261 |
+
if not isinstance(inputs, (tuple, list)):
|
| 262 |
+
raise ValueError(
|
| 263 |
+
f"`inputs` should be a list. Received: inputs={inputs}"
|
| 264 |
+
)
|
| 265 |
+
if len(mask) != len(inputs):
|
| 266 |
+
raise ValueError(
|
| 267 |
+
"The lists `inputs` and `mask` should have the same length. "
|
| 268 |
+
f"Received: inputs={inputs} of length {len(inputs)}, and "
|
| 269 |
+
f"mask={mask} of length {len(mask)}"
|
| 270 |
+
)
|
| 271 |
+
# Default implementation does an OR between the masks, which works
|
| 272 |
+
# for `Add`, `Subtract`, `Average`, `Maximum`, `Minimum`, `Multiply`.
|
| 273 |
+
if any(m is None for m in mask):
|
| 274 |
+
return None
|
| 275 |
+
output_mask = mask[0]
|
| 276 |
+
for m in mask[1:]:
|
| 277 |
+
output_mask = ops.logical_or(output_mask, m)
|
| 278 |
+
return output_mask
|
| 279 |
+
|
| 280 |
+
def get_config(self):
|
| 281 |
+
return super().get_config()
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/concatenate.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
|
| 3 |
+
from keras.src import ops
|
| 4 |
+
from keras.src.api_export import keras_export
|
| 5 |
+
from keras.src.layers.merging.base_merge import Merge
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.layers.Concatenate")
|
| 9 |
+
class Concatenate(Merge):
|
| 10 |
+
"""Concatenates a list of inputs.
|
| 11 |
+
|
| 12 |
+
It takes as input a list of tensors, all of the same shape except
|
| 13 |
+
for the concatenation axis, and returns a single tensor that is the
|
| 14 |
+
concatenation of all inputs.
|
| 15 |
+
|
| 16 |
+
Examples:
|
| 17 |
+
|
| 18 |
+
>>> x = np.arange(20).reshape(2, 2, 5)
|
| 19 |
+
>>> y = np.arange(20, 30).reshape(2, 1, 5)
|
| 20 |
+
>>> keras.layers.Concatenate(axis=1)([x, y])
|
| 21 |
+
|
| 22 |
+
Usage in a Keras model:
|
| 23 |
+
|
| 24 |
+
>>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
|
| 25 |
+
>>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
|
| 26 |
+
>>> y = keras.layers.Concatenate()([x1, x2])
|
| 27 |
+
|
| 28 |
+
Args:
|
| 29 |
+
axis: Axis along which to concatenate.
|
| 30 |
+
**kwargs: Standard layer keyword arguments.
|
| 31 |
+
|
| 32 |
+
Returns:
|
| 33 |
+
A tensor, the concatenation of the inputs alongside axis `axis`.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(self, axis=-1, **kwargs):
|
| 37 |
+
super().__init__(**kwargs)
|
| 38 |
+
self.axis = axis
|
| 39 |
+
self.supports_masking = True
|
| 40 |
+
self._reshape_required = False
|
| 41 |
+
|
| 42 |
+
def build(self, input_shape):
|
| 43 |
+
# Used purely for shape validation.
|
| 44 |
+
if len(input_shape) < 1 or not isinstance(
|
| 45 |
+
input_shape[0], (tuple, list)
|
| 46 |
+
):
|
| 47 |
+
raise ValueError(
|
| 48 |
+
"A `Concatenate` layer should be called on a list of "
|
| 49 |
+
f"at least 1 input. Received: input_shape={input_shape}"
|
| 50 |
+
)
|
| 51 |
+
if all(shape is None for shape in input_shape):
|
| 52 |
+
return
|
| 53 |
+
|
| 54 |
+
reduced_inputs_shapes = [list(shape) for shape in input_shape]
|
| 55 |
+
reduced_inputs_shapes_copy = copy.copy(reduced_inputs_shapes)
|
| 56 |
+
shape_set = set()
|
| 57 |
+
for i in range(len(reduced_inputs_shapes_copy)):
|
| 58 |
+
# Convert self.axis to positive axis for each input
|
| 59 |
+
# in case self.axis is a negative number
|
| 60 |
+
concat_axis = self.axis % len(reduced_inputs_shapes_copy[i])
|
| 61 |
+
# Skip batch axis.
|
| 62 |
+
for axis, axis_value in enumerate(
|
| 63 |
+
reduced_inputs_shapes_copy, start=1
|
| 64 |
+
):
|
| 65 |
+
# Remove squeezable axes (axes with value of 1)
|
| 66 |
+
# if not in the axis that will be used for concatenation
|
| 67 |
+
# otherwise leave it.
|
| 68 |
+
# This approach allows building the layer,
|
| 69 |
+
# but if tensor shapes are not the same when
|
| 70 |
+
# calling, an exception will be raised.
|
| 71 |
+
if axis != concat_axis and axis_value == 1:
|
| 72 |
+
del reduced_inputs_shapes[i][axis]
|
| 73 |
+
|
| 74 |
+
if len(reduced_inputs_shapes[i]) > self.axis:
|
| 75 |
+
del reduced_inputs_shapes[i][self.axis]
|
| 76 |
+
shape_set.add(tuple(reduced_inputs_shapes[i]))
|
| 77 |
+
|
| 78 |
+
if len(shape_set) != 1:
|
| 79 |
+
err_msg = (
|
| 80 |
+
"A `Concatenate` layer requires inputs with matching shapes "
|
| 81 |
+
"except for the concatenation axis. "
|
| 82 |
+
f"Received: input_shape={input_shape}"
|
| 83 |
+
)
|
| 84 |
+
# Make sure all the shapes have same ranks.
|
| 85 |
+
ranks = set(len(shape) for shape in shape_set)
|
| 86 |
+
if len(ranks) != 1:
|
| 87 |
+
raise ValueError(err_msg)
|
| 88 |
+
# Get the only rank for the set.
|
| 89 |
+
(rank,) = ranks
|
| 90 |
+
for axis in range(rank):
|
| 91 |
+
# Skip the Nones in the shape since they are dynamic, also the
|
| 92 |
+
# axis for concat has been removed above.
|
| 93 |
+
unique_dims = set(
|
| 94 |
+
shape[axis]
|
| 95 |
+
for shape in shape_set
|
| 96 |
+
if shape[axis] is not None
|
| 97 |
+
)
|
| 98 |
+
if len(unique_dims) > 1:
|
| 99 |
+
raise ValueError(err_msg)
|
| 100 |
+
self.built = True
|
| 101 |
+
|
| 102 |
+
def _merge_function(self, inputs):
|
| 103 |
+
return ops.concatenate(inputs, axis=self.axis)
|
| 104 |
+
|
| 105 |
+
def compute_output_shape(self, input_shape):
|
| 106 |
+
if (not isinstance(input_shape, (tuple, list))) or (
|
| 107 |
+
not isinstance(input_shape[0], (tuple, list))
|
| 108 |
+
):
|
| 109 |
+
raise ValueError(
|
| 110 |
+
"A `Concatenate` layer should be called on a list of inputs. "
|
| 111 |
+
f"Received: input_shape={input_shape}"
|
| 112 |
+
)
|
| 113 |
+
input_shapes = input_shape
|
| 114 |
+
output_shape = list(input_shapes[0])
|
| 115 |
+
|
| 116 |
+
for shape in input_shapes[1:]:
|
| 117 |
+
if output_shape[self.axis] is None or shape[self.axis] is None:
|
| 118 |
+
output_shape[self.axis] = None
|
| 119 |
+
break
|
| 120 |
+
output_shape[self.axis] += shape[self.axis]
|
| 121 |
+
return tuple(output_shape)
|
| 122 |
+
|
| 123 |
+
def compute_mask(self, inputs, mask=None):
|
| 124 |
+
if mask is None:
|
| 125 |
+
return None
|
| 126 |
+
if not isinstance(mask, (tuple, list)):
|
| 127 |
+
raise ValueError(f"`mask` should be a list. Received mask={mask}")
|
| 128 |
+
if not isinstance(inputs, (tuple, list)):
|
| 129 |
+
raise ValueError(
|
| 130 |
+
f"`inputs` should be a list. Received: inputs={inputs}"
|
| 131 |
+
)
|
| 132 |
+
if len(mask) != len(inputs):
|
| 133 |
+
raise ValueError(
|
| 134 |
+
"The lists `inputs` and `mask` should have the same length. "
|
| 135 |
+
f"Received: inputs={inputs} of length {len(inputs)}, and "
|
| 136 |
+
f"mask={mask} of length {len(mask)}"
|
| 137 |
+
)
|
| 138 |
+
if all(m is None for m in mask):
|
| 139 |
+
return None
|
| 140 |
+
# Make a list of masks while making sure
|
| 141 |
+
# the dimensionality of each mask
|
| 142 |
+
# is the same as the corresponding input.
|
| 143 |
+
masks = []
|
| 144 |
+
for input_i, mask_i in zip(inputs, mask):
|
| 145 |
+
if mask_i is None:
|
| 146 |
+
# Input is unmasked. Append all 1s to masks,
|
| 147 |
+
masks.append(ops.ones_like(input_i, dtype="bool"))
|
| 148 |
+
elif mask_i.ndim < input_i.ndim:
|
| 149 |
+
# Mask is smaller than the input, expand it
|
| 150 |
+
masks.append(
|
| 151 |
+
ops.broadcast_to(
|
| 152 |
+
ops.expand_dims(mask_i, axis=-1), ops.shape(input_i)
|
| 153 |
+
)
|
| 154 |
+
)
|
| 155 |
+
else:
|
| 156 |
+
masks.append(mask_i)
|
| 157 |
+
concatenated = ops.concatenate(masks, axis=self.axis)
|
| 158 |
+
return ops.any(concatenated, axis=-1, keepdims=False)
|
| 159 |
+
|
| 160 |
+
def get_config(self):
|
| 161 |
+
config = {"axis": self.axis}
|
| 162 |
+
base_config = super().get_config()
|
| 163 |
+
return dict(list(base_config.items()) + list(config.items()))
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
@keras_export("keras.layers.concatenate")
def concatenate(inputs, axis=-1, **kwargs):
    """Functional interface to the `Concatenate` layer.

    Args:
        inputs: A list of input tensors.
        axis: Concatenation axis.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the concatenation of the inputs alongside axis `axis`.
    """
    layer = Concatenate(axis=axis, **kwargs)
    return layer(inputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/dot.py
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.merging.base_merge import Merge
|
| 4 |
+
from keras.src.utils.numerical_utils import normalize
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def batch_dot(x, y, axes=None):
    """Batchwise dot product.

    `batch_dot` is used to compute dot product of `x` and `y` when
    `x` and `y` are data in batch, i.e. in a shape of `(batch_size, :)`.
    `batch_dot` results in a tensor or variable with less dimensions
    than the input. If the number of dimensions is reduced to 1,
    we use `expand_dims` to make sure that ndim is at least 2.

    Shape inference example: with `x.shape == (100, 20)`,
    `y.shape == (100, 30, 20)` and `axes=(1, 2)`, dimension 1 of `x` and
    dimension 2 of `y` are summed over, the batch dimension of `y` is
    dropped, and the result has shape `(100, 30)`.

    Example:

    >>> x_batch = np.ones(shape=(32, 20, 1))
    >>> y_batch = np.ones(shape=(32, 30, 20))
    >>> xy_batch_dot = batch_dot(x_batch, y_batch, axes=(1, 2))

    Args:
        x: Keras tensor or variable with `ndim >= 2`.
        y: Keras tensor or variable with `ndim >= 2`.
        axes: Tuple or list of integers with target dimensions, or single
            integer. The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]`
            should be equal.

    Returns:
        A tensor with shape equal to the concatenation of `x`'s shape
        (less the dimension that was summed over) and `y`'s shape (less the
        batch dimension and the dimension that was summed over). If the
        final rank is 1, we reshape it to `(batch_size, 1)`.
    """
    x_shape = x.shape
    y_shape = y.shape
    x_ndim = len(x_shape)
    y_ndim = len(y_shape)

    if x_ndim < 2 or y_ndim < 2:
        raise ValueError(
            "Cannot do batch_dot on inputs with rank < 2. "
            f"Received inputs with shapes {x_shape} and {y_shape}."
        )

    # Batch sizes must agree when both are statically known.
    x_batch_size = x_shape[0]
    y_batch_size = y_shape[0]
    if (
        x_batch_size is not None
        and y_batch_size is not None
        and x_batch_size != y_batch_size
    ):
        raise ValueError(
            "Cannot do batch_dot on inputs with different batch sizes. "
            f"Received inputs with shapes {x_shape} and {y_shape}."
        )

    if isinstance(axes, int):
        axes = [axes, axes]
    if axes is None:
        # Default: contract the last axis of `x` with the last axis of a
        # rank-2 `y`, otherwise with the second-to-last axis of `y`.
        if y_ndim == 2:
            axes = [x_ndim - 1, y_ndim - 1]
        else:
            axes = [x_ndim - 1, y_ndim - 2]

    if any(isinstance(a, (list, tuple)) for a in axes):
        raise ValueError(
            "Multiple target dimensions are not supported. "
            "Expected: None, int, (int, int), "
            f"Provided: {axes} "
        )

    # Work on a mutable copy and resolve negative axis indices.
    axes = list(axes)
    if axes[0] < 0:
        axes[0] += x_ndim
    if axes[1] < 0:
        axes[1] += y_ndim

    if 0 in axes:
        raise ValueError(
            "Cannot perform batch_dot over axis 0. "
            "If your inputs are not batched, "
            "add a dummy batch dimension to your "
            "inputs using keras.ops.expand_dims(x, 0)"
        )
    a0, a1 = axes
    d1 = x_shape[a0]
    d2 = y_shape[a1]
    if d1 is not None and d2 is not None and d1 != d2:
        raise ValueError(
            f"Cannot do batch_dot on inputs with shapes "
            f"{x_shape} and {y_shape} with axes={axes}. "
            f"x.shape[{axes[0]}] != y.shape[{axes[1]}] ({d1} != {d2})."
        )

    # Remember original ranks; rank-2 inputs get a temporary extra axis
    # that is squeezed out again at the end.
    orig_x_ndim = x_ndim
    orig_y_ndim = y_ndim
    if x_ndim == 2:
        x = ops.expand_dims(x, 1)
        a0 += 1
        x_ndim += 1
    if y_ndim == 2:
        y = ops.expand_dims(y, 2)
        y_ndim += 1

    # Rotate x's contraction axis to the last position.
    if a0 != x_ndim - 1:
        perm = list(range(x_ndim))
        for i in range(a0, x_ndim - 1):
            perm[i] = perm[i + 1]
        perm[-1] = a0
        x = ops.transpose(x, perm)

    # Rotate y's contraction axis to position 1 (right after the batch).
    if a1 != 1:
        perm = list(range(y_ndim))
        for i in range(a1, 1, -1):
            perm[i] = perm[i - 1]
        perm[1] = a1
        y = ops.transpose(y, perm)

    # Collapse both operands to rank 3 so a single batched matmul works.
    if x_ndim > 3:
        x_shape = ops.shape(x)
        x_mid_dims = x_shape[1:-1]
        x = ops.reshape(x, (x_shape[0], -1, x_shape[-1]))
        x_squashed = True
    else:
        x_squashed = False

    if y_ndim > 3:
        y_shape = ops.shape(y)
        y_trail_dims = y_shape[2:]
        y = ops.reshape(y, (y_shape[0], y_shape[1], -1))
        y_squashed = True
    else:
        y_squashed = False

    result = ops.matmul(x, y)

    # Undo the rank-3 squashing on the matmul output if needed.
    output_shape = ops.shape(result)
    do_reshape = False
    if x_squashed:
        output_shape = output_shape[:1] + x_mid_dims + output_shape[-1:]
        do_reshape = True
    if y_squashed:
        output_shape = output_shape[:-1] + y_trail_dims
        do_reshape = True
    if do_reshape:
        result = ops.reshape(result, output_shape)

    # Drop the axis that was added for originally rank-2 inputs.
    if orig_x_ndim == 2:
        result = ops.squeeze(result, 1)
    elif orig_y_ndim == 2:
        result = ops.squeeze(result, -1)

    return result
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@keras_export("keras.layers.Dot")
class Dot(Merge):
    """Computes element-wise dot product of two tensors.

    It takes a list of inputs of size 2, and the axes
    corresponding to each input along with the dot product
    is to be performed.

    Let's say `x` and `y` are the two input tensors with shapes
    `(2, 3, 5)` and `(2, 10, 3)`. The batch dimension should be
    of same size for both the inputs, and `axes` should correspond
    to the dimensions that have the same size in the corresponding
    inputs. e.g. with `axes=(1, 2)`, the dot product of `x`, and `y`
    will result in a tensor with shape `(2, 5, 10)`

    Example:

    >>> x = np.arange(10).reshape(1, 5, 2)
    >>> y = np.arange(10, 20).reshape(1, 2, 5)
    >>> keras.layers.Dot(axes=(1, 2))([x, y])

    Usage in a Keras model:

    >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
    >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
    >>> y = keras.layers.Dot(axes=1)([x1, x2])

    Args:
        axes: Integer or tuple of integers, axis or axes along which to
            take the dot product. If a tuple, should be two integers
            corresponding to the desired axis from the first input and the
            desired axis from the second input, respectively. Note that the
            size of the two selected axes must match.
        normalize: Whether to L2-normalize samples along the dot product
            axis before taking the dot product. If set to `True`, the
            output of the dot product is the cosine proximity between the
            two samples.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the dot product of the samples from the inputs.
    """

    def __init__(self, axes, normalize=False, **kwargs):
        super().__init__(**kwargs)
        # Validate `axes` eagerly: it must be an int or a pair of ints.
        if not isinstance(axes, int):
            if not isinstance(axes, (list, tuple)):
                raise TypeError(
                    "Invalid type for argument `axes`: it should be "
                    f"a list or an int. Received: axes={axes}"
                )
            if len(axes) != 2:
                raise ValueError(
                    "Invalid format for argument `axes`: it should contain "
                    f"two elements. Received: axes={axes}"
                )
            if not isinstance(axes[0], int) or not isinstance(axes[1], int):
                raise ValueError(
                    "Invalid format for argument `axes`: list elements "
                    f"should be integers. Received: axes={axes}"
                )
        self.axes = axes
        self.normalize = normalize
        self.supports_masking = True
        self._reshape_required = False

    def build(self, input_shape):
        """Validate that the two inputs agree along the dot-product axes."""
        # Used purely for shape validation.
        if (
            not isinstance(input_shape[0], (tuple, list))
            or len(input_shape) != 2
        ):
            raise ValueError(
                "A `Dot` layer should be called on a list of 2 inputs. "
                f"Received: input_shape={input_shape}"
            )
        shape1, shape2 = input_shape[0], input_shape[1]
        if shape1 is None or shape2 is None:
            return
        if isinstance(self.axes, int):
            if self.axes < 0:
                # Resolve a shared negative axis against each input's rank.
                axes = [self.axes % len(shape1), self.axes % len(shape2)]
            else:
                axes = [self.axes] * 2
        else:
            axes = self.axes
        if shape1[axes[0]] != shape2[axes[1]]:
            raise ValueError(
                "Incompatible input shapes: "
                f"axis values {shape1[axes[0]]} (at axis {axes[0]}) != "
                f"{shape2[axes[1]]} (at axis {axes[1]}). "
                f"Full input shapes: {shape1}, {shape2}"
            )
        self.built = True

    def _merge_function(self, inputs):
        """Compute the (optionally L2-normalized) batch dot of two inputs."""
        if len(inputs) != 2:
            raise ValueError(
                "A `Dot` layer should be called on exactly 2 inputs. "
                f"Received: inputs={inputs}"
            )
        x1, x2 = inputs[0], inputs[1]

        # Resolve negative axes against the actual input ranks.
        if isinstance(self.axes, int):
            if self.axes < 0:
                axes = [
                    self.axes % len(x1.shape),
                    self.axes % len(x2.shape),
                ]
            else:
                axes = [self.axes] * 2
        else:
            axes = []
            for i, axis in enumerate(self.axes):
                if axis < 0:
                    axes.append(axis % len(inputs[i].shape))
                else:
                    axes.append(axis)

        if self.normalize:
            # Cosine proximity: unit-normalize along the contraction axes.
            x1 = normalize(x1, axis=axes[0])
            x2 = normalize(x2, axis=axes[1])
        return batch_dot(x1, x2, axes)

    def compute_output_shape(self, input_shape):
        """Derive the output shape of the batch dot product."""
        if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2:
            raise ValueError(
                "A `Dot` layer should be called on a list of 2 inputs. "
                f"Received: input_shape={input_shape}"
            )
        shape1 = list(input_shape[0])
        shape2 = list(input_shape[1])
        if isinstance(self.axes, int):
            if self.axes < 0:
                axes = [self.axes % len(shape1), self.axes % len(shape2)]
            else:
                axes = [self.axes] * 2
        else:
            axes = self.axes
        # Drop the contracted axes and the second input's batch dimension.
        shape1.pop(axes[0])
        shape2.pop(axes[1])
        shape2.pop(0)
        output_shape = shape1 + shape2
        if len(output_shape) == 1:
            # Keep the output at least rank 2: (batch_size, 1).
            output_shape += [1]
        return tuple(output_shape)

    def compute_mask(self, inputs, mask=None):
        # Dot products do not propagate masks.
        return None

    def get_config(self):
        """Return the layer config, including `axes` and `normalize`."""
        config = super().get_config()
        config.update({"axes": self.axes, "normalize": self.normalize})
        return config
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
@keras_export("keras.layers.dot")
def dot(inputs, axes=-1, **kwargs):
    """Functional interface to the `Dot` layer.

    Args:
        inputs: A list of input tensors (at least 2).
        axes: Integer or tuple of integers,
            axis or axes along which to take the dot product.
        normalize: Whether to L2-normalize samples along the
            dot product axis before taking the dot product.
            If set to `True`, then the output of the dot product
            is the cosine proximity between the two samples.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor, the dot product of the samples from the inputs.
    """
    layer = Dot(axes=axes, **kwargs)
    return layer(inputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/maximum.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.merging.base_merge import Merge
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export("keras.layers.Maximum")
class Maximum(Merge):
    """Computes element-wise maximum on a list of inputs.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Maximum()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras.layers.maximum([x1, x2])`
    >>> y = keras.layers.Maximum()([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """

    def _merge_function(self, inputs):
        # Delegate to the shared Merge helper, which applies the binary op
        # pairwise and also combines any Keras masks on the inputs.
        return self._apply_merge_op_and_or_mask(ops.maximum, inputs)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@keras_export("keras.layers.maximum")
def maximum(inputs, **kwargs):
    """Functional interface to the `keras.layers.Maximum` layer.

    Args:
        inputs: A list of input tensors, all of the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the element-wise maximum of the inputs with the same
        shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.maximum([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> y = keras.layers.maximum([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """
    return Maximum(**kwargs)(inputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/minimum.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.merging.base_merge import Merge
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export("keras.layers.Minimum")
class Minimum(Merge):
    """Computes elementwise minimum on a list of inputs.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Minimum()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras.layers.minimum([x1, x2])`
    >>> y = keras.layers.Minimum()([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """

    def _merge_function(self, inputs):
        # Delegate to the shared Merge helper, which applies the binary op
        # pairwise and also combines any Keras masks on the inputs.
        return self._apply_merge_op_and_or_mask(ops.minimum, inputs)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@keras_export("keras.layers.minimum")
def minimum(inputs, **kwargs):
    """Functional interface to the `keras.layers.Minimum` layer.

    Args:
        inputs: A list of input tensors, all of the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the elementwise minimum of the inputs with the same
        shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.minimum([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> y = keras.layers.minimum([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """
    return Minimum(**kwargs)(inputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/multiply.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.merging.base_merge import Merge
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@keras_export("keras.layers.Multiply")
class Multiply(Merge):
    """Performs elementwise multiplication.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Multiply()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras.layers.multiply([x1, x2])`
    >>> y = keras.layers.Multiply()([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """

    def _merge_function(self, inputs):
        """Multiply inputs elementwise, honoring any attached Keras masks.

        Masked-out positions are treated as 1 during the product (so they
        do not affect other inputs) and then zeroed out in the result when
        every input carries a mask.
        """
        input_masks = [backend.get_keras_mask(t) for t in inputs]
        all_inputs_masked = all(m is not None for m in input_masks)
        product = None
        merged_mask = None

        for tensor, tensor_mask in zip(inputs, input_masks):
            if tensor_mask is not None:
                broadcast_mask = ops.broadcast_to(
                    ops.expand_dims(tensor_mask, -1), ops.shape(tensor)
                )
                # Neutralize masked positions so they act as the identity
                # element of multiplication.
                tensor = ops.where(
                    broadcast_mask, tensor, ops.cast(1, tensor.dtype)
                )
                if all_inputs_masked:
                    if merged_mask is None:
                        merged_mask = broadcast_mask
                    else:
                        merged_mask = ops.logical_or(
                            merged_mask, broadcast_mask
                        )
            if product is None:
                product = tensor
            else:
                product = ops.multiply(product, tensor)

        if all_inputs_masked:
            # Zero positions outside the combined mask per standard masking
            # rules, and attach the reduced mask to the output.
            product = ops.where(
                merged_mask, product, ops.cast(0, product.dtype)
            )
            merged_mask = ops.any(merged_mask, axis=-1, keepdims=False)
            backend.set_keras_mask(product, merged_mask)
        return product
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@keras_export("keras.layers.multiply")
def multiply(inputs, **kwargs):
    """Functional interface to the `keras.layers.Multiply` layer.

    Args:
        inputs: A list of input tensors, all of the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the elementwise product of the inputs with the same
        shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.multiply([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> y = keras.layers.multiply([x1, x2])
    >>> out = keras.layers.Dense(4)(y)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """
    layer = Multiply(**kwargs)
    return layer(inputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/subtract.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.merging.base_merge import Merge
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export("keras.layers.Subtract")
class Subtract(Merge):
    """Performs elementwise subtraction.

    It takes as input a list of tensors of size 2 both of the
    same shape, and returns a single tensor (inputs[0] - inputs[1])
    of same shape.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.Subtract()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `subtracted = keras.layers.subtract([x1, x2])`
    >>> subtracted = keras.layers.Subtract()([x1, x2])
    >>> out = keras.layers.Dense(4)(subtracted)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """

    def build(self, input_shape):
        """Validate at build time that exactly two inputs were given."""
        super().build(input_shape)
        if len(input_shape) != 2:
            raise ValueError(
                "A `Subtract` layer should be called on exactly 2 inputs. "
                f"Received: input_shape={input_shape}"
            )

    def _merge_function(self, inputs):
        """Return `inputs[0] - inputs[1]`."""
        if len(inputs) != 2:
            raise ValueError(
                "A `Subtract` layer should be called on exactly 2 inputs. "
                f"Received: inputs={inputs}"
            )
        minuend, subtrahend = inputs
        return ops.subtract(minuend, subtrahend)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@keras_export("keras.layers.subtract")
def subtract(inputs, **kwargs):
    """Functional interface to the `keras.layers.Subtract` layer.

    Args:
        inputs: A list of input tensors of size 2, each tensor of
            the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the difference of the inputs. It has the same shape
        as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras.layers.subtract([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras.layers.Input(shape=(16,))
    >>> x1 = keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras.layers.Input(shape=(32,))
    >>> x2 = keras.layers.Dense(8, activation='relu')(input2)
    >>> subtracted = keras.layers.subtract([x1, x2])
    >>> out = keras.layers.Dense(4)(subtracted)
    >>> model = keras.models.Model(inputs=[input1, input2], outputs=out)

    """
    # Instantiate the layer with any layer-level kwargs (e.g. `name`),
    # then apply it to the pair of inputs.
    layer = Subtract(**kwargs)
    return layer(inputs)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (206 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/batch_normalization.cpython-310.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/group_normalization.cpython-310.pyc
ADDED
|
Binary file (7.56 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/layer_normalization.cpython-310.pyc
ADDED
|
Binary file (8.28 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/spectral_normalization.cpython-310.pyc
ADDED
|
Binary file (4.45 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/unit_normalization.cpython-310.pyc
ADDED
|
Binary file (2.44 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/batch_normalization.py
ADDED
|
@@ -0,0 +1,352 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import constraints
|
| 3 |
+
from keras.src import initializers
|
| 4 |
+
from keras.src import ops
|
| 5 |
+
from keras.src import regularizers
|
| 6 |
+
from keras.src.api_export import keras_export
|
| 7 |
+
from keras.src.layers.input_spec import InputSpec
|
| 8 |
+
from keras.src.layers.layer import Layer
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@keras_export("keras.layers.BatchNormalization")
class BatchNormalization(Layer):
    """Layer that normalizes its inputs.

    Batch normalization applies a transformation that maintains the mean output
    close to 0 and the output standard deviation close to 1.

    Importantly, batch normalization works differently during training and
    during inference.

    **During training** (i.e. when using `fit()` or when calling the layer/model
    with the argument `training=True`), the layer normalizes its output using
    the mean and standard deviation of the current batch of inputs. That is to
    say, for each channel being normalized, the layer returns
    `gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`, where:

    - `epsilon` is small constant (configurable as part of the constructor
    arguments)
    - `gamma` is a learned scaling factor (initialized as 1), which
    can be disabled by passing `scale=False` to the constructor.
    - `beta` is a learned offset factor (initialized as 0), which
    can be disabled by passing `center=False` to the constructor.

    **During inference** (i.e. when using `evaluate()` or `predict()` or when
    calling the layer/model with the argument `training=False` (which is the
    default), the layer normalizes its output using a moving average of the
    mean and standard deviation of the batches it has seen during training. That
    is to say, it returns
    `gamma * (batch - self.moving_mean) / sqrt(self.moving_var+epsilon) + beta`.

    `self.moving_mean` and `self.moving_var` are non-trainable variables that
    are updated each time the layer in called in training mode, as such:

    - `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)`
    - `moving_var = moving_var * momentum + var(batch) * (1 - momentum)`

    As such, the layer will only normalize its inputs during inference
    *after having been trained on data that has similar statistics as the
    inference data*.

    Args:
        axis: Integer, the axis that should be normalized
            (typically the features axis). For instance, after a `Conv2D` layer
            with `data_format="channels_first"`, use `axis=1`.
        momentum: Momentum for the moving average.
        epsilon: Small float added to variance to avoid dividing by zero.
        center: If `True`, add offset of `beta` to normalized tensor.
            If `False`, `beta` is ignored.
        scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not used.
            When the next layer is linear this can be disabled
            since the scaling will be done by the next layer.
        beta_initializer: Initializer for the beta weight.
        gamma_initializer: Initializer for the gamma weight.
        moving_mean_initializer: Initializer for the moving mean.
        moving_variance_initializer: Initializer for the moving variance.
        beta_regularizer: Optional regularizer for the beta weight.
        gamma_regularizer: Optional regularizer for the gamma weight.
        beta_constraint: Optional constraint for the beta weight.
        gamma_constraint: Optional constraint for the gamma weight.
        synchronized: Only applicable with the TensorFlow backend.
            If `True`, synchronizes the global batch statistics (mean and
            variance) for the layer across all devices at each training step
            in a distributed training strategy.
            If `False`, each replica uses its own local batch statistics.
        **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode.
            - `training=True`: The layer will normalize its inputs using
            the mean and variance of the current batch of inputs.
            - `training=False`: The layer will normalize its inputs using
            the mean and variance of its moving statistics, learned during
            training.
        mask: Binary tensor of shape broadcastable to `inputs` tensor, with
            `True` values indicating the positions for which mean and variance
            should be computed. Masked elements of the current inputs are not
            taken into account for mean and variance computation during
            training. Any prior unmasked element values will be taken into
            account until their momentum expires.

    Reference:

    - [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).

    **About setting `layer.trainable = False` on a `BatchNormalization` layer:**

    The meaning of setting `layer.trainable = False` is to freeze the layer,
    i.e. its internal state will not change during training:
    its trainable weights will not be updated
    during `fit()` or `train_on_batch()`, and its state updates will not be run.

    Usually, this does not necessarily mean that the layer is run in inference
    mode (which is normally controlled by the `training` argument that can
    be passed when calling a layer). "Frozen state" and "inference mode"
    are two separate concepts.

    However, in the case of the `BatchNormalization` layer, **setting
    `trainable = False` on the layer means that the layer will be
    subsequently run in inference mode** (meaning that it will use
    the moving mean and the moving variance to normalize the current batch,
    rather than using the mean and variance of the current batch).

    Note that:

    - Setting `trainable` on an model containing other layers will recursively
    set the `trainable` value of all inner layers.
    - If the value of the `trainable` attribute is changed after calling
    `compile()` on a model, the new value doesn't take effect for this model
    until `compile()` is called again.
    """

    def __init__(
        self,
        axis=-1,
        momentum=0.99,
        epsilon=1e-3,
        center=True,
        scale=True,
        beta_initializer="zeros",
        gamma_initializer="ones",
        moving_mean_initializer="zeros",
        moving_variance_initializer="ones",
        beta_regularizer=None,
        gamma_regularizer=None,
        beta_constraint=None,
        gamma_constraint=None,
        synchronized=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Only a single integer axis is supported (not a list of axes).
        self.axis = int(axis)

        # `synchronized` requires cross-replica reductions, which are only
        # implemented for the TensorFlow backend; fail fast otherwise.
        if synchronized and backend.backend() != "tensorflow":
            raise ValueError(
                "Argument synchronized=True is only supported "
                "with the TensorFlow backend."
            )
        self.synchronized = synchronized

        self.momentum = float(momentum)
        self.epsilon = float(epsilon)
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(
            moving_variance_initializer
        )
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
        self.supports_masking = True

        # Weights are created lazily in `build` once the input shape is known.
        self.gamma = None
        self.beta = None
        self.moving_mean = None
        self.moving_variance = None
        self._reduction_axes = None

    def build(self, input_shape):
        # All per-channel weights share the size of the normalized axis.
        shape = (input_shape[self.axis],)
        if self.scale:
            # `autocast=False` keeps the variable in its own dtype so the
            # explicit casts in `call` control precision.
            self.gamma = self.add_weight(
                shape=shape,
                name="gamma",
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
                trainable=True,
                autocast=False,
            )
        if self.center:
            self.beta = self.add_weight(
                shape=shape,
                name="beta",
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
                trainable=True,
                autocast=False,
            )
        # Moving statistics are state, not parameters: non-trainable.
        self.moving_mean = self.add_weight(
            shape=shape,
            name="moving_mean",
            initializer=self.moving_mean_initializer,
            trainable=False,
            autocast=False,
        )
        self.moving_variance = self.add_weight(
            shape=shape,
            name="moving_variance",
            initializer=self.moving_variance_initializer,
            trainable=False,
            autocast=False,
        )

        # Pin both the rank and the size of the normalized axis for all
        # future calls.
        self.input_spec = InputSpec(
            ndim=len(input_shape), axes={self.axis: input_shape[self.axis]}
        )

        # Statistics are reduced over every axis except the normalized one.
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        self._reduction_axes = reduction_axes
        self.built = True

    def compute_output_shape(self, input_shape):
        # `self.axis` is normalized to an int in `__init__`; the list branch
        # is defensive.
        if isinstance(self.axis, int):
            axes = [self.axis]
        else:
            axes = self.axis

        for axis in axes:
            if axis >= len(input_shape) or axis < -len(input_shape):
                raise ValueError(
                    f"Axis {axis} is out of bounds for "
                    f"input shape {input_shape}. "
                    f"Received: axis={self.axis}"
                )
        # Normalization is shape-preserving.
        return input_shape

    def call(self, inputs, training=None, mask=None):
        # Check if the mask has one less dimension than the inputs.
        if mask is not None:
            if len(mask.shape) != len(inputs.shape) - 1:
                # Raise a value error
                raise ValueError(
                    "The mask provided should be one dimension less "
                    "than the inputs. Received: "
                    f"mask.shape={mask.shape}, inputs.shape={inputs.shape}"
                )

        compute_dtype = backend.result_type(inputs.dtype, "float32")
        # BN is prone to overflow with float16/bfloat16 inputs, so we upcast to
        # float32 for the subsequent computations.
        inputs = ops.cast(inputs, compute_dtype)

        moving_mean = ops.cast(self.moving_mean, inputs.dtype)
        moving_variance = ops.cast(self.moving_variance, inputs.dtype)

        # Batch statistics are used (and moving stats updated) only when both
        # the call is in training mode and the layer itself is trainable;
        # this is what makes `trainable = False` imply inference behavior.
        if training and self.trainable:
            mean, variance = self._moments(inputs, mask)

            # Exponential moving average update of the running statistics.
            self.moving_mean.assign(
                moving_mean * self.momentum + mean * (1.0 - self.momentum)
            )
            self.moving_variance.assign(
                moving_variance * self.momentum
                + variance * (1.0 - self.momentum)
            )
        else:
            mean = moving_mean
            variance = moving_variance

        if self.scale:
            gamma = ops.cast(self.gamma, inputs.dtype)
        else:
            gamma = None

        if self.center:
            beta = ops.cast(self.beta, inputs.dtype)
        else:
            beta = None

        outputs = ops.batch_normalization(
            x=inputs,
            mean=mean,
            variance=variance,
            axis=self.axis,
            offset=beta,
            scale=gamma,
            epsilon=self.epsilon,
        )
        # Cast back from the float32 compute dtype to the layer's dtype.
        return ops.cast(outputs, self.compute_dtype)

    def get_config(self):
        """Return the serializable config for layer reconstruction."""
        base_config = super().get_config()
        config = {
            "axis": self.axis,
            "momentum": self.momentum,
            "epsilon": self.epsilon,
            "center": self.center,
            "scale": self.scale,
            "beta_initializer": initializers.serialize(self.beta_initializer),
            "gamma_initializer": initializers.serialize(self.gamma_initializer),
            "moving_mean_initializer": initializers.serialize(
                self.moving_mean_initializer
            ),
            "moving_variance_initializer": initializers.serialize(
                self.moving_variance_initializer
            ),
            "beta_regularizer": regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": constraints.serialize(self.beta_constraint),
            "gamma_constraint": constraints.serialize(self.gamma_constraint),
            "synchronized": self.synchronized,
        }
        return {**base_config, **config}

    def _moments(self, inputs, mask):
        # Compute per-channel mean and variance over `self._reduction_axes`.
        # With no mask this is a plain (optionally cross-replica) moment
        # computation.
        if mask is None:
            return ops.moments(
                inputs,
                axes=self._reduction_axes,
                synchronized=self.synchronized,
            )

        # Masked path: compute weighted moments where the mask (rank
        # inputs.ndim - 1) is broadcast over the trailing axis.
        # NOTE(review): the trailing expand_dims assumes the channel axis is
        # the last one (axis=-1) — confirm for other axis values.
        mask_weights = ops.cast(
            mask,
            inputs.dtype,
        )
        mask_weights_broadcasted = ops.expand_dims(
            mask_weights,
            axis=-1,
        )
        weighted_inputs = mask_weights_broadcasted * inputs

        weighted_input_sum = ops.sum(
            weighted_inputs,
            self._reduction_axes,
            keepdims=True,
        )
        sum_of_weights = ops.sum(
            mask_weights_broadcasted,
            self._reduction_axes,
            keepdims=True,
        )
        # Epsilon guards against division by zero when everything is masked.
        mean = weighted_input_sum / (sum_of_weights + backend.config.epsilon())

        difference = weighted_inputs - mean
        squared_difference = ops.square(difference)
        weighted_distsq = ops.sum(
            mask_weights_broadcasted * squared_difference,
            self._reduction_axes,
            keepdims=True,
        )
        variance = weighted_distsq / (sum_of_weights + backend.config.epsilon())

        # Drop the kept singleton reduction dims to match the weight shapes.
        return ops.squeeze(mean), ops.squeeze(variance)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/group_normalization.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import constraints
|
| 3 |
+
from keras.src import initializers
|
| 4 |
+
from keras.src import ops
|
| 5 |
+
from keras.src import regularizers
|
| 6 |
+
from keras.src.api_export import keras_export
|
| 7 |
+
from keras.src.layers.input_spec import InputSpec
|
| 8 |
+
from keras.src.layers.layer import Layer
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@keras_export("keras.layers.GroupNormalization")
class GroupNormalization(Layer):
    """Group normalization layer.

    Group Normalization divides the channels into groups and computes
    within each group the mean and variance for normalization.
    Empirically, its accuracy is more stable than batch norm in a wide
    range of small batch sizes, if learning rate is adjusted linearly
    with batch sizes.

    Relation to Layer Normalization:
    If the number of groups is set to 1, then this operation becomes nearly
    identical to Layer Normalization (see Layer Normalization docs for details).

    Relation to Instance Normalization:
    If the number of groups is set to the input dimension (number of groups is
    equal to number of channels), then this operation becomes identical to
    Instance Normalization. You can achieve this via `groups=-1`.

    Args:
        groups: Integer, the number of groups for Group Normalization. Can be in
            the range `[1, N]` where N is the input dimension. The input
            dimension must be divisible by the number of groups.
            Defaults to 32.
        axis: Integer or List/Tuple. The axis or axes to normalize across.
            Typically, this is the features axis/axes. The left-out axes are
            typically the batch axis/axes. -1 is the last dimension in the
            input. Defaults to `-1`.
        epsilon: Small float added to variance to avoid dividing by zero.
            Defaults to 1e-3.
        center: If `True`, add offset of `beta` to normalized tensor.
            If `False`, `beta` is ignored. Defaults to `True`.
        scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not used.
            When the next layer is linear (also e.g. `relu`), this can be
            disabled since the scaling will be done by the next layer.
            Defaults to `True`.
        beta_initializer: Initializer for the beta weight. Defaults to zeros.
        gamma_initializer: Initializer for the gamma weight. Defaults to ones.
        beta_regularizer: Optional regularizer for the beta weight. None by
            default.
        gamma_regularizer: Optional regularizer for the gamma weight. None by
            default.
        beta_constraint: Optional constraint for the beta weight.
            None by default.
        gamma_constraint: Optional constraint for the gamma weight. None by
            default. Input shape: Arbitrary. Use the keyword argument
            `input_shape` (tuple of integers, does not include the samples
            axis) when using this layer as the first layer in a model.
            Output shape: Same shape as input.
        **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).

    Reference:

    - [Yuxin Wu & Kaiming He, 2018](https://arxiv.org/abs/1803.08494)
    """

    def __init__(
        self,
        groups=32,
        axis=-1,
        epsilon=1e-3,
        center=True,
        scale=True,
        beta_initializer="zeros",
        gamma_initializer="ones",
        beta_regularizer=None,
        gamma_regularizer=None,
        beta_constraint=None,
        gamma_constraint=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.groups = groups
        self.axis = axis
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)

    def build(self, input_shape):
        # Validate the channel dimension and create the per-channel weights.
        dim = input_shape[self.axis]

        if dim is None:
            raise ValueError(
                f"Axis {self.axis} of input tensor should have a defined "
                "dimension but the layer received an input with shape "
                f"{input_shape}."
            )

        # `groups=-1` means one group per channel (instance normalization).
        if self.groups == -1:
            self.groups = dim

        if dim < self.groups:
            raise ValueError(
                f"Number of groups ({self.groups}) cannot be more than the "
                f"number of channels ({dim})."
            )

        if dim % self.groups != 0:
            raise ValueError(
                f"Number of groups ({self.groups}) must be a multiple "
                f"of the number of channels ({dim})."
            )

        self.input_spec = InputSpec(
            ndim=len(input_shape), axes={self.axis: dim}
        )

        if self.scale:
            self.gamma = self.add_weight(
                shape=(dim,),
                name="gamma",
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
            )
        else:
            self.gamma = None

        if self.center:
            self.beta = self.add_weight(
                shape=(dim,),
                name="beta",
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
            )
        else:
            self.beta = None

        super().build(input_shape)

    def call(self, inputs):
        # Reshape channels into (groups, channels_per_group), normalize
        # within each group, then restore the original shape.
        reshaped_inputs = self._reshape_into_groups(inputs)
        normalized_inputs = self._apply_normalization(
            reshaped_inputs, inputs.shape
        )
        return ops.reshape(normalized_inputs, ops.shape(inputs))

    def _reshape_into_groups(self, inputs):
        # Build a target shape equal to the input shape but with the channel
        # axis split into (groups, channels // groups).
        input_shape = ops.shape(inputs)
        group_shape = list(inputs.shape)
        # Batch dimension may be unknown; let reshape infer it.
        group_shape[0] = -1
        # Replace any remaining unknown static dims with their dynamic sizes.
        for i, e in enumerate(group_shape[1:]):
            if e is None:
                group_shape[i + 1] = input_shape[i + 1]

        group_shape[self.axis] = input_shape[self.axis] // self.groups
        group_shape.insert(self.axis, self.groups)
        reshaped_inputs = ops.reshape(inputs, group_shape)
        return reshaped_inputs

    def _apply_normalization(self, reshaped_inputs, input_shape):
        inputs_dtype = reshaped_inputs.dtype
        compute_dtype = backend.result_type(inputs_dtype, "float32")
        # GN is prone to overflow with float16/bfloat16 inputs, so we upcast to
        # float32 for the subsequent computations.
        reshaped_inputs = ops.cast(reshaped_inputs, compute_dtype)

        # Reduce over every non-batch axis except the group axis, so each
        # group gets its own mean/variance.
        group_reduction_axes = list(range(1, len(reshaped_inputs.shape)))

        # After `insert` in `_reshape_into_groups`, the group axis sits one
        # position before the original channel axis.
        # NOTE(review): this arithmetic assumes an integer `axis`; the
        # docstring also mentions list/tuple axes — confirm that path.
        axis = -2 if self.axis == -1 else self.axis - 1
        group_reduction_axes.pop(axis)

        broadcast_shape = self._create_broadcast_shape(input_shape)
        mean, variance = ops.moments(
            reshaped_inputs, axes=group_reduction_axes, keepdims=True
        )

        # Compute the batch normalization.
        # Fold gamma into the inverse stddev and beta into the shift so the
        # normalization is a single multiply-add.
        inv = ops.rsqrt(variance + self.epsilon)
        if self.scale:
            gamma = ops.reshape(self.gamma, broadcast_shape)
            gamma = ops.cast(gamma, reshaped_inputs.dtype)
            inv = inv * gamma

        res = -mean * inv
        if self.center:
            beta = ops.reshape(self.beta, broadcast_shape)
            beta = ops.cast(beta, reshaped_inputs.dtype)
            res = res + beta

        normalized_inputs = reshaped_inputs * inv + res
        # Cast back to the caller's dtype after the float32 computation.
        normalized_inputs = ops.cast(normalized_inputs, inputs_dtype)

        return normalized_inputs

    def _create_broadcast_shape(self, input_shape):
        # Shape of all ones except the split channel axes, so the (dim,)
        # gamma/beta weights broadcast against the grouped tensor.
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
        broadcast_shape.insert(self.axis, self.groups)
        return broadcast_shape

    def compute_output_shape(self, input_shape):
        if isinstance(self.axis, int):
            axes = [self.axis]
        else:
            axes = self.axis

        for axis in axes:
            if axis >= len(input_shape) or axis < -len(input_shape):
                raise ValueError(
                    f"Axis {axis} is out of bounds for "
                    f"input shape {input_shape}. "
                    f"Received: axis={self.axis}"
                )
        # Normalization preserves the input shape.
        return input_shape

    def get_config(self):
        """Return the serializable config for layer reconstruction."""
        config = {
            "groups": self.groups,
            "axis": self.axis,
            "epsilon": self.epsilon,
            "center": self.center,
            "scale": self.scale,
            "beta_initializer": initializers.serialize(self.beta_initializer),
            "gamma_initializer": initializers.serialize(self.gamma_initializer),
            "beta_regularizer": regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": constraints.serialize(self.beta_constraint),
            "gamma_constraint": constraints.serialize(self.gamma_constraint),
        }
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/layer_normalization.py
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import constraints
|
| 3 |
+
from keras.src import initializers
|
| 4 |
+
from keras.src import ops
|
| 5 |
+
from keras.src import regularizers
|
| 6 |
+
from keras.src.api_export import keras_export
|
| 7 |
+
from keras.src.layers.layer import Layer
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@keras_export("keras.layers.LayerNormalization")
|
| 11 |
+
class LayerNormalization(Layer):
|
| 12 |
+
"""Layer normalization layer (Ba et al., 2016).
|
| 13 |
+
|
| 14 |
+
Normalize the activations of the previous layer for each given example in a
|
| 15 |
+
batch independently, rather than across a batch like Batch Normalization.
|
| 16 |
+
i.e. applies a transformation that maintains the mean activation within each
|
| 17 |
+
example close to 0 and the activation standard deviation close to 1.
|
| 18 |
+
|
| 19 |
+
If `scale` or `center` are enabled, the layer will scale the normalized
|
| 20 |
+
outputs by broadcasting them with a trainable variable `gamma`, and center
|
| 21 |
+
the outputs by broadcasting with a trainable variable `beta`. `gamma` will
|
| 22 |
+
default to a ones tensor and `beta` will default to a zeros tensor, so that
|
| 23 |
+
centering and scaling are no-ops before training has begun.
|
| 24 |
+
|
| 25 |
+
So, with scaling and centering enabled the normalization equations
|
| 26 |
+
are as follows:
|
| 27 |
+
|
| 28 |
+
Let the intermediate activations for a mini-batch to be the `inputs`.
|
| 29 |
+
|
| 30 |
+
For each sample `x_i` in `inputs` with `k` features, we compute the mean and
|
| 31 |
+
variance of the sample:
|
| 32 |
+
|
| 33 |
+
```python
|
| 34 |
+
mean_i = sum(x_i[j] for j in range(k)) / k
|
| 35 |
+
var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
and then compute a normalized `x_i_normalized`, including a small factor
|
| 39 |
+
`epsilon` for numerical stability.
|
| 40 |
+
|
| 41 |
+
```python
|
| 42 |
+
x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,
|
| 46 |
+
which are learned parameters:
|
| 47 |
+
|
| 48 |
+
```python
|
| 49 |
+
output_i = x_i_normalized * gamma + beta
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
`gamma` and `beta` will span the axes of `inputs` specified in `axis`, and
|
| 53 |
+
this part of the inputs' shape must be fully defined.
|
| 54 |
+
|
| 55 |
+
For example:
|
| 56 |
+
|
| 57 |
+
>>> layer = keras.layers.LayerNormalization(axis=[1, 2, 3])
|
| 58 |
+
>>> layer.build([5, 20, 30, 40])
|
| 59 |
+
>>> print(layer.beta.shape)
|
| 60 |
+
(20, 30, 40)
|
| 61 |
+
>>> print(layer.gamma.shape)
|
| 62 |
+
(20, 30, 40)
|
| 63 |
+
|
| 64 |
+
Note that other implementations of layer normalization may choose to define
|
| 65 |
+
`gamma` and `beta` over a separate set of axes from the axes being
|
| 66 |
+
normalized across. For example, Group Normalization
|
| 67 |
+
([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1
|
| 68 |
+
corresponds to a Layer Normalization that normalizes across height, width,
|
| 69 |
+
and channel and has `gamma` and `beta` span only the channel dimension.
|
| 70 |
+
So, this Layer Normalization implementation will not match a Group
|
| 71 |
+
Normalization layer with group size set to 1.
|
| 72 |
+
|
| 73 |
+
Args:
|
| 74 |
+
axis: Integer or List/Tuple. The axis or axes to normalize across.
|
| 75 |
+
Typically, this is the features axis/axes. The left-out axes are
|
| 76 |
+
typically the batch axis/axes. `-1` is the last dimension in the
|
| 77 |
+
input. Defaults to `-1`.
|
| 78 |
+
epsilon: Small float added to variance to avoid dividing by zero.
|
| 79 |
+
Defaults to 1e-3.
|
| 80 |
+
center: If True, add offset of `beta` to normalized tensor. If False,
|
| 81 |
+
`beta` is ignored. Defaults to `True`.
|
| 82 |
+
scale: If True, multiply by `gamma`. If False, `gamma` is not used.
|
| 83 |
+
When the next layer is linear (also e.g. `nn.relu`), this can be
|
| 84 |
+
disabled since the scaling will be done by the next layer.
|
| 85 |
+
Defaults to `True`.
|
| 86 |
+
rms_scaling: If True, `center` and `scale` are ignored, and the
|
| 87 |
+
inputs are scaled by `gamma` and the inverse square root
|
| 88 |
+
of the square of all inputs. This is an approximate and faster
|
| 89 |
+
approach that avoids ever computing the mean of the input.
|
| 90 |
+
beta_initializer: Initializer for the beta weight. Defaults to zeros.
|
| 91 |
+
gamma_initializer: Initializer for the gamma weight. Defaults to ones.
|
| 92 |
+
beta_regularizer: Optional regularizer for the beta weight.
|
| 93 |
+
None by default.
|
| 94 |
+
gamma_regularizer: Optional regularizer for the gamma weight.
|
| 95 |
+
None by default.
|
| 96 |
+
beta_constraint: Optional constraint for the beta weight.
|
| 97 |
+
None by default.
|
| 98 |
+
gamma_constraint: Optional constraint for the gamma weight.
|
| 99 |
+
None by default.
|
| 100 |
+
**kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
Reference:
|
| 104 |
+
|
| 105 |
+
- [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).
|
| 106 |
+
"""
|
| 107 |
+
|
| 108 |
+
def __init__(
|
| 109 |
+
self,
|
| 110 |
+
axis=-1,
|
| 111 |
+
epsilon=1e-3,
|
| 112 |
+
center=True,
|
| 113 |
+
scale=True,
|
| 114 |
+
rms_scaling=False,
|
| 115 |
+
beta_initializer="zeros",
|
| 116 |
+
gamma_initializer="ones",
|
| 117 |
+
beta_regularizer=None,
|
| 118 |
+
gamma_regularizer=None,
|
| 119 |
+
beta_constraint=None,
|
| 120 |
+
gamma_constraint=None,
|
| 121 |
+
**kwargs,
|
| 122 |
+
):
|
| 123 |
+
super().__init__(**kwargs)
|
| 124 |
+
if isinstance(axis, (list, tuple)):
|
| 125 |
+
self.axis = list(axis)
|
| 126 |
+
elif isinstance(axis, int):
|
| 127 |
+
self.axis = axis
|
| 128 |
+
else:
|
| 129 |
+
raise TypeError(
|
| 130 |
+
"Expected an int or a list/tuple of ints for the "
|
| 131 |
+
"argument 'axis', but received: %r" % axis
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
self.epsilon = epsilon
|
| 135 |
+
self.center = center
|
| 136 |
+
self.scale = scale
|
| 137 |
+
self.rms_scaling = rms_scaling
|
| 138 |
+
self.beta_initializer = initializers.get(beta_initializer)
|
| 139 |
+
self.gamma_initializer = initializers.get(gamma_initializer)
|
| 140 |
+
self.beta_regularizer = regularizers.get(beta_regularizer)
|
| 141 |
+
self.gamma_regularizer = regularizers.get(gamma_regularizer)
|
| 142 |
+
self.beta_constraint = constraints.get(beta_constraint)
|
| 143 |
+
self.gamma_constraint = constraints.get(gamma_constraint)
|
| 144 |
+
|
| 145 |
+
self.supports_masking = True
|
| 146 |
+
self.autocast = False
|
| 147 |
+
|
| 148 |
+
def build(self, input_shape):
|
| 149 |
+
if isinstance(self.axis, list):
|
| 150 |
+
shape = tuple([input_shape[dim] for dim in self.axis])
|
| 151 |
+
else:
|
| 152 |
+
shape = (input_shape[self.axis],)
|
| 153 |
+
self.axis = [self.axis]
|
| 154 |
+
if self.scale or self.rms_scaling:
|
| 155 |
+
self.gamma = self.add_weight(
|
| 156 |
+
name="gamma",
|
| 157 |
+
shape=shape,
|
| 158 |
+
initializer=self.gamma_initializer,
|
| 159 |
+
regularizer=self.gamma_regularizer,
|
| 160 |
+
constraint=self.gamma_constraint,
|
| 161 |
+
trainable=True,
|
| 162 |
+
autocast=False,
|
| 163 |
+
)
|
| 164 |
+
else:
|
| 165 |
+
self.gamma = None
|
| 166 |
+
|
| 167 |
+
if self.center and not self.rms_scaling:
|
| 168 |
+
self.beta = self.add_weight(
|
| 169 |
+
name="beta",
|
| 170 |
+
shape=shape,
|
| 171 |
+
initializer=self.beta_initializer,
|
| 172 |
+
regularizer=self.beta_regularizer,
|
| 173 |
+
constraint=self.beta_constraint,
|
| 174 |
+
trainable=True,
|
| 175 |
+
autocast=False,
|
| 176 |
+
)
|
| 177 |
+
else:
|
| 178 |
+
self.beta = None
|
| 179 |
+
|
| 180 |
+
self.built = True
|
| 181 |
+
|
| 182 |
+
def call(self, inputs):
|
| 183 |
+
# Compute the axes along which to reduce the mean / variance
|
| 184 |
+
input_shape = inputs.shape
|
| 185 |
+
ndims = len(input_shape)
|
| 186 |
+
|
| 187 |
+
# Broadcasting only necessary for norm when the axis is not just
|
| 188 |
+
# the last dimension
|
| 189 |
+
broadcast_shape = [1] * ndims
|
| 190 |
+
for dim in self.axis:
|
| 191 |
+
broadcast_shape[dim] = input_shape[dim]
|
| 192 |
+
|
| 193 |
+
def _broadcast(v):
|
| 194 |
+
if (
|
| 195 |
+
v is not None
|
| 196 |
+
and len(v.shape) != ndims
|
| 197 |
+
and self.axis != [ndims - 1]
|
| 198 |
+
):
|
| 199 |
+
return ops.reshape(v, broadcast_shape)
|
| 200 |
+
return v
|
| 201 |
+
|
| 202 |
+
compute_dtype = backend.result_type(inputs.dtype, "float32")
|
| 203 |
+
# LN is prone to overflow with float16/bfloat16 inputs, so we upcast to
|
| 204 |
+
# float32 for the subsequent computations.
|
| 205 |
+
inputs = ops.cast(inputs, compute_dtype)
|
| 206 |
+
|
| 207 |
+
if self.rms_scaling:
|
| 208 |
+
# Calculate outputs with only variance and gamma if rms scaling
|
| 209 |
+
# is enabled
|
| 210 |
+
# Calculate the variance along self.axis (layer activations).
|
| 211 |
+
variance = ops.var(inputs, axis=self.axis, keepdims=True)
|
| 212 |
+
inv = ops.rsqrt(variance + self.epsilon)
|
| 213 |
+
|
| 214 |
+
outputs = (
|
| 215 |
+
inputs * inv * ops.cast(_broadcast(self.gamma), inputs.dtype)
|
| 216 |
+
)
|
| 217 |
+
else:
|
| 218 |
+
# Calculate the mean & variance along self.axis (layer activations).
|
| 219 |
+
mean, variance = ops.moments(inputs, axes=self.axis, keepdims=True)
|
| 220 |
+
gamma, beta = _broadcast(self.gamma), _broadcast(self.beta)
|
| 221 |
+
|
| 222 |
+
inv = ops.rsqrt(variance + self.epsilon)
|
| 223 |
+
if gamma is not None:
|
| 224 |
+
gamma = ops.cast(gamma, inputs.dtype)
|
| 225 |
+
inv = inv * gamma
|
| 226 |
+
|
| 227 |
+
res = -mean * inv
|
| 228 |
+
if beta is not None:
|
| 229 |
+
beta = ops.cast(beta, inputs.dtype)
|
| 230 |
+
res = res + beta
|
| 231 |
+
|
| 232 |
+
outputs = inputs * inv + res
|
| 233 |
+
return ops.cast(outputs, self.compute_dtype)
|
| 234 |
+
|
| 235 |
+
def compute_output_shape(self, input_shape):
|
| 236 |
+
if isinstance(self.axis, int):
|
| 237 |
+
axes = [self.axis]
|
| 238 |
+
else:
|
| 239 |
+
axes = self.axis
|
| 240 |
+
|
| 241 |
+
for axis in axes:
|
| 242 |
+
if axis >= len(input_shape) or axis < -len(input_shape):
|
| 243 |
+
raise ValueError(
|
| 244 |
+
f"Axis {axis} is out of bounds for "
|
| 245 |
+
f"input shape {input_shape}. "
|
| 246 |
+
f"Received: axis={self.axis}"
|
| 247 |
+
)
|
| 248 |
+
return input_shape
|
| 249 |
+
|
| 250 |
+
def get_config(self):
|
| 251 |
+
config = {
|
| 252 |
+
"axis": self.axis,
|
| 253 |
+
"epsilon": self.epsilon,
|
| 254 |
+
"center": self.center,
|
| 255 |
+
"scale": self.scale,
|
| 256 |
+
"rms_scaling": self.rms_scaling,
|
| 257 |
+
"beta_initializer": initializers.serialize(self.beta_initializer),
|
| 258 |
+
"gamma_initializer": initializers.serialize(self.gamma_initializer),
|
| 259 |
+
"beta_regularizer": regularizers.serialize(self.beta_regularizer),
|
| 260 |
+
"gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
|
| 261 |
+
"beta_constraint": constraints.serialize(self.beta_constraint),
|
| 262 |
+
"gamma_constraint": constraints.serialize(self.gamma_constraint),
|
| 263 |
+
}
|
| 264 |
+
base_config = super().get_config()
|
| 265 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/spectral_normalization.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import initializers
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers import Wrapper
|
| 5 |
+
from keras.src.layers.input_spec import InputSpec
|
| 6 |
+
from keras.src.utils.numerical_utils import normalize
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export("keras.layers.SpectralNormalization")
|
| 10 |
+
class SpectralNormalization(Wrapper):
|
| 11 |
+
"""Performs spectral normalization on the weights of a target layer.
|
| 12 |
+
|
| 13 |
+
This wrapper controls the Lipschitz constant of the weights of a layer by
|
| 14 |
+
constraining their spectral norm, which can stabilize the training of GANs.
|
| 15 |
+
|
| 16 |
+
Args:
|
| 17 |
+
layer: A `keras.layers.Layer` instance that
|
| 18 |
+
has either a `kernel` (e.g. `Conv2D`, `Dense`...)
|
| 19 |
+
or an `embeddings` attribute (`Embedding` layer).
|
| 20 |
+
power_iterations: int, the number of iterations during normalization.
|
| 21 |
+
**kwargs: Base wrapper keyword arguments.
|
| 22 |
+
|
| 23 |
+
Examples:
|
| 24 |
+
|
| 25 |
+
Wrap `keras.layers.Conv2D`:
|
| 26 |
+
>>> x = np.random.rand(1, 10, 10, 1)
|
| 27 |
+
>>> conv2d = SpectralNormalization(keras.layers.Conv2D(2, 2))
|
| 28 |
+
>>> y = conv2d(x)
|
| 29 |
+
>>> y.shape
|
| 30 |
+
(1, 9, 9, 2)
|
| 31 |
+
|
| 32 |
+
Wrap `keras.layers.Dense`:
|
| 33 |
+
>>> x = np.random.rand(1, 10, 10, 1)
|
| 34 |
+
>>> dense = SpectralNormalization(keras.layers.Dense(10))
|
| 35 |
+
>>> y = dense(x)
|
| 36 |
+
>>> y.shape
|
| 37 |
+
(1, 10, 10, 10)
|
| 38 |
+
|
| 39 |
+
Reference:
|
| 40 |
+
|
| 41 |
+
- [Spectral Normalization for GAN](https://arxiv.org/abs/1802.05957).
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
def __init__(self, layer, power_iterations=1, **kwargs):
|
| 45 |
+
super().__init__(layer, **kwargs)
|
| 46 |
+
if power_iterations <= 0:
|
| 47 |
+
raise ValueError(
|
| 48 |
+
"`power_iterations` should be greater than zero. Received: "
|
| 49 |
+
f"`power_iterations={power_iterations}`"
|
| 50 |
+
)
|
| 51 |
+
self.power_iterations = power_iterations
|
| 52 |
+
|
| 53 |
+
def build(self, input_shape):
|
| 54 |
+
super().build(input_shape)
|
| 55 |
+
self.input_spec = InputSpec(shape=[None] + list(input_shape[1:]))
|
| 56 |
+
|
| 57 |
+
if hasattr(self.layer, "kernel"):
|
| 58 |
+
self.kernel = self.layer.kernel
|
| 59 |
+
elif hasattr(self.layer, "embeddings"):
|
| 60 |
+
self.kernel = self.layer.embeddings
|
| 61 |
+
else:
|
| 62 |
+
raise ValueError(
|
| 63 |
+
f"{type(self.layer).__name__} object has no attribute 'kernel' "
|
| 64 |
+
"nor 'embeddings'"
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
self.kernel_shape = self.kernel.shape
|
| 68 |
+
|
| 69 |
+
self.vector_u = self.add_weight(
|
| 70 |
+
shape=(1, self.kernel_shape[-1]),
|
| 71 |
+
initializer=initializers.TruncatedNormal(stddev=0.02),
|
| 72 |
+
trainable=False,
|
| 73 |
+
name="vector_u",
|
| 74 |
+
dtype=self.kernel.dtype,
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
def call(self, inputs, training=False):
|
| 78 |
+
if training:
|
| 79 |
+
new_vector_u, new_kernel = ops.cond(
|
| 80 |
+
ops.all(ops.equal(self.kernel.value, 0)),
|
| 81 |
+
lambda: (self.vector_u.value, self.kernel.value),
|
| 82 |
+
self.normalized_weights,
|
| 83 |
+
)
|
| 84 |
+
self.vector_u.assign(new_vector_u)
|
| 85 |
+
self.kernel.assign(new_kernel)
|
| 86 |
+
|
| 87 |
+
output = self.layer(inputs)
|
| 88 |
+
return ops.cast(output, inputs.dtype)
|
| 89 |
+
|
| 90 |
+
def compute_output_shape(self, input_shape):
|
| 91 |
+
return self.layer.compute_output_shape(input_shape)
|
| 92 |
+
|
| 93 |
+
def normalized_weights(self):
|
| 94 |
+
"""Generate spectral normalized weights.
|
| 95 |
+
|
| 96 |
+
This method returns the updated value for `self.kernel` with the
|
| 97 |
+
spectral normalized value, so that the layer is ready for `call()`.
|
| 98 |
+
"""
|
| 99 |
+
|
| 100 |
+
weights = ops.reshape(self.kernel, [-1, self.kernel_shape[-1]])
|
| 101 |
+
vector_u = self.vector_u.value
|
| 102 |
+
|
| 103 |
+
for _ in range(self.power_iterations):
|
| 104 |
+
vector_v = normalize(
|
| 105 |
+
ops.matmul(vector_u, ops.transpose(weights)), axis=None
|
| 106 |
+
)
|
| 107 |
+
vector_u = normalize(ops.matmul(vector_v, weights), axis=None)
|
| 108 |
+
vector_u = ops.stop_gradient(vector_u)
|
| 109 |
+
vector_v = ops.stop_gradient(vector_v)
|
| 110 |
+
sigma = ops.matmul(
|
| 111 |
+
ops.matmul(vector_v, weights), ops.transpose(vector_u)
|
| 112 |
+
)
|
| 113 |
+
kernel = ops.reshape(ops.divide(self.kernel, sigma), self.kernel_shape)
|
| 114 |
+
return ops.cast(vector_u, self.vector_u.dtype), ops.cast(
|
| 115 |
+
kernel, self.kernel.dtype
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
def get_config(self):
|
| 119 |
+
config = {"power_iterations": self.power_iterations}
|
| 120 |
+
base_config = super().get_config()
|
| 121 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/unit_normalization.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.layer import Layer
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export("keras.layers.UnitNormalization")
|
| 7 |
+
class UnitNormalization(Layer):
|
| 8 |
+
"""Unit normalization layer.
|
| 9 |
+
|
| 10 |
+
Normalize a batch of inputs so that each input in the batch has a L2 norm
|
| 11 |
+
equal to 1 (across the axes specified in `axis`).
|
| 12 |
+
|
| 13 |
+
Example:
|
| 14 |
+
|
| 15 |
+
>>> data = np.arange(6).reshape(2, 3)
|
| 16 |
+
>>> normalized_data = keras.layers.UnitNormalization()(data)
|
| 17 |
+
>>> np.sum(normalized_data[0, :] ** 2)
|
| 18 |
+
1.0
|
| 19 |
+
|
| 20 |
+
Args:
|
| 21 |
+
axis: Integer or list/tuple. The axis or axes to normalize across.
|
| 22 |
+
Typically, this is the features axis or axes. The left-out axes are
|
| 23 |
+
typically the batch axis or axes. `-1` is the last dimension
|
| 24 |
+
in the input. Defaults to `-1`.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
def __init__(self, axis=-1, **kwargs):
|
| 28 |
+
super().__init__(**kwargs)
|
| 29 |
+
if isinstance(axis, (list, tuple)):
|
| 30 |
+
self.axis = list(axis)
|
| 31 |
+
elif isinstance(axis, int):
|
| 32 |
+
self.axis = axis
|
| 33 |
+
else:
|
| 34 |
+
raise TypeError(
|
| 35 |
+
"Invalid value for `axis` argument: "
|
| 36 |
+
"expected an int or a list/tuple of ints. "
|
| 37 |
+
f"Received: axis={axis}"
|
| 38 |
+
)
|
| 39 |
+
self.supports_masking = True
|
| 40 |
+
self.built = True
|
| 41 |
+
|
| 42 |
+
def call(self, inputs):
|
| 43 |
+
return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)
|
| 44 |
+
|
| 45 |
+
def compute_output_shape(self, input_shape):
|
| 46 |
+
# Ensure axis is always treated as a list
|
| 47 |
+
if isinstance(self.axis, int):
|
| 48 |
+
axes = [self.axis]
|
| 49 |
+
else:
|
| 50 |
+
axes = self.axis
|
| 51 |
+
|
| 52 |
+
for axis in axes:
|
| 53 |
+
if axis >= len(input_shape) or axis < -len(input_shape):
|
| 54 |
+
raise ValueError(
|
| 55 |
+
f"Axis {self.axis} is out of bounds for "
|
| 56 |
+
f"input shape {input_shape}."
|
| 57 |
+
)
|
| 58 |
+
return input_shape
|
| 59 |
+
|
| 60 |
+
def get_config(self):
|
| 61 |
+
config = super().get_config()
|
| 62 |
+
config.update({"axis": self.axis})
|
| 63 |
+
return config
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (200 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling1d.cpython-310.pyc
ADDED
|
Binary file (3.66 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling2d.cpython-310.pyc
ADDED
|
Binary file (4.46 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling3d.cpython-310.pyc
ADDED
|
Binary file (3.55 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_global_pooling.cpython-310.pyc
ADDED
|
Binary file (1.7 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_pooling.cpython-310.pyc
ADDED
|
Binary file (2.18 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling1d.cpython-310.pyc
ADDED
|
Binary file (3.43 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling2d.cpython-310.pyc
ADDED
|
Binary file (2.91 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling3d.cpython-310.pyc
ADDED
|
Binary file (3.06 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling1d.cpython-310.pyc
ADDED
|
Binary file (2.82 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling2d.cpython-310.pyc
ADDED
|
Binary file (2.88 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling3d.cpython-310.pyc
ADDED
|
Binary file (3.03 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling1d.cpython-310.pyc
ADDED
|
Binary file (3.66 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling2d.cpython-310.pyc
ADDED
|
Binary file (4.44 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling3d.cpython-310.pyc
ADDED
|
Binary file (3.55 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling1d.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.pooling.base_pooling import BasePooling
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@keras_export(["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"])
|
| 6 |
+
class AveragePooling1D(BasePooling):
|
| 7 |
+
"""Average pooling for temporal data.
|
| 8 |
+
|
| 9 |
+
Downsamples the input representation by taking the average value over the
|
| 10 |
+
window defined by `pool_size`. The window is shifted by `strides`. The
|
| 11 |
+
resulting output when using "valid" padding option has a shape of:
|
| 12 |
+
`output_shape = (input_shape - pool_size + 1) / strides)`
|
| 13 |
+
|
| 14 |
+
The resulting output shape when using the "same" padding option is:
|
| 15 |
+
`output_shape = input_shape / strides`
|
| 16 |
+
|
| 17 |
+
Args:
|
| 18 |
+
pool_size: int, size of the max pooling window.
|
| 19 |
+
strides: int or None. Specifies how much the pooling window moves
|
| 20 |
+
for each pooling step. If None, it will default to `pool_size`.
|
| 21 |
+
padding: string, either `"valid"` or `"same"` (case-insensitive).
|
| 22 |
+
`"valid"` means no padding. `"same"` results in padding evenly to
|
| 23 |
+
the left/right or up/down of the input such that output has the same
|
| 24 |
+
height/width dimension as the input.
|
| 25 |
+
data_format: string, either `"channels_last"` or `"channels_first"`.
|
| 26 |
+
The ordering of the dimensions in the inputs. `"channels_last"`
|
| 27 |
+
corresponds to inputs with shape `(batch, steps, features)`
|
| 28 |
+
while `"channels_first"` corresponds to inputs with shape
|
| 29 |
+
`(batch, features, steps)`. It defaults to the `image_data_format`
|
| 30 |
+
value found in your Keras config file at `~/.keras/keras.json`.
|
| 31 |
+
If you never set it, then it will be `"channels_last"`.
|
| 32 |
+
|
| 33 |
+
Input shape:
|
| 34 |
+
|
| 35 |
+
- If `data_format="channels_last"`:
|
| 36 |
+
3D tensor with shape `(batch_size, steps, features)`.
|
| 37 |
+
- If `data_format="channels_first"`:
|
| 38 |
+
3D tensor with shape `(batch_size, features, steps)`.
|
| 39 |
+
|
| 40 |
+
Output shape:
|
| 41 |
+
|
| 42 |
+
- If `data_format="channels_last"`:
|
| 43 |
+
3D tensor with shape `(batch_size, downsampled_steps, features)`.
|
| 44 |
+
- If `data_format="channels_first"`:
|
| 45 |
+
3D tensor with shape `(batch_size, features, downsampled_steps)`.
|
| 46 |
+
|
| 47 |
+
Examples:
|
| 48 |
+
|
| 49 |
+
`strides=1` and `padding="valid"`:
|
| 50 |
+
|
| 51 |
+
>>> x = np.array([1., 2., 3., 4., 5.])
|
| 52 |
+
>>> x = np.reshape(x, [1, 5, 1])
|
| 53 |
+
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
|
| 54 |
+
... strides=1, padding="valid")
|
| 55 |
+
>>> avg_pool_1d(x)
|
| 56 |
+
|
| 57 |
+
`strides=2` and `padding="valid"`:
|
| 58 |
+
|
| 59 |
+
>>> x = np.array([1., 2., 3., 4., 5.])
|
| 60 |
+
>>> x = np.reshape(x, [1, 5, 1])
|
| 61 |
+
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
|
| 62 |
+
... strides=2, padding="valid")
|
| 63 |
+
>>> avg_pool_1d(x)
|
| 64 |
+
|
| 65 |
+
`strides=1` and `padding="same"`:
|
| 66 |
+
|
| 67 |
+
>>> x = np.array([1., 2., 3., 4., 5.])
|
| 68 |
+
>>> x = np.reshape(x, [1, 5, 1])
|
| 69 |
+
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
|
| 70 |
+
... strides=1, padding="same")
|
| 71 |
+
>>> avg_pool_1d(x)
|
| 72 |
+
"""
|
| 73 |
+
|
| 74 |
+
def __init__(
|
| 75 |
+
self,
|
| 76 |
+
pool_size,
|
| 77 |
+
strides=None,
|
| 78 |
+
padding="valid",
|
| 79 |
+
data_format=None,
|
| 80 |
+
name=None,
|
| 81 |
+
**kwargs,
|
| 82 |
+
):
|
| 83 |
+
super().__init__(
|
| 84 |
+
pool_size,
|
| 85 |
+
strides,
|
| 86 |
+
pool_dimensions=1,
|
| 87 |
+
pool_mode="average",
|
| 88 |
+
padding=padding,
|
| 89 |
+
data_format=data_format,
|
| 90 |
+
name=name,
|
| 91 |
+
**kwargs,
|
| 92 |
+
)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling2d.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.pooling.base_pooling import BasePooling
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
    """Average pooling over 2D spatial data (height and width).

    Each channel is downsampled independently: every window of size
    `pool_size` is replaced by its mean, and the window advances by
    `strides` along each spatial dimension.

    With `padding="valid"` the output spatial size per dimension is
    `floor((input_size - pool_size) / strides) + 1` (for
    `input_size >= pool_size`); with `padding="same"` it is
    `floor((input_size - 1) / strides) + 1`.

    Args:
        pool_size: int or tuple of 2 ints, the window size per spatial
            dimension. A single int is used for both dimensions.
        strides: int, tuple of 2 ints, or None. If None, defaults to
            `pool_size`. A single int is used for both dimensions.
        padding: `"valid"` (no padding) or `"same"` (pad evenly so the
            output keeps the input's spatial size when `strides == 1`),
            case-insensitive.
        data_format: `"channels_last"` (inputs shaped
            `(batch, height, width, channels)`) or `"channels_first"`
            (inputs shaped `(batch, channels, height, width)`). Defaults
            to the `image_data_format` in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless set.

    Input shape:
        A 4D tensor laid out according to `data_format`.

    Output shape:
        A 4D tensor in the same layout with pooled height and width.

    Example:

    ```python
    x = np.reshape(np.arange(1.0, 10.0), [1, 3, 3, 1])
    y = keras.layers.AveragePooling2D(
        pool_size=(2, 2), strides=(1, 1), padding="valid"
    )(x)
    ```
    """

    def __init__(
        self,
        pool_size,
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        # The shared base class implements the pooling; this subclass
        # only fixes the rank (2D) and the mode (average).
        super().__init__(
            pool_size,
            strides,
            pool_dimensions=2,
            pool_mode="average",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling3d.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.pooling.base_pooling import BasePooling
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"])
class AveragePooling3D(BasePooling):
    """Average pooling over 3D (spatial or spatio-temporal) data.

    Each channel is downsampled independently: every window of size
    `pool_size` along the three spatial dimensions is replaced by its
    mean, and the window advances by `strides` along each dimension.

    Args:
        pool_size: int or tuple of 3 ints, the window size per spatial
            dimension. A single int is used for all three dimensions.
        strides: int, tuple of 3 ints, or None. If None, defaults to
            `pool_size`. A single int is used for all three dimensions.
        padding: `"valid"` (no padding) or `"same"` (pad evenly so the
            output keeps the input's spatial size when `strides == 1`),
            case-insensitive.
        data_format: `"channels_last"` (inputs shaped
            `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`)
            or `"channels_first"` (inputs shaped
            `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`).
            Defaults to the `image_data_format` in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless set.

    Input shape:
        A 5D tensor laid out according to `data_format`.

    Output shape:
        A 5D tensor in the same layout with the three spatial dimensions
        pooled.

    Example:

    ```python
    inputs = keras.layers.Input(shape=(30, 30, 30, 3))
    outputs = keras.layers.AveragePooling3D(pool_size=3)(inputs)
    # outputs shape: (batch_size, 10, 10, 10, 3)
    ```
    """

    def __init__(
        self,
        pool_size,
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        # The shared base class implements the pooling; this subclass
        # only fixes the rank (3D) and the mode (average).
        super().__init__(
            pool_size,
            strides,
            pool_dimensions=3,
            pool_mode="average",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_global_pooling.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.layers.input_spec import InputSpec
|
| 3 |
+
from keras.src.layers.layer import Layer
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class BaseGlobalPooling(Layer):
    """Shared machinery for the `Global{Max,Average}Pooling*D` layers.

    Handles configuration (`data_format`, `keepdims`), input-rank
    validation, output shape inference, and serialization. Subclasses
    implement the actual reduction in `call`.
    """

    def __init__(
        self, pool_dimensions, data_format=None, keepdims=False, **kwargs
    ):
        super().__init__(**kwargs)

        self.data_format = backend.standardize_data_format(data_format)
        self.keepdims = keepdims
        # Inputs are batch + spatial dims + channels, in either layout.
        self.input_spec = InputSpec(ndim=pool_dimensions + 2)
        # Global pooling has no weights, so the layer is built up front.
        self.built = True

    def call(self, inputs):
        # The reduction itself is subclass-specific.
        raise NotImplementedError

    def compute_output_shape(self, input_shape):
        """Drop (or collapse to 1, with `keepdims`) the spatial axes."""
        num_spatial_dims = len(input_shape) - 2
        batch = input_shape[0]
        # With keepdims the pooled axes remain with length 1; otherwise
        # they vanish from the output shape entirely.
        spatial = (1,) * num_spatial_dims if self.keepdims else ()
        if self.data_format == "channels_last":
            return (batch,) + spatial + (input_shape[-1],)
        return (batch, input_shape[1]) + spatial

    def get_config(self):
        config = super().get_config()
        config["data_format"] = self.data_format
        config["keepdims"] = self.keepdims
        return config
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_pooling.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.layers.input_spec import InputSpec
|
| 4 |
+
from keras.src.layers.layer import Layer
|
| 5 |
+
from keras.src.ops.operation_utils import compute_pooling_output_shape
|
| 6 |
+
from keras.src.utils import argument_validation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class BasePooling(Layer):
    """Shared machinery for the `{Max,Average}Pooling*D` layers.

    Normalizes `pool_size` and `strides` into tuples of the right length,
    validates input rank, and dispatches `call` to the backend max or
    average pooling op according to `pool_mode`.
    """

    def __init__(
        self,
        pool_size,
        strides,
        pool_dimensions,
        pool_mode="max",
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        super().__init__(name=name, **kwargs)

        self.pool_size = argument_validation.standardize_tuple(
            pool_size, pool_dimensions, "pool_size"
        )
        # A None stride means "step by the window size", i.e.
        # non-overlapping windows.
        strides = pool_size if strides is None else strides
        self.strides = argument_validation.standardize_tuple(
            strides, pool_dimensions, "strides", allow_zero=True
        )
        self.pool_mode = pool_mode
        self.padding = padding
        self.data_format = backend.standardize_data_format(data_format)

        self.input_spec = InputSpec(ndim=pool_dimensions + 2)
        # Pooling layers have no weights, so building is immediate.
        self.built = True

    def call(self, inputs):
        # Dispatch on the configured mode; anything else is a
        # configuration error.
        mode_to_op = {"max": ops.max_pool, "average": ops.average_pool}
        pool_op = mode_to_op.get(self.pool_mode)
        if pool_op is None:
            raise ValueError(
                "`pool_mode` must be either 'max' or 'average'. Received: "
                f"{self.pool_mode}."
            )
        return pool_op(
            inputs,
            pool_size=self.pool_size,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
        )

    def compute_output_shape(self, input_shape):
        return compute_pooling_output_shape(
            input_shape,
            self.pool_size,
            self.strides,
            self.padding,
            self.data_format,
        )

    def get_config(self):
        config = super().get_config()
        config["pool_size"] = self.pool_size
        config["padding"] = self.padding
        config["strides"] = self.strides
        config["data_format"] = self.data_format
        return config
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling1d.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@keras_export(
    [
        "keras.layers.GlobalAveragePooling1D",
        "keras.layers.GlobalAvgPool1D",
    ]
)
class GlobalAveragePooling1D(BaseGlobalPooling):
    """Global average pooling over the steps axis of temporal data.

    Reduces a 3D input to its mean over the steps axis. When a mask is
    supplied, masked steps are excluded from the average (a weighted
    mean over the unmasked steps).

    Args:
        data_format: `"channels_last"` (inputs shaped
            `(batch, steps, features)`) or `"channels_first"` (inputs
            shaped `(batch, features, steps)`). Defaults to the
            `image_data_format` in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless set.
        keepdims: bool. If False (default), the steps axis is dropped
            from the output; if True, it is kept with length 1, as with
            `np.mean(..., keepdims=True)`.

    Call arguments:
        inputs: A 3D tensor.
        mask: Optional binary tensor of shape `(batch_size, steps)`;
            steps whose mask is zero are excluded from the average.

    Output shape:
        `(batch_size, features)` when `keepdims=False`; otherwise the
        input layout with the steps axis of length 1.

    Example:

    ```python
    x = np.random.rand(2, 3, 4)
    y = keras.layers.GlobalAveragePooling1D()(x)  # shape (2, 4)
    ```
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=1,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )
        # Masks are consumed here (for the weighted mean), not propagated.
        self.supports_masking = True

    def call(self, inputs, mask=None):
        steps_axis = 1 if self.data_format == "channels_last" else 2
        if mask is None:
            return ops.mean(inputs, axis=steps_axis, keepdims=self.keepdims)
        # Weighted mean: zero out masked steps, then divide the summed
        # values by the number of unmasked steps.
        mask = backend.cast(mask, inputs[0].dtype)
        mask = ops.expand_dims(
            mask, 2 if self.data_format == "channels_last" else 1
        )
        inputs = inputs * mask
        total = ops.sum(inputs, axis=steps_axis, keepdims=self.keepdims)
        count = ops.sum(mask, axis=steps_axis, keepdims=self.keepdims)
        return total / count

    def compute_mask(self, inputs, mask=None):
        # The steps axis is reduced away, so no mask remains afterwards.
        return None
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling2d.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export(
    [
        "keras.layers.GlobalAveragePooling2D",
        "keras.layers.GlobalAvgPool2D",
    ]
)
class GlobalAveragePooling2D(BaseGlobalPooling):
    """Global average pooling over the spatial axes of 2D data.

    Reduces a 4D input to the mean over its height and width axes, one
    value per channel.

    Args:
        data_format: `"channels_last"` (inputs shaped
            `(batch, height, width, channels)`) or `"channels_first"`
            (inputs shaped `(batch, channels, height, width)`). Defaults
            to the `image_data_format` in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless set.
        keepdims: bool. If False (default), the spatial axes are dropped
            from the output; if True, they are kept with length 1, as
            with `np.mean(..., keepdims=True)`.

    Output shape:
        `(batch_size, channels)` when `keepdims=False`; otherwise the
        input layout with both spatial axes of length 1.

    Example:

    ```python
    x = np.random.rand(2, 4, 5, 3)
    y = keras.layers.GlobalAveragePooling2D()(x)  # shape (2, 3)
    ```
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=2,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Spatial axes are (1, 2) for channels_last, (2, 3) otherwise.
        axes = [1, 2] if self.data_format == "channels_last" else [2, 3]
        return ops.mean(inputs, axis=axes, keepdims=self.keepdims)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling3d.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export(
    [
        "keras.layers.GlobalAveragePooling3D",
        "keras.layers.GlobalAvgPool3D",
    ]
)
class GlobalAveragePooling3D(BaseGlobalPooling):
    """Global average pooling over the spatial axes of 3D data.

    Reduces a 5D input to the mean over its three spatial axes, one
    value per channel.

    Args:
        data_format: `"channels_last"` (inputs shaped
            `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`)
            or `"channels_first"` (inputs shaped
            `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`).
            Defaults to the `image_data_format` in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless set.
        keepdims: bool. If False (default), the spatial axes are dropped
            from the output; if True, they are kept with length 1, as
            with `np.mean(..., keepdims=True)`.

    Output shape:
        `(batch_size, channels)` when `keepdims=False`; otherwise the
        input layout with all three spatial axes of length 1.

    Example:

    ```python
    x = np.random.rand(2, 4, 5, 4, 3)
    y = keras.layers.GlobalAveragePooling3D()(x)  # shape (2, 3)
    ```
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=3,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Spatial axes are (1, 2, 3) for channels_last, (2, 3, 4) otherwise.
        axes = [1, 2, 3] if self.data_format == "channels_last" else [2, 3, 4]
        return ops.mean(inputs, axis=axes, keepdims=self.keepdims)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling1d.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export(
    [
        "keras.layers.GlobalMaxPooling1D",
        "keras.layers.GlobalMaxPool1D",
    ]
)
class GlobalMaxPooling1D(BaseGlobalPooling):
    """Global max pooling over the steps axis of temporal data.

    Reduces a 3D input to its maximum over the steps axis, one value per
    feature.

    Args:
        data_format: `"channels_last"` (inputs shaped
            `(batch, steps, features)`) or `"channels_first"` (inputs
            shaped `(batch, features, steps)`). Defaults to the
            `image_data_format` in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless set.
        keepdims: bool. If False (default), the steps axis is dropped
            from the output; if True, it is kept with length 1, as with
            `np.max(..., keepdims=True)`.

    Output shape:
        `(batch_size, features)` when `keepdims=False`; otherwise the
        input layout with the steps axis of length 1.

    Example:

    ```python
    x = np.random.rand(2, 3, 4)
    y = keras.layers.GlobalMaxPooling1D()(x)  # shape (2, 4)
    ```
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=1,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Steps live on axis 1 for channels_last, axis 2 for channels_first.
        axis = 1 if self.data_format == "channels_last" else 2
        return ops.max(inputs, axis=axis, keepdims=self.keepdims)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling2d.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export(
    [
        "keras.layers.GlobalMaxPooling2D",
        "keras.layers.GlobalMaxPool2D",
    ]
)
class GlobalMaxPooling2D(BaseGlobalPooling):
    """Global max pooling over the spatial axes of 2D data.

    Reduces a 4D input to the maximum over its height and width axes,
    one value per channel.

    Args:
        data_format: `"channels_last"` (inputs shaped
            `(batch, height, width, channels)`) or `"channels_first"`
            (inputs shaped `(batch, channels, height, width)`). Defaults
            to the `image_data_format` in your Keras config at
            `~/.keras/keras.json`, which is `"channels_last"` unless set.
        keepdims: bool. If False (default), the spatial axes are dropped
            from the output; if True, they are kept with length 1, as
            with `np.max(..., keepdims=True)`.

    Output shape:
        `(batch_size, channels)` when `keepdims=False`; otherwise the
        input layout with both spatial axes of length 1.

    Example:

    ```python
    x = np.random.rand(2, 4, 5, 3)
    y = keras.layers.GlobalMaxPooling2D()(x)  # shape (2, 3)
    ```
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=2,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Spatial axes are (1, 2) for channels_last, (2, 3) otherwise.
        axes = [1, 2] if self.data_format == "channels_last" else [2, 3]
        return ops.max(inputs, axis=axes, keepdims=self.keepdims)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling3d.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export(
    [
        "keras.layers.GlobalMaxPooling3D",
        "keras.layers.GlobalMaxPool3D",
    ]
)
class GlobalMaxPooling3D(BaseGlobalPooling):
    """Global max pooling operation for 3D data.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape
            `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            It defaults to the `image_data_format` value found in your Keras
            config file at `~/.keras/keras.json`. If you never set it, then it
            will be `"channels_last"`.
        keepdims: A boolean, whether to keep the spatial dimensions or not.
            If `keepdims` is `False` (default), the rank of the tensor is
            reduced for spatial dimensions. If `keepdims` is `True`, the
            spatial dimensions are retained with length 1.
            The behavior is the same as for `tf.reduce_mean` or `np.mean`.

    Input shape:

    - If `data_format='channels_last'`:
        5D tensor with shape:
        `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
    - If `data_format='channels_first'`:
        5D tensor with shape:
        `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`

    Output shape:

    - If `keepdims=False`:
        2D tensor with shape `(batch_size, channels)`.
    - If `keepdims=True`:
        - If `data_format="channels_last"`:
            5D tensor with shape `(batch_size, 1, 1, 1, channels)`
        - If `data_format="channels_first"`:
            5D tensor with shape `(batch_size, channels, 1, 1, 1)`

    Example:

    >>> x = np.random.rand(2, 4, 5, 4, 3)
    >>> y = keras.layers.GlobalMaxPooling3D()(x)
    >>> y.shape
    (2, 3)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        # Delegate all pooling mechanics to the base class, fixing the
        # number of pooled dimensions to 3.
        super().__init__(
            pool_dimensions=3,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Spatial axes are (1, 2, 3) for channels-last layouts and
        # (2, 3, 4) for channels-first layouts.
        spatial_axes = (
            [1, 2, 3] if self.data_format == "channels_last" else [2, 3, 4]
        )
        return ops.max(inputs, axis=spatial_axes, keepdims=self.keepdims)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling1d.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.pooling.base_pooling import BasePooling
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"])
class MaxPooling1D(BasePooling):
    """Max pooling operation for 1D temporal data.

    Downsamples the input representation by taking the maximum value over a
    spatial window of size `pool_size`. The window is shifted by `strides`.

    The resulting output when using the `"valid"` padding option has a shape
    of: `output_shape = (input_shape - pool_size + 1) / strides`.

    The resulting output shape when using the `"same"` padding option is:
    `output_shape = input_shape / strides`

    Args:
        pool_size: int, size of the max pooling window.
        strides: int or None. Specifies how much the pooling window moves
            for each pooling step. If None, it will default to `pool_size`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input such that output has the
            same height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.

    Input shape:

    - If `data_format="channels_last"`:
        3D tensor with shape `(batch_size, steps, features)`.
    - If `data_format="channels_first"`:
        3D tensor with shape `(batch_size, features, steps)`.

    Output shape:

    - If `data_format="channels_last"`:
        3D tensor with shape `(batch_size, downsampled_steps, features)`.
    - If `data_format="channels_first"`:
        3D tensor with shape `(batch_size, features, downsampled_steps)`.

    Examples:

    `strides=1` and `padding="valid"`:

    >>> x = np.array([1., 2., 3., 4., 5.])
    >>> x = np.reshape(x, [1, 5, 1])
    >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
    ...    strides=1, padding="valid")
    >>> max_pool_1d(x)

    `strides=2` and `padding="valid"`:

    >>> x = np.array([1., 2., 3., 4., 5.])
    >>> x = np.reshape(x, [1, 5, 1])
    >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
    ...    strides=2, padding="valid")
    >>> max_pool_1d(x)

    `strides=1` and `padding="same"`:

    >>> x = np.array([1., 2., 3., 4., 5.])
    >>> x = np.reshape(x, [1, 5, 1])
    >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2,
    ...    strides=1, padding="same")
    >>> max_pool_1d(x)
    """

    def __init__(
        self,
        pool_size=2,
        strides=None,
        padding="valid",
        data_format=None,
        name=None,
        **kwargs,
    ):
        # Thin wrapper: the shared BasePooling implements the actual
        # pooling; this subclass only fixes dimensionality and mode.
        super().__init__(
            pool_size,
            strides,
            pool_dimensions=1,
            pool_mode="max",
            padding=padding,
            data_format=data_format,
            name=name,
            **kwargs,
        )
|