Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__init__.py +234 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/input_spec.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/layer.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/input_spec.py +250 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/layer.py +1759 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/__pycache__/gaussian_dropout.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/__pycache__/gaussian_noise.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/__pycache__/spatial_dropout.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/activity_regularization.py +42 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/alpha_dropout.py +98 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/dropout.py +77 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/gaussian_dropout.py +63 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/gaussian_noise.py +63 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/spatial_dropout.py +192 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping1d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping2d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping3d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/flatten.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/permute.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/repeat_vector.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/reshape.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling1d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling2d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling3d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding1d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding2d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding3d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping1d.py +82 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping2d.py +224 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping3d.py +284 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/flatten.py +80 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/permute.py +64 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/repeat_vector.py +48 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/reshape.py +73 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling1d.py +60 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling2d.py +170 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling3d.py +134 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding1d.py +93 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding2d.py +119 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding3d.py +118 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/bidirectional.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm1d.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm2d.cpython-310.pyc +0 -0
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__init__.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.activations.activation import Activation
|
| 3 |
+
from keras.src.layers.activations.elu import ELU
|
| 4 |
+
from keras.src.layers.activations.leaky_relu import LeakyReLU
|
| 5 |
+
from keras.src.layers.activations.prelu import PReLU
|
| 6 |
+
from keras.src.layers.activations.relu import ReLU
|
| 7 |
+
from keras.src.layers.activations.softmax import Softmax
|
| 8 |
+
from keras.src.layers.attention.additive_attention import AdditiveAttention
|
| 9 |
+
from keras.src.layers.attention.attention import Attention
|
| 10 |
+
from keras.src.layers.attention.grouped_query_attention import (
|
| 11 |
+
GroupedQueryAttention,
|
| 12 |
+
)
|
| 13 |
+
from keras.src.layers.attention.multi_head_attention import MultiHeadAttention
|
| 14 |
+
from keras.src.layers.convolutional.conv1d import Conv1D
|
| 15 |
+
from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose
|
| 16 |
+
from keras.src.layers.convolutional.conv2d import Conv2D
|
| 17 |
+
from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose
|
| 18 |
+
from keras.src.layers.convolutional.conv3d import Conv3D
|
| 19 |
+
from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose
|
| 20 |
+
from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D
|
| 21 |
+
from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D
|
| 22 |
+
from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D
|
| 23 |
+
from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D
|
| 24 |
+
from keras.src.layers.core.dense import Dense
|
| 25 |
+
from keras.src.layers.core.einsum_dense import EinsumDense
|
| 26 |
+
from keras.src.layers.core.embedding import Embedding
|
| 27 |
+
from keras.src.layers.core.identity import Identity
|
| 28 |
+
from keras.src.layers.core.input_layer import Input
|
| 29 |
+
from keras.src.layers.core.input_layer import InputLayer
|
| 30 |
+
from keras.src.layers.core.lambda_layer import Lambda
|
| 31 |
+
from keras.src.layers.core.masking import Masking
|
| 32 |
+
from keras.src.layers.core.wrapper import Wrapper
|
| 33 |
+
from keras.src.layers.input_spec import InputSpec
|
| 34 |
+
from keras.src.layers.layer import Layer
|
| 35 |
+
from keras.src.layers.merging.add import Add
|
| 36 |
+
from keras.src.layers.merging.add import add
|
| 37 |
+
from keras.src.layers.merging.average import Average
|
| 38 |
+
from keras.src.layers.merging.average import average
|
| 39 |
+
from keras.src.layers.merging.concatenate import Concatenate
|
| 40 |
+
from keras.src.layers.merging.concatenate import concatenate
|
| 41 |
+
from keras.src.layers.merging.dot import Dot
|
| 42 |
+
from keras.src.layers.merging.dot import dot
|
| 43 |
+
from keras.src.layers.merging.maximum import Maximum
|
| 44 |
+
from keras.src.layers.merging.maximum import maximum
|
| 45 |
+
from keras.src.layers.merging.minimum import Minimum
|
| 46 |
+
from keras.src.layers.merging.minimum import minimum
|
| 47 |
+
from keras.src.layers.merging.multiply import Multiply
|
| 48 |
+
from keras.src.layers.merging.multiply import multiply
|
| 49 |
+
from keras.src.layers.merging.subtract import Subtract
|
| 50 |
+
from keras.src.layers.merging.subtract import subtract
|
| 51 |
+
from keras.src.layers.normalization.batch_normalization import (
|
| 52 |
+
BatchNormalization,
|
| 53 |
+
)
|
| 54 |
+
from keras.src.layers.normalization.group_normalization import (
|
| 55 |
+
GroupNormalization,
|
| 56 |
+
)
|
| 57 |
+
from keras.src.layers.normalization.layer_normalization import (
|
| 58 |
+
LayerNormalization,
|
| 59 |
+
)
|
| 60 |
+
from keras.src.layers.normalization.spectral_normalization import (
|
| 61 |
+
SpectralNormalization,
|
| 62 |
+
)
|
| 63 |
+
from keras.src.layers.normalization.unit_normalization import UnitNormalization
|
| 64 |
+
from keras.src.layers.pooling.average_pooling1d import AveragePooling1D
|
| 65 |
+
from keras.src.layers.pooling.average_pooling2d import AveragePooling2D
|
| 66 |
+
from keras.src.layers.pooling.average_pooling3d import AveragePooling3D
|
| 67 |
+
from keras.src.layers.pooling.global_average_pooling1d import (
|
| 68 |
+
GlobalAveragePooling1D,
|
| 69 |
+
)
|
| 70 |
+
from keras.src.layers.pooling.global_average_pooling2d import (
|
| 71 |
+
GlobalAveragePooling2D,
|
| 72 |
+
)
|
| 73 |
+
from keras.src.layers.pooling.global_average_pooling3d import (
|
| 74 |
+
GlobalAveragePooling3D,
|
| 75 |
+
)
|
| 76 |
+
from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
|
| 77 |
+
from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
|
| 78 |
+
from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
|
| 79 |
+
from keras.src.layers.pooling.max_pooling1d import MaxPooling1D
|
| 80 |
+
from keras.src.layers.pooling.max_pooling2d import MaxPooling2D
|
| 81 |
+
from keras.src.layers.pooling.max_pooling3d import MaxPooling3D
|
| 82 |
+
from keras.src.layers.preprocessing.category_encoding import CategoryEncoding
|
| 83 |
+
from keras.src.layers.preprocessing.discretization import Discretization
|
| 84 |
+
from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing
|
| 85 |
+
from keras.src.layers.preprocessing.hashing import Hashing
|
| 86 |
+
from keras.src.layers.preprocessing.image_preprocessing.auto_contrast import (
|
| 87 |
+
AutoContrast,
|
| 88 |
+
)
|
| 89 |
+
from keras.src.layers.preprocessing.image_preprocessing.center_crop import (
|
| 90 |
+
CenterCrop,
|
| 91 |
+
)
|
| 92 |
+
from keras.src.layers.preprocessing.image_preprocessing.equalization import (
|
| 93 |
+
Equalization,
|
| 94 |
+
)
|
| 95 |
+
from keras.src.layers.preprocessing.image_preprocessing.max_num_bounding_box import (
|
| 96 |
+
MaxNumBoundingBoxes,
|
| 97 |
+
)
|
| 98 |
+
from keras.src.layers.preprocessing.image_preprocessing.mix_up import MixUp
|
| 99 |
+
from keras.src.layers.preprocessing.image_preprocessing.rand_augment import (
|
| 100 |
+
RandAugment,
|
| 101 |
+
)
|
| 102 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_brightness import (
|
| 103 |
+
RandomBrightness,
|
| 104 |
+
)
|
| 105 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_color_degeneration import (
|
| 106 |
+
RandomColorDegeneration,
|
| 107 |
+
)
|
| 108 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_color_jitter import (
|
| 109 |
+
RandomColorJitter,
|
| 110 |
+
)
|
| 111 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_contrast import (
|
| 112 |
+
RandomContrast,
|
| 113 |
+
)
|
| 114 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_crop import (
|
| 115 |
+
RandomCrop,
|
| 116 |
+
)
|
| 117 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_flip import (
|
| 118 |
+
RandomFlip,
|
| 119 |
+
)
|
| 120 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_grayscale import (
|
| 121 |
+
RandomGrayscale,
|
| 122 |
+
)
|
| 123 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_hue import (
|
| 124 |
+
RandomHue,
|
| 125 |
+
)
|
| 126 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_posterization import (
|
| 127 |
+
RandomPosterization,
|
| 128 |
+
)
|
| 129 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_rotation import (
|
| 130 |
+
RandomRotation,
|
| 131 |
+
)
|
| 132 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_saturation import (
|
| 133 |
+
RandomSaturation,
|
| 134 |
+
)
|
| 135 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_sharpness import (
|
| 136 |
+
RandomSharpness,
|
| 137 |
+
)
|
| 138 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_shear import (
|
| 139 |
+
RandomShear,
|
| 140 |
+
)
|
| 141 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_translation import (
|
| 142 |
+
RandomTranslation,
|
| 143 |
+
)
|
| 144 |
+
from keras.src.layers.preprocessing.image_preprocessing.random_zoom import (
|
| 145 |
+
RandomZoom,
|
| 146 |
+
)
|
| 147 |
+
from keras.src.layers.preprocessing.image_preprocessing.resizing import Resizing
|
| 148 |
+
from keras.src.layers.preprocessing.image_preprocessing.solarization import (
|
| 149 |
+
Solarization,
|
| 150 |
+
)
|
| 151 |
+
from keras.src.layers.preprocessing.index_lookup import IndexLookup
|
| 152 |
+
from keras.src.layers.preprocessing.integer_lookup import IntegerLookup
|
| 153 |
+
from keras.src.layers.preprocessing.mel_spectrogram import MelSpectrogram
|
| 154 |
+
from keras.src.layers.preprocessing.normalization import Normalization
|
| 155 |
+
from keras.src.layers.preprocessing.pipeline import Pipeline
|
| 156 |
+
from keras.src.layers.preprocessing.rescaling import Rescaling
|
| 157 |
+
from keras.src.layers.preprocessing.stft_spectrogram import STFTSpectrogram
|
| 158 |
+
from keras.src.layers.preprocessing.string_lookup import StringLookup
|
| 159 |
+
from keras.src.layers.preprocessing.text_vectorization import TextVectorization
|
| 160 |
+
from keras.src.layers.regularization.activity_regularization import (
|
| 161 |
+
ActivityRegularization,
|
| 162 |
+
)
|
| 163 |
+
from keras.src.layers.regularization.alpha_dropout import AlphaDropout
|
| 164 |
+
from keras.src.layers.regularization.dropout import Dropout
|
| 165 |
+
from keras.src.layers.regularization.gaussian_dropout import GaussianDropout
|
| 166 |
+
from keras.src.layers.regularization.gaussian_noise import GaussianNoise
|
| 167 |
+
from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D
|
| 168 |
+
from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D
|
| 169 |
+
from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D
|
| 170 |
+
from keras.src.layers.reshaping.cropping1d import Cropping1D
|
| 171 |
+
from keras.src.layers.reshaping.cropping2d import Cropping2D
|
| 172 |
+
from keras.src.layers.reshaping.cropping3d import Cropping3D
|
| 173 |
+
from keras.src.layers.reshaping.flatten import Flatten
|
| 174 |
+
from keras.src.layers.reshaping.permute import Permute
|
| 175 |
+
from keras.src.layers.reshaping.repeat_vector import RepeatVector
|
| 176 |
+
from keras.src.layers.reshaping.reshape import Reshape
|
| 177 |
+
from keras.src.layers.reshaping.up_sampling1d import UpSampling1D
|
| 178 |
+
from keras.src.layers.reshaping.up_sampling2d import UpSampling2D
|
| 179 |
+
from keras.src.layers.reshaping.up_sampling3d import UpSampling3D
|
| 180 |
+
from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D
|
| 181 |
+
from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D
|
| 182 |
+
from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D
|
| 183 |
+
from keras.src.layers.rnn.bidirectional import Bidirectional
|
| 184 |
+
from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D
|
| 185 |
+
from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D
|
| 186 |
+
from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D
|
| 187 |
+
from keras.src.layers.rnn.gru import GRU
|
| 188 |
+
from keras.src.layers.rnn.gru import GRUCell
|
| 189 |
+
from keras.src.layers.rnn.lstm import LSTM
|
| 190 |
+
from keras.src.layers.rnn.lstm import LSTMCell
|
| 191 |
+
from keras.src.layers.rnn.rnn import RNN
|
| 192 |
+
from keras.src.layers.rnn.simple_rnn import SimpleRNN
|
| 193 |
+
from keras.src.layers.rnn.simple_rnn import SimpleRNNCell
|
| 194 |
+
from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells
|
| 195 |
+
from keras.src.layers.rnn.time_distributed import TimeDistributed
|
| 196 |
+
from keras.src.saving import serialization_lib
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
@keras_export("keras.layers.serialize")
|
| 200 |
+
def serialize(layer):
|
| 201 |
+
"""Returns the layer configuration as a Python dict.
|
| 202 |
+
|
| 203 |
+
Args:
|
| 204 |
+
layer: A `keras.layers.Layer` instance to serialize.
|
| 205 |
+
|
| 206 |
+
Returns:
|
| 207 |
+
Python dict which contains the configuration of the layer.
|
| 208 |
+
"""
|
| 209 |
+
return serialization_lib.serialize_keras_object(layer)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
@keras_export("keras.layers.deserialize")
|
| 213 |
+
def deserialize(config, custom_objects=None):
|
| 214 |
+
"""Returns a Keras layer object via its configuration.
|
| 215 |
+
|
| 216 |
+
Args:
|
| 217 |
+
config: A python dict containing a serialized layer configuration.
|
| 218 |
+
custom_objects: Optional dictionary mapping names (strings) to custom
|
| 219 |
+
objects (classes and functions) to be considered during
|
| 220 |
+
deserialization.
|
| 221 |
+
|
| 222 |
+
Returns:
|
| 223 |
+
A Keras layer instance.
|
| 224 |
+
"""
|
| 225 |
+
obj = serialization_lib.deserialize_keras_object(
|
| 226 |
+
config,
|
| 227 |
+
custom_objects=custom_objects,
|
| 228 |
+
)
|
| 229 |
+
if not isinstance(obj, Layer):
|
| 230 |
+
raise ValueError(
|
| 231 |
+
"`keras.layers.deserialize` was passed a `config` object that is "
|
| 232 |
+
f"not a `keras.layers.Layer`. Received: {config}"
|
| 233 |
+
)
|
| 234 |
+
return obj
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/input_spec.cpython-310.pyc
ADDED
|
Binary file (7.34 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/layer.cpython-310.pyc
ADDED
|
Binary file (52.4 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/input_spec.py
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import tree
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export(["keras.InputSpec", "keras.layers.InputSpec"])
|
| 7 |
+
class InputSpec:
|
| 8 |
+
"""Specifies the rank, dtype and shape of every input to a layer.
|
| 9 |
+
|
| 10 |
+
Layers can expose (if appropriate) an `input_spec` attribute:
|
| 11 |
+
an instance of `InputSpec`, or a nested structure of `InputSpec` instances
|
| 12 |
+
(one per input tensor). These objects enable the layer to run input
|
| 13 |
+
compatibility checks for input structure, input rank, input shape, and
|
| 14 |
+
input dtype for the first argument of `Layer.__call__`.
|
| 15 |
+
|
| 16 |
+
A `None` entry in a shape is compatible with any dimension.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
dtype: Expected dtype of the input.
|
| 20 |
+
shape: Shape tuple, expected shape of the input
|
| 21 |
+
(may include `None` for dynamic axes).
|
| 22 |
+
Includes the batch size.
|
| 23 |
+
ndim: Integer, expected rank of the input.
|
| 24 |
+
max_ndim: Integer, maximum rank of the input.
|
| 25 |
+
min_ndim: Integer, minimum rank of the input.
|
| 26 |
+
axes: Dictionary mapping integer axes to
|
| 27 |
+
a specific dimension value.
|
| 28 |
+
allow_last_axis_squeeze: If `True`, allow inputs of rank N+1 as long
|
| 29 |
+
as the last axis of the input is 1, as well as inputs of rank N-1
|
| 30 |
+
as long as the last axis of the spec is 1.
|
| 31 |
+
name: Expected key corresponding to this input when passing data as
|
| 32 |
+
a dictionary.
|
| 33 |
+
optional: Boolean, whether the input is optional or not.
|
| 34 |
+
An optional input can accept `None` values.
|
| 35 |
+
|
| 36 |
+
Example:
|
| 37 |
+
|
| 38 |
+
```python
|
| 39 |
+
class MyLayer(Layer):
|
| 40 |
+
def __init__(self):
|
| 41 |
+
super().__init__()
|
| 42 |
+
# The layer will accept inputs with
|
| 43 |
+
# shape (*, 28, 28) & (*, 28, 28, 1)
|
| 44 |
+
# and raise an appropriate error message otherwise.
|
| 45 |
+
self.input_spec = InputSpec(
|
| 46 |
+
shape=(None, 28, 28, 1),
|
| 47 |
+
allow_last_axis_squeeze=True)
|
| 48 |
+
```
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
def __init__(
|
| 52 |
+
self,
|
| 53 |
+
dtype=None,
|
| 54 |
+
shape=None,
|
| 55 |
+
ndim=None,
|
| 56 |
+
max_ndim=None,
|
| 57 |
+
min_ndim=None,
|
| 58 |
+
axes=None,
|
| 59 |
+
allow_last_axis_squeeze=False,
|
| 60 |
+
name=None,
|
| 61 |
+
optional=False,
|
| 62 |
+
):
|
| 63 |
+
self.dtype = (
|
| 64 |
+
backend.standardize_dtype(dtype) if dtype is not None else None
|
| 65 |
+
)
|
| 66 |
+
if shape is not None:
|
| 67 |
+
self.shape = backend.standardize_shape(shape)
|
| 68 |
+
self.ndim = len(shape)
|
| 69 |
+
else:
|
| 70 |
+
self.ndim = ndim
|
| 71 |
+
self.shape = None
|
| 72 |
+
self.max_ndim = max_ndim
|
| 73 |
+
self.min_ndim = min_ndim
|
| 74 |
+
self.name = name
|
| 75 |
+
self.optional = optional
|
| 76 |
+
self.allow_last_axis_squeeze = allow_last_axis_squeeze
|
| 77 |
+
try:
|
| 78 |
+
axes = axes or {}
|
| 79 |
+
self.axes = {int(k): axes[k] for k in axes}
|
| 80 |
+
except (ValueError, TypeError):
|
| 81 |
+
raise TypeError(
|
| 82 |
+
"Argument `axes` must be a dict with integer keys. "
|
| 83 |
+
f"Received: axes={axes}"
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
if self.axes and (self.ndim is not None or self.max_ndim is not None):
|
| 87 |
+
max_dim = (self.ndim if self.ndim else self.max_ndim) - 1
|
| 88 |
+
max_axis = max(self.axes)
|
| 89 |
+
if max_axis > max_dim:
|
| 90 |
+
raise ValueError(
|
| 91 |
+
"Axis {} is greater than the maximum "
|
| 92 |
+
"allowed value: {}".format(max_axis, max_dim)
|
| 93 |
+
)
|
| 94 |
+
|
| 95 |
+
def __repr__(self):
|
| 96 |
+
spec = [
|
| 97 |
+
("dtype=" + str(self.dtype)) if self.dtype else "",
|
| 98 |
+
("shape=" + str(self.shape)) if self.shape else "",
|
| 99 |
+
("ndim=" + str(self.ndim)) if self.ndim else "",
|
| 100 |
+
("max_ndim=" + str(self.max_ndim)) if self.max_ndim else "",
|
| 101 |
+
("min_ndim=" + str(self.min_ndim)) if self.min_ndim else "",
|
| 102 |
+
("axes=" + str(self.axes)) if self.axes else "",
|
| 103 |
+
]
|
| 104 |
+
return f"InputSpec({', '.join(x for x in spec if x)})"
|
| 105 |
+
|
| 106 |
+
def get_config(self):
|
| 107 |
+
return {
|
| 108 |
+
"dtype": self.dtype,
|
| 109 |
+
"shape": self.shape,
|
| 110 |
+
"ndim": self.ndim,
|
| 111 |
+
"max_ndim": self.max_ndim,
|
| 112 |
+
"min_ndim": self.min_ndim,
|
| 113 |
+
"axes": self.axes,
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
@classmethod
|
| 117 |
+
def from_config(cls, config):
|
| 118 |
+
return cls(**config)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def assert_input_compatibility(input_spec, inputs, layer_name):
|
| 122 |
+
"""Checks compatibility between the layer and provided inputs.
|
| 123 |
+
|
| 124 |
+
This checks that the tensor(s) `inputs` verify the input assumptions
|
| 125 |
+
of a layer (if any). If not, a clear and actional exception gets raised.
|
| 126 |
+
|
| 127 |
+
Args:
|
| 128 |
+
input_spec: An InputSpec instance, list of InputSpec instances, a nested
|
| 129 |
+
structure of InputSpec instances, or None.
|
| 130 |
+
inputs: Input tensor, list of input tensors, or a nested structure of
|
| 131 |
+
input tensors.
|
| 132 |
+
layer_name: String, name of the layer (for error message formatting).
|
| 133 |
+
|
| 134 |
+
Raises:
|
| 135 |
+
ValueError: in case of mismatch between
|
| 136 |
+
the provided inputs and the expectations of the layer.
|
| 137 |
+
"""
|
| 138 |
+
if not input_spec:
|
| 139 |
+
return
|
| 140 |
+
|
| 141 |
+
input_spec = tree.flatten(input_spec)
|
| 142 |
+
if isinstance(inputs, dict):
|
| 143 |
+
# Flatten `inputs` by reference order if input spec names are provided
|
| 144 |
+
names = [spec.name for spec in input_spec]
|
| 145 |
+
if all(names):
|
| 146 |
+
list_inputs = []
|
| 147 |
+
for name in names:
|
| 148 |
+
if name not in inputs:
|
| 149 |
+
raise ValueError(
|
| 150 |
+
f'Missing data for input "{name}". '
|
| 151 |
+
"You passed a data dictionary with keys "
|
| 152 |
+
f"{list(inputs.keys())}. "
|
| 153 |
+
f"Expected the following keys: {names}"
|
| 154 |
+
)
|
| 155 |
+
list_inputs.append(inputs[name])
|
| 156 |
+
inputs = list_inputs
|
| 157 |
+
|
| 158 |
+
inputs = tree.flatten(inputs)
|
| 159 |
+
if len(inputs) != len(input_spec):
|
| 160 |
+
raise ValueError(
|
| 161 |
+
f'Layer "{layer_name}" expects {len(input_spec)} input(s),'
|
| 162 |
+
f" but it received {len(inputs)} input tensors. "
|
| 163 |
+
f"Inputs received: {inputs}"
|
| 164 |
+
)
|
| 165 |
+
for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):
|
| 166 |
+
if spec is None:
|
| 167 |
+
continue
|
| 168 |
+
if x is None and spec.optional:
|
| 169 |
+
continue
|
| 170 |
+
|
| 171 |
+
# Having a shape/dtype is the only commonality of the various
|
| 172 |
+
# tensor-like objects that may be passed. The most common kind of
|
| 173 |
+
# invalid type we are guarding for is a Layer instance (Functional API),
|
| 174 |
+
# which does not have a `shape` attribute.
|
| 175 |
+
if not hasattr(x, "shape"):
|
| 176 |
+
raise ValueError(
|
| 177 |
+
f"Inputs to a layer should be tensors. Got '{x}' "
|
| 178 |
+
f"(of type {type(x)}) as input for layer '{layer_name}'."
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
shape = backend.standardize_shape(x.shape)
|
| 182 |
+
ndim = len(shape)
|
| 183 |
+
# Check ndim.
|
| 184 |
+
if spec.ndim is not None and not spec.allow_last_axis_squeeze:
|
| 185 |
+
if ndim != spec.ndim:
|
| 186 |
+
raise ValueError(
|
| 187 |
+
f'Input {input_index} of layer "{layer_name}" '
|
| 188 |
+
"is incompatible with the layer: "
|
| 189 |
+
f"expected ndim={spec.ndim}, found ndim={ndim}. "
|
| 190 |
+
f"Full shape received: {shape}"
|
| 191 |
+
)
|
| 192 |
+
if spec.max_ndim is not None:
|
| 193 |
+
if ndim is not None and ndim > spec.max_ndim:
|
| 194 |
+
raise ValueError(
|
| 195 |
+
f'Input {input_index} of layer "{layer_name}" '
|
| 196 |
+
"is incompatible with the layer: "
|
| 197 |
+
f"expected max_ndim={spec.max_ndim}, "
|
| 198 |
+
f"found ndim={ndim}"
|
| 199 |
+
)
|
| 200 |
+
if spec.min_ndim is not None:
|
| 201 |
+
if ndim is not None and ndim < spec.min_ndim:
|
| 202 |
+
raise ValueError(
|
| 203 |
+
f'Input {input_index} of layer "{layer_name}" '
|
| 204 |
+
"is incompatible with the layer: "
|
| 205 |
+
f"expected min_ndim={spec.min_ndim}, "
|
| 206 |
+
f"found ndim={ndim}. "
|
| 207 |
+
f"Full shape received: {shape}"
|
| 208 |
+
)
|
| 209 |
+
# Check dtype.
|
| 210 |
+
if spec.dtype is not None:
|
| 211 |
+
dtype = backend.standardize_dtype(x.dtype)
|
| 212 |
+
if dtype != spec.dtype:
|
| 213 |
+
raise ValueError(
|
| 214 |
+
f'Input {input_index} of layer "{layer_name}" '
|
| 215 |
+
"is incompatible with the layer: "
|
| 216 |
+
f"expected dtype={spec.dtype}, "
|
| 217 |
+
f"found dtype={dtype}"
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
# Check specific shape axes.
|
| 221 |
+
if spec.axes:
|
| 222 |
+
for axis, value in spec.axes.items():
|
| 223 |
+
if value is not None and shape[axis] not in {
|
| 224 |
+
value,
|
| 225 |
+
None,
|
| 226 |
+
}:
|
| 227 |
+
raise ValueError(
|
| 228 |
+
f'Input {input_index} of layer "{layer_name}" is '
|
| 229 |
+
f"incompatible with the layer: expected axis {axis} "
|
| 230 |
+
f"of input shape to have value {value}, "
|
| 231 |
+
"but received input with "
|
| 232 |
+
f"shape {shape}"
|
| 233 |
+
)
|
| 234 |
+
# Check shape.
|
| 235 |
+
if spec.shape is not None:
|
| 236 |
+
spec_shape = spec.shape
|
| 237 |
+
if spec.allow_last_axis_squeeze:
|
| 238 |
+
if shape and shape[-1] == 1:
|
| 239 |
+
shape = shape[:-1]
|
| 240 |
+
if spec_shape and spec_shape[-1] == 1:
|
| 241 |
+
spec_shape = spec_shape[:-1]
|
| 242 |
+
for spec_dim, dim in zip(spec_shape, shape):
|
| 243 |
+
if spec_dim is not None and dim is not None:
|
| 244 |
+
if spec_dim != dim:
|
| 245 |
+
raise ValueError(
|
| 246 |
+
f'Input {input_index} of layer "{layer_name}" is '
|
| 247 |
+
"incompatible with the layer: "
|
| 248 |
+
f"expected shape={spec.shape}, "
|
| 249 |
+
f"found shape={shape}"
|
| 250 |
+
)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/layer.py
ADDED
|
@@ -0,0 +1,1759 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Layer is an Operation with state.
|
| 2 |
+
|
| 3 |
+
Takes care of:
|
| 4 |
+
|
| 5 |
+
- Weights / variables (and tracking thereof)
|
| 6 |
+
- deferred build
|
| 7 |
+
- trainable argument value inference
|
| 8 |
+
- masking
|
| 9 |
+
- autocasting
|
| 10 |
+
|
| 11 |
+
And some more magic:
|
| 12 |
+
|
| 13 |
+
- add_loss
|
| 14 |
+
- metric tracking
|
| 15 |
+
- RNG seed tracking
|
| 16 |
+
- activity regularization
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import collections
|
| 20 |
+
import inspect
|
| 21 |
+
import warnings
|
| 22 |
+
from functools import wraps
|
| 23 |
+
|
| 24 |
+
from keras.src import backend
|
| 25 |
+
from keras.src import constraints
|
| 26 |
+
from keras.src import dtype_policies
|
| 27 |
+
from keras.src import initializers
|
| 28 |
+
from keras.src import regularizers
|
| 29 |
+
from keras.src import tree
|
| 30 |
+
from keras.src import utils
|
| 31 |
+
from keras.src.api_export import keras_export
|
| 32 |
+
from keras.src.backend import KerasTensor
|
| 33 |
+
from keras.src.backend.common import global_state
|
| 34 |
+
from keras.src.backend.common.name_scope import current_path
|
| 35 |
+
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
|
| 36 |
+
from keras.src.distribution import distribution_lib
|
| 37 |
+
from keras.src.dtype_policies import DTypePolicyMap
|
| 38 |
+
from keras.src.layers import input_spec
|
| 39 |
+
from keras.src.metrics.metric import Metric
|
| 40 |
+
from keras.src.ops.operation import Operation
|
| 41 |
+
from keras.src.saving.keras_saveable import KerasSaveable
|
| 42 |
+
from keras.src.utils import python_utils
|
| 43 |
+
from keras.src.utils import summary_utils
|
| 44 |
+
from keras.src.utils import traceback_utils
|
| 45 |
+
from keras.src.utils import tracking
|
| 46 |
+
|
| 47 |
+
if backend.backend() == "tensorflow":
|
| 48 |
+
from keras.src.backend.tensorflow.layer import TFLayer as BackendLayer
|
| 49 |
+
elif backend.backend() == "jax":
|
| 50 |
+
from keras.src.backend.jax.layer import JaxLayer as BackendLayer
|
| 51 |
+
elif backend.backend() == "torch":
|
| 52 |
+
from keras.src.backend.torch.layer import TorchLayer as BackendLayer
|
| 53 |
+
elif backend.backend() == "numpy":
|
| 54 |
+
from keras.src.backend.numpy.layer import NumpyLayer as BackendLayer
|
| 55 |
+
elif backend.backend() == "openvino":
|
| 56 |
+
from keras.src.backend.openvino.layer import OpenvinoLayer as BackendLayer
|
| 57 |
+
else:
|
| 58 |
+
raise RuntimeError(
|
| 59 |
+
f"Backend '{backend.backend()}' must implement a layer mixin class."
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@keras_export(["keras.Layer", "keras.layers.Layer"])
|
| 64 |
+
class Layer(BackendLayer, Operation, KerasSaveable):
|
| 65 |
+
"""This is the class from which all layers inherit.
|
| 66 |
+
|
| 67 |
+
A layer is a callable object that takes as input one or more tensors and
|
| 68 |
+
that outputs one or more tensors. It involves *computation*, defined
|
| 69 |
+
in the `call()` method, and a *state* (weight variables). State can be
|
| 70 |
+
created:
|
| 71 |
+
|
| 72 |
+
* in `__init__()`, for instance via `self.add_weight()`;
|
| 73 |
+
* in the optional `build()` method, which is invoked by the first
|
| 74 |
+
`__call__()` to the layer, and supplies the shape(s) of the input(s),
|
| 75 |
+
which may not have been known at initialization time.
|
| 76 |
+
|
| 77 |
+
Layers are recursively composable: If you assign a Layer instance as an
|
| 78 |
+
attribute of another Layer, the outer layer will start tracking the weights
|
| 79 |
+
created by the inner layer. Nested layers should be instantiated in the
|
| 80 |
+
`__init__()` method or `build()` method.
|
| 81 |
+
|
| 82 |
+
Users will just instantiate a layer and then treat it as a callable.
|
| 83 |
+
|
| 84 |
+
Args:
|
| 85 |
+
trainable: Boolean, whether the layer's variables should be trainable.
|
| 86 |
+
name: String name of the layer.
|
| 87 |
+
dtype: The dtype of the layer's computations and weights. Can also be a
|
| 88 |
+
`keras.DTypePolicy`,
|
| 89 |
+
which allows the computation and
|
| 90 |
+
weight dtype to differ. Defaults to `None`. `None` means to use
|
| 91 |
+
`keras.config.dtype_policy()`,
|
| 92 |
+
which is a `float32` policy unless set to different value
|
| 93 |
+
(via `keras.config.set_dtype_policy()`).
|
| 94 |
+
|
| 95 |
+
Attributes:
|
| 96 |
+
name: The name of the layer (string).
|
| 97 |
+
dtype: Dtype of the layer's weights. Alias of `layer.variable_dtype`.
|
| 98 |
+
variable_dtype: Dtype of the layer's weights.
|
| 99 |
+
compute_dtype: The dtype of the layer's computations.
|
| 100 |
+
Layers automatically cast inputs to this dtype, which causes
|
| 101 |
+
the computations and output to also be in this dtype.
|
| 102 |
+
When mixed precision is used with a
|
| 103 |
+
`keras.DTypePolicy`, this will be different
|
| 104 |
+
than `variable_dtype`.
|
| 105 |
+
trainable_weights: List of variables to be included in backprop.
|
| 106 |
+
non_trainable_weights: List of variables that should not be
|
| 107 |
+
included in backprop.
|
| 108 |
+
weights: The concatenation of the lists trainable_weights and
|
| 109 |
+
non_trainable_weights (in this order).
|
| 110 |
+
trainable: Whether the layer should be trained (boolean), i.e.
|
| 111 |
+
whether its potentially-trainable weights should be returned
|
| 112 |
+
as part of `layer.trainable_weights`.
|
| 113 |
+
input_spec: Optional (list of) `InputSpec` object(s) specifying the
|
| 114 |
+
constraints on inputs that can be accepted by the layer.
|
| 115 |
+
|
| 116 |
+
We recommend that descendants of `Layer` implement the following methods:
|
| 117 |
+
|
| 118 |
+
* `__init__()`: Defines custom layer attributes, and creates layer weights
|
| 119 |
+
that do not depend on input shapes, using `add_weight()`,
|
| 120 |
+
or other state.
|
| 121 |
+
* `build(self, input_shape)`: This method can be used to create weights that
|
| 122 |
+
depend on the shape(s) of the input(s), using `add_weight()`, or other
|
| 123 |
+
state. `__call__()` will automatically build the layer
|
| 124 |
+
(if it has not been built yet) by calling `build()`.
|
| 125 |
+
* `call(self, *args, **kwargs)`: Called in `__call__` after making
|
| 126 |
+
sure `build()` has been called. `call()` performs the logic of applying
|
| 127 |
+
the layer to the input arguments.
|
| 128 |
+
Two reserved keyword arguments you can optionally use in `call()` are:
|
| 129 |
+
1. `training` (boolean, whether the call is in inference mode or
|
| 130 |
+
training mode).
|
| 131 |
+
2. `mask` (boolean tensor encoding masked timesteps in the input,
|
| 132 |
+
used e.g. in RNN layers).
|
| 133 |
+
A typical signature for this method is `call(self, inputs)`, and user
|
| 134 |
+
could optionally add `training` and `mask` if the layer need them.
|
| 135 |
+
* `get_config(self)`: Returns a dictionary containing the configuration
|
| 136 |
+
used to initialize this layer. If the keys differ from the arguments
|
| 137 |
+
in `__init__()`, then override `from_config(self)` as well.
|
| 138 |
+
This method is used when saving
|
| 139 |
+
the layer or a model that contains this layer.
|
| 140 |
+
|
| 141 |
+
Examples:
|
| 142 |
+
|
| 143 |
+
Here's a basic example: a layer with two variables, `w` and `b`,
|
| 144 |
+
that returns `y = w . x + b`.
|
| 145 |
+
It shows how to implement `build()` and `call()`.
|
| 146 |
+
Variables set as attributes of a layer are tracked as weights
|
| 147 |
+
of the layers (in `layer.weights`).
|
| 148 |
+
|
| 149 |
+
```python
|
| 150 |
+
class SimpleDense(Layer):
|
| 151 |
+
def __init__(self, units=32):
|
| 152 |
+
super().__init__()
|
| 153 |
+
self.units = units
|
| 154 |
+
|
| 155 |
+
# Create the state of the layer (weights)
|
| 156 |
+
def build(self, input_shape):
|
| 157 |
+
self.kernel = self.add_weight(
|
| 158 |
+
shape=(input_shape[-1], self.units),
|
| 159 |
+
initializer="glorot_uniform",
|
| 160 |
+
trainable=True,
|
| 161 |
+
name="kernel",
|
| 162 |
+
)
|
| 163 |
+
self.bias = self.add_weight(
|
| 164 |
+
shape=(self.units,),
|
| 165 |
+
initializer="zeros",
|
| 166 |
+
trainable=True,
|
| 167 |
+
name="bias",
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
# Defines the computation
|
| 171 |
+
def call(self, inputs):
|
| 172 |
+
return ops.matmul(inputs, self.kernel) + self.bias
|
| 173 |
+
|
| 174 |
+
# Instantiates the layer.
|
| 175 |
+
linear_layer = SimpleDense(4)
|
| 176 |
+
|
| 177 |
+
# This will also call `build(input_shape)` and create the weights.
|
| 178 |
+
y = linear_layer(ops.ones((2, 2)))
|
| 179 |
+
assert len(linear_layer.weights) == 2
|
| 180 |
+
|
| 181 |
+
# These weights are trainable, so they're listed in `trainable_weights`:
|
| 182 |
+
assert len(linear_layer.trainable_weights) == 2
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
Besides trainable weights, updated via backpropagation during training,
|
| 186 |
+
layers can also have non-trainable weights. These weights are meant to
|
| 187 |
+
be updated manually during `call()`. Here's a example layer that computes
|
| 188 |
+
the running sum of its inputs:
|
| 189 |
+
|
| 190 |
+
```python
|
| 191 |
+
class ComputeSum(Layer):
|
| 192 |
+
|
| 193 |
+
def __init__(self, input_dim):
|
| 194 |
+
super(ComputeSum, self).__init__()
|
| 195 |
+
# Create a non-trainable weight.
|
| 196 |
+
self.total = self.add_weight(
|
| 197 |
+
shape=(),
|
| 198 |
+
initializer="zeros",
|
| 199 |
+
trainable=False,
|
| 200 |
+
name="total",
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
def call(self, inputs):
|
| 204 |
+
self.total.assign(self.total + ops.sum(inputs))
|
| 205 |
+
return self.total
|
| 206 |
+
|
| 207 |
+
my_sum = ComputeSum(2)
|
| 208 |
+
x = ops.ones((2, 2))
|
| 209 |
+
y = my_sum(x)
|
| 210 |
+
|
| 211 |
+
assert my_sum.weights == [my_sum.total]
|
| 212 |
+
assert my_sum.non_trainable_weights == [my_sum.total]
|
| 213 |
+
assert my_sum.trainable_weights == []
|
| 214 |
+
```
|
| 215 |
+
"""
|
| 216 |
+
|
| 217 |
+
def __new__(cls, *args, **kwargs):
|
| 218 |
+
obj = super().__new__(cls, *args, **kwargs)
|
| 219 |
+
|
| 220 |
+
# Wrap the user-provided `build` method in the `build_wrapper`
|
| 221 |
+
# to add name scope support and serialization support.
|
| 222 |
+
original_build_method = obj.build
|
| 223 |
+
|
| 224 |
+
@wraps(original_build_method)
|
| 225 |
+
def build_wrapper(*args, **kwargs):
|
| 226 |
+
with obj._open_name_scope():
|
| 227 |
+
obj._path = current_path()
|
| 228 |
+
original_build_method(*args, **kwargs)
|
| 229 |
+
# Record build config.
|
| 230 |
+
signature = inspect.signature(original_build_method)
|
| 231 |
+
obj._build_shapes_dict = signature.bind(*args, **kwargs).arguments
|
| 232 |
+
# Set built, post build actions, and lock state.
|
| 233 |
+
obj.built = True
|
| 234 |
+
obj._post_build()
|
| 235 |
+
obj._lock_state()
|
| 236 |
+
|
| 237 |
+
obj.build = build_wrapper
|
| 238 |
+
|
| 239 |
+
# Wrap the user-provided `quantize` method in the `quantize_wrapper`
|
| 240 |
+
# to add tracker support.
|
| 241 |
+
original_quantize_method = obj.quantize
|
| 242 |
+
|
| 243 |
+
@wraps(original_quantize_method)
|
| 244 |
+
def quantize_wrapper(mode, **kwargs):
|
| 245 |
+
obj._check_quantize_args(mode, obj.compute_dtype)
|
| 246 |
+
obj._tracker.unlock()
|
| 247 |
+
try:
|
| 248 |
+
original_quantize_method(mode, **kwargs)
|
| 249 |
+
except Exception:
|
| 250 |
+
raise
|
| 251 |
+
finally:
|
| 252 |
+
obj._tracker.lock()
|
| 253 |
+
|
| 254 |
+
obj.quantize = quantize_wrapper
|
| 255 |
+
|
| 256 |
+
return obj
|
| 257 |
+
|
| 258 |
+
def __init__(
|
| 259 |
+
self,
|
| 260 |
+
*,
|
| 261 |
+
activity_regularizer=None,
|
| 262 |
+
trainable=True,
|
| 263 |
+
dtype=None,
|
| 264 |
+
autocast=True,
|
| 265 |
+
name=None,
|
| 266 |
+
**kwargs,
|
| 267 |
+
):
|
| 268 |
+
BackendLayer.__init__(self)
|
| 269 |
+
self._lock = False
|
| 270 |
+
Operation.__init__(self, dtype=dtype, name=name)
|
| 271 |
+
self.activity_regularizer = regularizers.get(activity_regularizer)
|
| 272 |
+
input_dim_arg = kwargs.pop("input_dim", None)
|
| 273 |
+
if input_dim_arg is not None:
|
| 274 |
+
input_shape_arg = (input_dim_arg,)
|
| 275 |
+
else:
|
| 276 |
+
input_shape_arg = kwargs.pop("input_shape", None)
|
| 277 |
+
if input_shape_arg is not None:
|
| 278 |
+
warnings.warn(
|
| 279 |
+
"Do not pass an `input_shape`/`input_dim` argument to "
|
| 280 |
+
"a layer. When using Sequential models, "
|
| 281 |
+
"prefer using an `Input(shape)` object as the "
|
| 282 |
+
"first layer in the model instead.",
|
| 283 |
+
stacklevel=2,
|
| 284 |
+
)
|
| 285 |
+
self._input_shape_arg = input_shape_arg
|
| 286 |
+
if kwargs:
|
| 287 |
+
raise ValueError(
|
| 288 |
+
"Unrecognized keyword arguments "
|
| 289 |
+
f"passed to {self.__class__.__name__}: {kwargs}"
|
| 290 |
+
)
|
| 291 |
+
|
| 292 |
+
self._path = None # Will be determined in `build_wrapper`
|
| 293 |
+
self.built = False
|
| 294 |
+
self.autocast = autocast
|
| 295 |
+
self._input_spec = None
|
| 296 |
+
self._called = False
|
| 297 |
+
self.supports_jit = True
|
| 298 |
+
|
| 299 |
+
self._trainable = trainable
|
| 300 |
+
self._losses = []
|
| 301 |
+
self._loss_ids = set()
|
| 302 |
+
self._losses_override = []
|
| 303 |
+
|
| 304 |
+
self._call_signature = inspect.signature(self.call)
|
| 305 |
+
call_signature_parameters = [
|
| 306 |
+
p.name for p in self._call_signature.parameters.values()
|
| 307 |
+
]
|
| 308 |
+
self._call_has_training_arg = "training" in call_signature_parameters
|
| 309 |
+
self._call_has_mask_arg = "mask" in call_signature_parameters
|
| 310 |
+
|
| 311 |
+
self._supports_masking = not utils.is_default(self.compute_mask)
|
| 312 |
+
# Whether to automatically convert (+ auto-cast) inputs to `call()`.
|
| 313 |
+
self._convert_input_args = True
|
| 314 |
+
# Whether to allow non-tensors as positional arguments in `call()`.
|
| 315 |
+
self._allow_non_tensor_positional_args = False
|
| 316 |
+
# Dict of shapes that were used to call `build()`.
|
| 317 |
+
self._build_shapes_dict = None
|
| 318 |
+
# Parent path
|
| 319 |
+
self._parent_path = None
|
| 320 |
+
self._initialize_tracker()
|
| 321 |
+
|
| 322 |
+
@tracking.no_automatic_dependency_tracking
|
| 323 |
+
def _initialize_tracker(self):
|
| 324 |
+
if hasattr(self, "_tracker"):
|
| 325 |
+
return
|
| 326 |
+
|
| 327 |
+
trainable_variables = []
|
| 328 |
+
non_trainable_variables = []
|
| 329 |
+
layers = []
|
| 330 |
+
metrics = []
|
| 331 |
+
seed_generators = []
|
| 332 |
+
self._tracker = tracking.Tracker(
|
| 333 |
+
{
|
| 334 |
+
"trainable_variables": (
|
| 335 |
+
lambda x: isinstance(x, backend.Variable) and x.trainable,
|
| 336 |
+
trainable_variables,
|
| 337 |
+
),
|
| 338 |
+
"non_trainable_variables": (
|
| 339 |
+
lambda x: isinstance(x, backend.Variable)
|
| 340 |
+
and not x.trainable,
|
| 341 |
+
non_trainable_variables,
|
| 342 |
+
),
|
| 343 |
+
"metrics": (lambda x: isinstance(x, Metric), metrics),
|
| 344 |
+
"layers": (
|
| 345 |
+
lambda x: isinstance(x, Layer)
|
| 346 |
+
and not isinstance(x, Metric),
|
| 347 |
+
layers,
|
| 348 |
+
),
|
| 349 |
+
"seed_generators": (
|
| 350 |
+
lambda x: isinstance(x, backend.random.SeedGenerator),
|
| 351 |
+
seed_generators,
|
| 352 |
+
),
|
| 353 |
+
},
|
| 354 |
+
exclusions={"non_trainable_variables": ["trainable_variables"]},
|
| 355 |
+
)
|
| 356 |
+
if backend.backend() == "tensorflow":
|
| 357 |
+
# Remove attribute tracking for lists (TF-specific attribute)
|
| 358 |
+
_self_setattr_tracking = getattr(
|
| 359 |
+
self, "_self_setattr_tracking", True
|
| 360 |
+
)
|
| 361 |
+
self._self_setattr_tracking = False
|
| 362 |
+
|
| 363 |
+
self._trainable_variables = trainable_variables
|
| 364 |
+
self._non_trainable_variables = non_trainable_variables
|
| 365 |
+
self._layers = layers
|
| 366 |
+
self._metrics = metrics
|
| 367 |
+
self._seed_generators = seed_generators
|
| 368 |
+
|
| 369 |
+
if backend.backend() == "tensorflow":
|
| 370 |
+
# Reset attribute tracking (TF-specific)
|
| 371 |
+
self._self_setattr_tracking = _self_setattr_tracking
|
| 372 |
+
|
| 373 |
+
@property
|
| 374 |
+
def path(self):
|
| 375 |
+
"""The path of the layer.
|
| 376 |
+
|
| 377 |
+
If the layer has not been built yet, it will be `None`.
|
| 378 |
+
"""
|
| 379 |
+
return self._path
|
| 380 |
+
|
| 381 |
+
@property
|
| 382 |
+
def input_spec(self):
|
| 383 |
+
return self._input_spec
|
| 384 |
+
|
| 385 |
+
@input_spec.setter
|
| 386 |
+
def input_spec(self, value):
|
| 387 |
+
self._input_spec = value
|
| 388 |
+
|
| 389 |
+
@utils.default
|
| 390 |
+
def build(self, input_shape):
|
| 391 |
+
self._check_super_called()
|
| 392 |
+
if utils.is_default(self.build) and might_have_unbuilt_state(self):
|
| 393 |
+
warnings.warn(
|
| 394 |
+
f"`build()` was called on layer '{self.name}', however "
|
| 395 |
+
"the layer does not have a `build()` method implemented "
|
| 396 |
+
"and it looks like it has unbuilt state. This will cause "
|
| 397 |
+
"the layer to be marked as built, despite not being "
|
| 398 |
+
"actually built, which may cause failures down the line. "
|
| 399 |
+
"Make sure to implement a proper `build()` method."
|
| 400 |
+
)
|
| 401 |
+
self.built = True
|
| 402 |
+
|
| 403 |
+
def _lock_state(self):
|
| 404 |
+
"""Prevent further state updates, called automatically in `build()`."""
|
| 405 |
+
if not self._tracker.locked:
|
| 406 |
+
self._tracker.lock(
|
| 407 |
+
msg=(
|
| 408 |
+
"You cannot add new elements of state "
|
| 409 |
+
"(variables or sub-layers) "
|
| 410 |
+
"to a layer that is already built. All state "
|
| 411 |
+
"must be created in the `__init__()` method or "
|
| 412 |
+
"in the `build()` method."
|
| 413 |
+
)
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
def get_build_config(self):
|
| 417 |
+
"""Returns a dictionary with the layer's input shape.
|
| 418 |
+
|
| 419 |
+
This method returns a config dict that can be used by
|
| 420 |
+
`build_from_config(config)` to create all states (e.g. Variables and
|
| 421 |
+
Lookup tables) needed by the layer.
|
| 422 |
+
|
| 423 |
+
By default, the config only contains the input shape that the layer
|
| 424 |
+
was built with. If you're writing a custom layer that creates state in
|
| 425 |
+
an unusual way, you should override this method to make sure this state
|
| 426 |
+
is already created when Keras attempts to load its value upon model
|
| 427 |
+
loading.
|
| 428 |
+
|
| 429 |
+
Returns:
|
| 430 |
+
A dict containing the input shape associated with the layer.
|
| 431 |
+
"""
|
| 432 |
+
if self._build_shapes_dict is not None:
|
| 433 |
+
if len(self._build_shapes_dict) == 1:
|
| 434 |
+
return {
|
| 435 |
+
"input_shape": tuple(self._build_shapes_dict.values())[0],
|
| 436 |
+
}
|
| 437 |
+
else:
|
| 438 |
+
return {"shapes_dict": self._build_shapes_dict}
|
| 439 |
+
|
| 440 |
+
def build_from_config(self, config):
|
| 441 |
+
"""Builds the layer's states with the supplied config dict.
|
| 442 |
+
|
| 443 |
+
By default, this method calls the `build(config["input_shape"])` method,
|
| 444 |
+
which creates weights based on the layer's input shape in the supplied
|
| 445 |
+
config. If your config contains other information needed to load the
|
| 446 |
+
layer's state, you should override this method.
|
| 447 |
+
|
| 448 |
+
Args:
|
| 449 |
+
config: Dict containing the input shape associated with this layer.
|
| 450 |
+
"""
|
| 451 |
+
if config:
|
| 452 |
+
if "input_shape" in config:
|
| 453 |
+
self.build(config["input_shape"])
|
| 454 |
+
elif "shapes_dict" in config:
|
| 455 |
+
self.build(**config["shapes_dict"])
|
| 456 |
+
self.built = True
|
| 457 |
+
|
| 458 |
+
def _obj_type(self):
|
| 459 |
+
return "Layer"
|
| 460 |
+
|
| 461 |
+
def add_variable(
|
| 462 |
+
self,
|
| 463 |
+
shape,
|
| 464 |
+
initializer,
|
| 465 |
+
dtype=None,
|
| 466 |
+
trainable=True,
|
| 467 |
+
autocast=True,
|
| 468 |
+
regularizer=None,
|
| 469 |
+
constraint=None,
|
| 470 |
+
name=None,
|
| 471 |
+
):
|
| 472 |
+
"""Add a weight variable to the layer.
|
| 473 |
+
|
| 474 |
+
Alias of `add_weight()`.
|
| 475 |
+
"""
|
| 476 |
+
return self.add_weight(
|
| 477 |
+
shape=shape,
|
| 478 |
+
initializer=initializer,
|
| 479 |
+
dtype=dtype,
|
| 480 |
+
trainable=trainable,
|
| 481 |
+
autocast=autocast,
|
| 482 |
+
regularizer=regularizer,
|
| 483 |
+
constraint=constraint,
|
| 484 |
+
name=name,
|
| 485 |
+
)
|
| 486 |
+
|
| 487 |
+
def add_weight(
    self,
    shape=None,
    initializer=None,
    dtype=None,
    trainable=True,
    autocast=True,
    regularizer=None,
    constraint=None,
    aggregation="none",
    name=None,
):
    """Add a weight variable to the layer.

    Args:
        shape: Shape tuple for the variable. Must be fully-defined
            (no `None` entries). Defaults to `()` (scalar) if unspecified.
        initializer: Initializer object to use to populate the initial
            variable value, or string name of a built-in initializer
            (e.g. `"random_normal"`). If unspecified, defaults to
            `"glorot_uniform"` for floating-point variables and to `"zeros"`
            for all other types (e.g. int, bool).
        dtype: Dtype of the variable to create, e.g. `"float32"`. If
            unspecified, defaults to the layer's variable dtype
            (which itself defaults to `"float32"` if unspecified).
        trainable: Boolean, whether the variable should be trainable via
            backprop or whether its updates are managed manually. Defaults
            to `True`.
        autocast: Boolean, whether to autocast layers variables when
            accessing them. Defaults to `True`.
        regularizer: Regularizer object to call to apply penalty on the
            weight. These penalties are summed into the loss function
            during optimization. Defaults to `None`.
        constraint: Constraint object to call on the variable after any
            optimizer update, or string name of a built-in constraint.
            Defaults to `None`.
        aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
            `"sum"` or `"only_first_replica"`. Annotates the variable with
            the type of multi-replica aggregation to be used for this
            variable when writing custom data parallel training loops.
            Defaults to `"none"`.
        name: String name of the variable. Useful for debugging purposes.

    Returns:
        The created variable, already tracked by this layer.
    """
    self._check_super_called()
    shape = () if shape is None else shape
    # Resolve the dtype, falling back to the layer's variable dtype.
    dtype = (
        self.variable_dtype
        if dtype is None
        else backend.standardize_dtype(dtype)
    )
    # Float variables default to glorot init; everything else to zeros.
    if initializer is None:
        initializer = "glorot_uniform" if "float" in dtype else "zeros"
    initializer = initializers.get(initializer)
    with backend.name_scope(self.name, caller=self):
        variable = backend.Variable(
            initializer=initializer,
            shape=shape,
            dtype=dtype,
            trainable=trainable,
            autocast=autocast,
            aggregation=aggregation,
            name=name,
        )
    # Will be added to layer.losses
    variable.regularizer = regularizers.get(regularizer)
    variable.constraint = constraints.get(constraint)
    self._track_variable(variable)
    return variable
|
| 558 |
+
|
| 559 |
+
@property
def trainable(self):
    """Settable boolean, whether this layer should be trainable or not."""
    return self._trainable

@trainable.setter
def trainable(self, value):
    """Sets trainable attribute for the layer and its sublayers.

    When this value is changed during training (e.g. with a
    `Callback`) you need to call the parent
    `Model.make_train_function` with `force=True` in order to
    recompile the training graph.

    Args:
        value: Boolean with the desired state for the layer's trainable
            attribute.
    """
    flag = bool(value)
    self._trainable = flag
    # Propagate to directly-owned variables and recursively to sublayers.
    for variable in self._trainable_variables:
        variable.trainable = flag
    for sublayer in self._layers:
        sublayer.trainable = flag
|
| 583 |
+
|
| 584 |
+
@property
def variables(self):
    """List of all layer state, including random seeds.

    This extends `layer.weights` to include all state used by the layer
    including `SeedGenerator`s.

    Note that metrics variables are not included here, use
    `metrics_variables` to visit all the metric variables.
    """
    # Deduplicate by object identity while preserving discovery order.
    collected = []
    seen = set()

    def _append_unique(v):
        if id(v) not in seen:
            seen.add(id(v))
            collected.append(v)

    for v in self._trainable_variables + self._non_trainable_variables:
        _append_unique(v)
    # Seed generator state is appended unconditionally (matches the
    # historical behavior: it is not entered into the dedup set).
    for sg in self._seed_generators:
        collected.append(sg.state)
    for sublayer in self._layers:
        for v in sublayer.variables:
            _append_unique(v)
    return collected
|
| 610 |
+
|
| 611 |
+
@property
def trainable_variables(self):
    """List of all trainable layer state.

    This is equivalent to `layer.trainable_weights`.
    """
    # A frozen layer exposes no trainable state at all.
    return (
        [v for v in self.variables if v.trainable] if self.trainable else []
    )
|
| 620 |
+
|
| 621 |
+
@property
def non_trainable_variables(self):
    """List of all non-trainable layer state.

    This extends `layer.non_trainable_weights` to include all state used by
    the layer including state for metrics and `SeedGenerator`s.
    """
    all_vars = self.variables
    # When the whole layer is frozen, every variable counts as
    # non-trainable regardless of its own flag.
    if not self.trainable:
        return all_vars
    return [v for v in all_vars if not v.trainable]
|
| 631 |
+
|
| 632 |
+
@property
def weights(self):
    """List of all weight variables of the layer.

    Unlike, `layer.variables` this excludes metric state and random seeds.
    """
    # Deduplicate by identity: a variable shared between sublayers must
    # appear only once, in discovery order.
    collected = []
    seen = set()

    def _append_unique(w):
        if id(w) not in seen:
            seen.add(id(w))
            collected.append(w)

    for w in self._trainable_variables + self._non_trainable_variables:
        _append_unique(w)
    for sublayer in self._layers:
        for w in sublayer.weights:
            _append_unique(w)
    return collected
|
| 652 |
+
|
| 653 |
+
@property
def trainable_weights(self):
    """List of all trainable weight variables of the layer.

    These are the weights that get updated by the optimizer during training.
    """
    # Frozen layer: nothing is updated by the optimizer.
    return (
        [w for w in self.weights if w.trainable] if self.trainable else []
    )
|
| 662 |
+
|
| 663 |
+
@property
def non_trainable_weights(self):
    """List of all non-trainable weight variables of the layer.

    These are the weights that should not be updated by the optimizer during
    training. Unlike, `layer.non_trainable_variables` this excludes metric
    state and random seeds.
    """
    all_weights = self.weights
    # A frozen layer reports every weight as non-trainable.
    if not self.trainable:
        return all_weights
    return [w for w in all_weights if not w.trainable]
|
| 674 |
+
|
| 675 |
+
@property
def metrics(self):
    """List of all metrics."""
    # Own metrics first, then each sublayer's (recursively, via the
    # sublayer's own `metrics` property).
    collected = list(self._metrics)
    for sublayer in self._layers:
        collected += sublayer.metrics
    return collected
|
| 682 |
+
|
| 683 |
+
@property
def metrics_variables(self):
    """List of all metric variables.

    Returns:
        A flat list of the variables owned by every metric reachable
        through `self.metrics` (own metrics plus sublayers').
    """
    # Renamed the accumulator from `vars` to avoid shadowing the
    # `vars()` builtin.
    variables = []
    for metric in self.metrics:
        variables.extend(metric.variables)
    return variables
|
| 690 |
+
|
| 691 |
+
def get_weights(self):
    """Return the values of `layer.weights` as a list of NumPy arrays."""
    # Materialize each variable's value; order matches `self.weights`.
    return [weight.numpy() for weight in self.weights]
|
| 694 |
+
|
| 695 |
+
def set_weights(self, weights):
    """Sets the values of `layer.weights` from a list of NumPy arrays."""
    targets = self.weights
    # Count mismatch is caught up front, before any variable is touched.
    if len(targets) != len(weights):
        raise ValueError(
            f"You called `set_weights(weights)` on layer '{self.name}' "
            f"with a weight list of length {len(weights)}, but the layer "
            f"was expecting {len(targets)} weights."
        )
    for variable, value in zip(targets, weights):
        # Shapes must match exactly; assignment happens pairwise in order,
        # so a mid-list mismatch leaves earlier variables already updated.
        if variable.shape != value.shape:
            raise ValueError(
                f"Layer {self.name} weight shape {variable.shape} "
                "is not compatible with provided weight "
                f"shape {value.shape}."
            )
        variable.assign(value)
|
| 712 |
+
|
| 713 |
+
@property
def dtype_policy(self):
    # The policy object governing compute/variable dtypes for this layer.
    return self._dtype_policy
|
| 716 |
+
|
| 717 |
+
@dtype_policy.setter
def dtype_policy(self, value):
    policy = dtype_policies.get(value)
    if isinstance(self._dtype_policy, DTypePolicyMap) and self.path:
        # Replace any existing per-path entry in the policy map.
        if self.path in self._dtype_policy:
            del self._dtype_policy[self.path]
        self._dtype_policy[self.path] = policy
    else:
        self._dtype_policy = policy
    # Setting a quantized policy on an already-built, not-yet-quantized
    # layer triggers quantization eagerly.
    if (
        policy.quantization_mode is not None
        and self.built
        and not getattr(self, "_is_quantized", False)
    ):
        self.quantize(policy.quantization_mode)
|
| 729 |
+
|
| 730 |
+
@property
def dtype(self):
    """Alias of `layer.variable_dtype`."""
    return self.variable_dtype
|
| 734 |
+
|
| 735 |
+
@property
def compute_dtype(self):
    """The dtype of the computations performed by the layer."""
    policy = self._dtype_policy
    # A `DTypePolicyMap` stores one policy per layer path.
    if isinstance(policy, DTypePolicyMap) and self.path:
        policy = policy[self.path]
    return policy.compute_dtype
|
| 743 |
+
|
| 744 |
+
@property
def variable_dtype(self):
    """The dtype of the state (weights) of the layer."""
    policy = self._dtype_policy
    # A `DTypePolicyMap` stores one policy per layer path.
    if isinstance(policy, DTypePolicyMap) and self.path:
        policy = policy[self.path]
    return policy.variable_dtype
|
| 752 |
+
|
| 753 |
+
@property
def quantization_mode(self):
    """The quantization mode of this layer, `None` if not quantized."""
    policy = self._dtype_policy
    # A `DTypePolicyMap` stores one policy per layer path.
    if isinstance(policy, DTypePolicyMap) and self.path:
        policy = policy[self.path]
    return policy.quantization_mode
|
| 761 |
+
|
| 762 |
+
@property
def input_dtype(self):
    """The dtype layer inputs should be converted to."""
    # Inputs are cast to the compute dtype before reaching `call()`.
    return self.compute_dtype
|
| 766 |
+
|
| 767 |
+
@property
def supports_masking(self):
    """Whether this layer supports computing a mask using `compute_mask`."""
    return self._supports_masking

@supports_masking.setter
def supports_masking(self, value):
    # No validation: masking support is a plain flag.
    self._supports_masking = value
|
| 775 |
+
|
| 776 |
+
@utils.default
def compute_mask(self, inputs, previous_mask):
    # Default behavior: propagate the incoming mask unchanged.
    return previous_mask
|
| 779 |
+
|
| 780 |
+
@traceback_utils.filter_traceback
|
| 781 |
+
def __call__(self, *args, **kwargs):
|
| 782 |
+
self._check_super_called()
|
| 783 |
+
self._called = True
|
| 784 |
+
|
| 785 |
+
#####################################
|
| 786 |
+
# 1. Convert any array arguments to tensors of correct dtype.
|
| 787 |
+
def maybe_convert(x):
|
| 788 |
+
return self.dtype_policy.convert_input(
|
| 789 |
+
x, self.autocast, self.input_dtype
|
| 790 |
+
)
|
| 791 |
+
|
| 792 |
+
# Used to avoid expensive `tree` operations in the most common case.
|
| 793 |
+
if (
|
| 794 |
+
kwargs
|
| 795 |
+
or len(args) != 1
|
| 796 |
+
or not backend.is_tensor(args[0])
|
| 797 |
+
or backend.standardize_dtype(args[0].dtype) != self.input_dtype
|
| 798 |
+
) and self._convert_input_args:
|
| 799 |
+
args = tree.map_structure(maybe_convert, args)
|
| 800 |
+
kwargs = tree.map_structure(maybe_convert, kwargs)
|
| 801 |
+
|
| 802 |
+
##########################################################
|
| 803 |
+
# 2. Enforce that only tensors can be passed positionally.
|
| 804 |
+
if not self._allow_non_tensor_positional_args:
|
| 805 |
+
for arg in tree.flatten(args):
|
| 806 |
+
if (
|
| 807 |
+
not isinstance(arg, KerasTensor)
|
| 808 |
+
and not backend.is_tensor(arg)
|
| 809 |
+
and arg is not None
|
| 810 |
+
):
|
| 811 |
+
raise ValueError(
|
| 812 |
+
"Only input tensors may be passed as "
|
| 813 |
+
"positional arguments. The following argument value "
|
| 814 |
+
f"should be passed as a keyword argument: {arg} "
|
| 815 |
+
f"(of type {type(arg)})"
|
| 816 |
+
)
|
| 817 |
+
|
| 818 |
+
# Caches info about `call()` signature, args, kwargs.
|
| 819 |
+
call_spec = CallSpec(self._call_signature, args, kwargs)
|
| 820 |
+
|
| 821 |
+
############################################
|
| 822 |
+
# 3. Check input spec for 1st positional arg.
|
| 823 |
+
# TODO: consider extending this to all args and kwargs.
|
| 824 |
+
self._assert_input_compatibility(call_spec.first_arg)
|
| 825 |
+
|
| 826 |
+
################
|
| 827 |
+
# 4. Call build
|
| 828 |
+
with self._open_name_scope():
|
| 829 |
+
self._maybe_build(call_spec)
|
| 830 |
+
|
| 831 |
+
##########################
|
| 832 |
+
# 5. Infer training value
|
| 833 |
+
# Training phase for `Layer.call` is set via (in order of priority):
|
| 834 |
+
# (1) The `training` argument passed to this `Layer.call`, if not None
|
| 835 |
+
# (2) The training argument of an outer `Layer.call`.
|
| 836 |
+
# (4) Any non-None default value for `training` in the call signature
|
| 837 |
+
# (5) False (treating the layer as if it's in inference)
|
| 838 |
+
|
| 839 |
+
# Maintains info about the `Layer.call` stack
|
| 840 |
+
# across nested calls.
|
| 841 |
+
call_context = self._get_call_context()
|
| 842 |
+
|
| 843 |
+
# This is the value explicitly passed by the user
|
| 844 |
+
training = call_spec.user_arguments_dict.get("training", None)
|
| 845 |
+
if training is None:
|
| 846 |
+
# Wasn't passed explicitly: use context value
|
| 847 |
+
training = call_context.training
|
| 848 |
+
if training is None:
|
| 849 |
+
# Get signature default value
|
| 850 |
+
training = call_spec.arguments_dict.get("training", None)
|
| 851 |
+
call_context.training = training
|
| 852 |
+
if self._call_has_training_arg and training is not None:
|
| 853 |
+
# Only populate arg if it has a concrete value
|
| 854 |
+
kwargs["training"] = training
|
| 855 |
+
|
| 856 |
+
##############################
|
| 857 |
+
# 6. Populate mask argument(s)
|
| 858 |
+
if len(call_spec.tensor_arguments_dict) == 1:
|
| 859 |
+
if (
|
| 860 |
+
"mask" in call_spec.argument_names
|
| 861 |
+
and call_spec.arguments_dict["mask"] is None
|
| 862 |
+
):
|
| 863 |
+
arg_name = list(call_spec.tensor_arguments_dict.keys())[0]
|
| 864 |
+
only_tensor_arg = call_spec.tensor_arguments_dict[arg_name]
|
| 865 |
+
mask = tree.map_structure(
|
| 866 |
+
backend.get_keras_mask,
|
| 867 |
+
only_tensor_arg,
|
| 868 |
+
)
|
| 869 |
+
kwargs["mask"] = mask
|
| 870 |
+
elif len(call_spec.tensor_arguments_dict) > 1:
|
| 871 |
+
for k, v in call_spec.tensor_arguments_dict.items():
|
| 872 |
+
expected_mask_arg_name = f"{k}_mask"
|
| 873 |
+
if expected_mask_arg_name in call_spec.argument_names:
|
| 874 |
+
if call_spec.arguments_dict[expected_mask_arg_name] is None:
|
| 875 |
+
mask = tree.map_structure(backend.get_keras_mask, v)
|
| 876 |
+
kwargs[expected_mask_arg_name] = mask
|
| 877 |
+
|
| 878 |
+
# We need to cache the `previous_mask` before `__call__` because the
|
| 879 |
+
# mask might be removed during the call, such as `MultiHeadAttention`.
|
| 880 |
+
previous_mask = tree.map_structure(
|
| 881 |
+
backend.get_keras_mask, call_spec.first_arg
|
| 882 |
+
)
|
| 883 |
+
|
| 884 |
+
####################
|
| 885 |
+
# 7. Call the layer.
|
| 886 |
+
try:
|
| 887 |
+
with self._open_name_scope():
|
| 888 |
+
current_scope = backend.get_autocast_scope()
|
| 889 |
+
new_scope = None
|
| 890 |
+
if current_scope is not None:
|
| 891 |
+
# Clear or update the current scope if necessary.
|
| 892 |
+
if not self.autocast:
|
| 893 |
+
new_scope = backend.AutocastScope(None)
|
| 894 |
+
elif not backend.is_float_dtype(self.compute_dtype):
|
| 895 |
+
# Some preprocessing layers might have a non-float
|
| 896 |
+
# dtype, we should not autocast in this case.
|
| 897 |
+
new_scope = backend.AutocastScope(None)
|
| 898 |
+
elif current_scope.dtype != self.compute_dtype:
|
| 899 |
+
new_scope = backend.AutocastScope(self.compute_dtype)
|
| 900 |
+
elif self.compute_dtype != self.variable_dtype:
|
| 901 |
+
# Enter a new scope if our dtypes are "mixed".
|
| 902 |
+
new_scope = backend.AutocastScope(self.compute_dtype)
|
| 903 |
+
|
| 904 |
+
if new_scope is not None:
|
| 905 |
+
with new_scope:
|
| 906 |
+
outputs = super().__call__(*args, **kwargs)
|
| 907 |
+
else:
|
| 908 |
+
outputs = super().__call__(*args, **kwargs)
|
| 909 |
+
# Change the layout for the layer output if needed.
|
| 910 |
+
# This is useful for relayout intermediate tensor in the model
|
| 911 |
+
# to achieve the optimal performance.
|
| 912 |
+
distribution = distribution_lib.distribution()
|
| 913 |
+
if distribution is not None:
|
| 914 |
+
current_layer_path = current_path()
|
| 915 |
+
current_layer_path += "/output"
|
| 916 |
+
layout = distribution.get_tensor_layout(current_layer_path)
|
| 917 |
+
if layout:
|
| 918 |
+
outputs = distribution_lib.distribute_tensor(
|
| 919 |
+
outputs, layout
|
| 920 |
+
)
|
| 921 |
+
|
| 922 |
+
if not self.built:
|
| 923 |
+
self.built = True
|
| 924 |
+
# Record activity regularizer loss.
|
| 925 |
+
if self.activity_regularizer is not None:
|
| 926 |
+
for output in tree.flatten(outputs):
|
| 927 |
+
if backend.is_tensor(output):
|
| 928 |
+
self.add_loss(self.activity_regularizer(output))
|
| 929 |
+
|
| 930 |
+
# Set `previous_mask` on outputs if available. It is provided only
|
| 931 |
+
# for the first positional input arg and its mask.
|
| 932 |
+
# TODO: consider extending this to all args and kwargs.
|
| 933 |
+
if self.supports_masking:
|
| 934 |
+
self._set_mask_metadata(
|
| 935 |
+
call_spec.first_arg, outputs, previous_mask
|
| 936 |
+
)
|
| 937 |
+
elif any(m is not None for m in tree.flatten(previous_mask)):
|
| 938 |
+
warnings.warn(
|
| 939 |
+
f"Layer '{self.name}' (of type {self.__class__.__name__}) "
|
| 940 |
+
"was passed an input with a mask attached to it. "
|
| 941 |
+
"However, this layer does not support masking and will "
|
| 942 |
+
"therefore destroy the mask information. Downstream "
|
| 943 |
+
"layers will not see the mask."
|
| 944 |
+
)
|
| 945 |
+
finally:
|
| 946 |
+
# Destroy call context if we created it
|
| 947 |
+
self._maybe_reset_call_context()
|
| 948 |
+
return outputs
|
| 949 |
+
|
| 950 |
+
def call(self, *args, **kwargs):
    # Base `Layer` has no forward computation; subclasses must override.
    raise self._not_implemented_error(self.call)
|
| 952 |
+
|
| 953 |
+
@traceback_utils.filter_traceback
|
| 954 |
+
def stateless_call(
|
| 955 |
+
self,
|
| 956 |
+
trainable_variables,
|
| 957 |
+
non_trainable_variables,
|
| 958 |
+
*args,
|
| 959 |
+
return_losses=False,
|
| 960 |
+
**kwargs,
|
| 961 |
+
):
|
| 962 |
+
"""Call the layer without any side effects.
|
| 963 |
+
|
| 964 |
+
Args:
|
| 965 |
+
trainable_variables: List of trainable variables of the model.
|
| 966 |
+
non_trainable_variables: List of non-trainable variables of the
|
| 967 |
+
model.
|
| 968 |
+
*args: Positional arguments to be passed to `call()`.
|
| 969 |
+
return_losses: If `True`, `stateless_call()` will return the list of
|
| 970 |
+
losses created during `call()` as part of its return values.
|
| 971 |
+
**kwargs: Keyword arguments to be passed to `call()`.
|
| 972 |
+
|
| 973 |
+
Returns:
|
| 974 |
+
A tuple. By default, returns `(outputs, non_trainable_variables)`.
|
| 975 |
+
If `return_losses = True`, then returns
|
| 976 |
+
`(outputs, non_trainable_variables, losses)`.
|
| 977 |
+
|
| 978 |
+
Note: `non_trainable_variables` include not only non-trainable weights
|
| 979 |
+
such as `BatchNormalization` statistics, but also RNG seed state
|
| 980 |
+
(if there are any random operations part of the layer, such as dropout),
|
| 981 |
+
and `Metric` state (if there are any metrics attached to the layer).
|
| 982 |
+
These are all elements of state of the layer.
|
| 983 |
+
|
| 984 |
+
Example:
|
| 985 |
+
|
| 986 |
+
```python
|
| 987 |
+
model = ...
|
| 988 |
+
data = ...
|
| 989 |
+
trainable_variables = model.trainable_variables
|
| 990 |
+
non_trainable_variables = model.non_trainable_variables
|
| 991 |
+
# Call the model with zero side effects
|
| 992 |
+
outputs, non_trainable_variables = model.stateless_call(
|
| 993 |
+
trainable_variables,
|
| 994 |
+
non_trainable_variables,
|
| 995 |
+
data,
|
| 996 |
+
)
|
| 997 |
+
# Attach the updated state to the model
|
| 998 |
+
# (until you do this, the model is still in its pre-call state).
|
| 999 |
+
for ref_var, value in zip(
|
| 1000 |
+
model.non_trainable_variables, non_trainable_variables
|
| 1001 |
+
):
|
| 1002 |
+
ref_var.assign(value)
|
| 1003 |
+
```
|
| 1004 |
+
"""
|
| 1005 |
+
self._check_super_called()
|
| 1006 |
+
|
| 1007 |
+
if not self.built:
|
| 1008 |
+
raise ValueError(
|
| 1009 |
+
f"To call stateless_call, {self.__class__.__name__} must be "
|
| 1010 |
+
"built (i.e. its variables must have been already created). "
|
| 1011 |
+
"You can build it by calling it on some data."
|
| 1012 |
+
)
|
| 1013 |
+
if len(trainable_variables) != len(self.trainable_variables):
|
| 1014 |
+
raise ValueError(
|
| 1015 |
+
"Argument `trainable_variables` must be a list of tensors "
|
| 1016 |
+
"corresponding 1:1 to "
|
| 1017 |
+
f"{self.__class__.__name__}().trainable_variables. "
|
| 1018 |
+
f"Received list with length {len(trainable_variables)}, "
|
| 1019 |
+
f"but expected {len(self.trainable_variables)} variables."
|
| 1020 |
+
)
|
| 1021 |
+
if len(non_trainable_variables) != len(self.non_trainable_variables):
|
| 1022 |
+
raise ValueError(
|
| 1023 |
+
"Argument `non_trainable_variables` must be a list of tensors "
|
| 1024 |
+
"corresponding 1:1 to "
|
| 1025 |
+
f"{self.__class__.__name__}().non_trainable_variables. "
|
| 1026 |
+
f"Received list with length {len(non_trainable_variables)}, "
|
| 1027 |
+
f"but expected {len(self.non_trainable_variables)} variables."
|
| 1028 |
+
)
|
| 1029 |
+
|
| 1030 |
+
# Gather variable mapping
|
| 1031 |
+
trainable_mapping = zip(self.trainable_variables, trainable_variables)
|
| 1032 |
+
non_trainable_mapping = zip(
|
| 1033 |
+
self.non_trainable_variables, non_trainable_variables
|
| 1034 |
+
)
|
| 1035 |
+
mapping = list(trainable_mapping) + list(non_trainable_mapping)
|
| 1036 |
+
|
| 1037 |
+
# Call in stateless scope
|
| 1038 |
+
losses = None
|
| 1039 |
+
with backend.StatelessScope(
|
| 1040 |
+
state_mapping=mapping, collect_losses=return_losses
|
| 1041 |
+
) as scope:
|
| 1042 |
+
if self.dtype_policy.quantization_mode is not None:
|
| 1043 |
+
outputs = self.quantized_call(*args, **kwargs)
|
| 1044 |
+
else:
|
| 1045 |
+
outputs = self.call(*args, **kwargs)
|
| 1046 |
+
if return_losses:
|
| 1047 |
+
losses = self.losses
|
| 1048 |
+
|
| 1049 |
+
# Gather updated non-trainable variables
|
| 1050 |
+
non_trainable_variables = []
|
| 1051 |
+
for v in self.non_trainable_variables:
|
| 1052 |
+
new_v = scope.get_current_value(v)
|
| 1053 |
+
non_trainable_variables.append(new_v)
|
| 1054 |
+
|
| 1055 |
+
if return_losses:
|
| 1056 |
+
return outputs, non_trainable_variables, losses
|
| 1057 |
+
return outputs, non_trainable_variables
|
| 1058 |
+
|
| 1059 |
+
def compute_output_spec(self, *args, **kwargs):
    """Computes the output spec (`KerasTensor`s) of the layer.

    Falls back to the generic `Operation.compute_output_spec()` unless the
    layer overrides `compute_output_shape()`, in which case the declared
    shape(s) are turned into `KerasTensor`s with the layer's compute dtype.
    """
    if utils.is_default(self.compute_output_shape):
        return super().compute_output_spec(*args, **kwargs)
    else:
        # Use compute_output_shape() to return the right output spec
        call_spec = CallSpec(self._call_signature, args, kwargs)
        shapes_dict = get_shapes_dict(call_spec)
        shapes_dict = update_shapes_dict_for_target_fn(
            self.compute_output_shape,
            shapes_dict=shapes_dict,
            call_spec=call_spec,
            class_name=self.__class__.__name__,
        )
        output_shape = self.compute_output_shape(**shapes_dict)

        # A flat list of ints/Nones is a single shape; normalize to tuple.
        if (
            isinstance(output_shape, list)
            and output_shape
            and isinstance(output_shape[0], (int, type(None)))
        ):
            output_shape = tuple(output_shape)
        if not isinstance(output_shape, (list, tuple, dict)):
            try:
                output_shape = tuple(output_shape)
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Catch only real failures of
            # the tuple conversion.
            except Exception:
                raise ValueError(
                    "Method `compute_output_shape()` of layer "
                    f"{self.__class__.__name__} is returning "
                    "a type that cannot be interpreted as a shape. "
                    "It should return a shape tuple. "
                    f"Received: {output_shape}"
                )
        if (
            isinstance(output_shape, tuple)
            and output_shape
            and isinstance(output_shape[0], (int, type(None)))
        ):
            return KerasTensor(output_shape, dtype=self.compute_dtype)
        # Case: nested. Could be a tuple/list of shapes, or a dict of
        # shapes. Could be deeply nested.
        return tree.map_shape_structure(
            lambda s: KerasTensor(s, dtype=self.compute_dtype), output_shape
        )
|
| 1102 |
+
|
| 1103 |
+
@utils.default
def compute_output_shape(self, *args, **kwargs):
    # Marked as "default" so `compute_output_spec()` can detect whether a
    # subclass actually overrides this.
    raise self._not_implemented_error(
        self.compute_output_shape,
        "Should implement `def compute_output_shape(self, input_shape)`.",
    )
|
| 1109 |
+
|
| 1110 |
+
def add_loss(self, loss):
    """Can be called inside of the `call()` method to add a scalar loss.

    Example:

    ```python
    class MyLayer(Layer):
        ...
        def call(self, x):
            self.add_loss(ops.sum(x))
            return x
    ```
    """
    # Eager only.
    losses = tree.flatten(loss)
    for x in losses:
        if not backend.is_tensor(x):
            raise ValueError(
                "`add_loss()` can only be called from inside `build()` or "
                f"`call()`, on a tensor input. Received invalid value: {x}"
            )
    if backend.in_stateless_scope():
        scope = backend.get_stateless_scope()
        if scope.collect_losses:
            for x in losses:
                # Bug fix: register each flattened element `x`, not the
                # (possibly nested) `loss` argument. Previously the whole
                # structure was added once per element and only the
                # container's id was recorded, so `_get_own_losses()`
                # could never match the individual loss tensors.
                scope.add_loss(x)
                self._loss_ids.add(id(x))
    else:
        self._losses.extend(losses)
|
| 1139 |
+
|
| 1140 |
+
def _get_own_losses(self):
    # Outside a stateless scope, losses live directly on the layer;
    # return a copy so callers cannot mutate internal state.
    if not backend.in_stateless_scope():
        return self._losses[:]
    # Inside a stateless scope, losses live on the scope; pick out the
    # ones registered by this layer (tracked via `_loss_ids`).
    scope = backend.get_stateless_scope()
    return [loss for loss in scope.losses if id(loss) in self._loss_ids]
|
| 1150 |
+
|
| 1151 |
+
def _get_regularization_losses(self):
    # One penalty term per trainable weight that carries a regularizer.
    reg_losses = []
    for variable in self.trainable_weights:
        regularizer = variable.regularizer
        if regularizer is None:
            continue
        if backend.in_stateless_scope() and not in_symbolic_scope():
            # If in symbolic scope, we might get `None` from
            # `get_current_value` in `backend.compute_output_spec`. So we
            # assign `variable` instead.
            value = backend.get_stateless_scope().get_current_value(variable)
        else:
            value = variable
        reg_losses.append(regularizer(value))
    return reg_losses
|
| 1165 |
+
|
| 1166 |
+
@property
def losses(self):
    """List of scalar losses from `add_loss`, regularizers and sublayers."""
    # An explicit override short-circuits collection entirely.
    if self._losses_override:
        return self._losses_override
    collected = self._get_own_losses()
    for sublayer in self._flatten_layers(include_self=False):
        collected.extend(sublayer._get_own_losses())
    collected.extend(self._get_regularization_losses())
    return collected
|
| 1177 |
+
|
| 1178 |
+
def _clear_losses(self):
    """Drops all tracked losses on this layer and its sublayers."""
    if backend.in_stateless_scope():
        scope = backend.get_stateless_scope()
        if scope.collect_losses:
            # Bug fix: iterate over a snapshot. Removing from the list
            # while iterating it directly skips the element that follows
            # each removal, leaving some of this layer's losses behind.
            for x in list(scope.losses):
                if id(x) in self._loss_ids:
                    scope.losses.remove(x)
    self._losses.clear()
    self._loss_ids.clear()
    for layer in self._layers:
        layer._clear_losses()
|
| 1189 |
+
|
| 1190 |
+
# Quantization-related (int8 and float8) methods
|
| 1191 |
+
|
| 1192 |
+
def quantized_build(self, input_shape, mode):
    # Quantization-aware build hook; only quantizable layers implement it.
    raise self._not_implemented_error(self.quantized_build)
|
| 1194 |
+
|
| 1195 |
+
def quantize(self, mode, type_check=True):
    # In-place quantization hook; only quantizable layers implement it.
    raise self._not_implemented_error(self.quantize)
|
| 1197 |
+
|
| 1198 |
+
def _check_quantize_args(self, mode, compute_dtype):
    # Validate a `quantize()` request before any state is mutated. Check
    # order is significant: built -> not-already-quantized -> valid mode
    # -> dtype compatibility.
    if not self.built:
        raise ValueError(
            "Cannot quantize a layer that isn't yet built. "
            f"Layer '{self.name}' (of type '{self.__class__.__name__}') "
            "is not built yet."
        )
    already_quantized = getattr(self, "_is_quantized", False)
    if already_quantized:
        raise ValueError(
            f"Layer '{self.name}' is already quantized with "
            f"dtype_policy='{self.dtype_policy.name}'. "
            f"Received: mode={mode}"
        )
    if mode not in dtype_policies.QUANTIZATION_MODES:
        raise ValueError(
            "Invalid quantization mode. "
            f"Expected one of {dtype_policies.QUANTIZATION_MODES}. "
            f"Received: mode={mode}"
        )
    if mode == "int8" and compute_dtype == "float16":
        raise ValueError(
            f"Quantization mode='{mode}' doesn't work well with "
            "compute_dtype='float16'. Consider loading model/layer with "
            "another dtype policy such as 'mixed_bfloat16' or "
            "'mixed_float16' before calling `quantize()`."
        )
|
| 1224 |
+
|
| 1225 |
+
def quantized_call(self, *args, **kwargs):
    # Dispatch on the active quantization mode.
    if self.quantization_mode == "int8":
        handler = self._int8_call
    elif self.quantization_mode == "float8":
        handler = self._float8_call
    else:
        raise self._quantization_mode_error(self.quantization_mode)
    return handler(*args, **kwargs)
|
| 1232 |
+
|
| 1233 |
+
def _int8_call(self, *args, **kwargs):
    # int8 forward pass; only quantizable layers implement it.
    raise self._not_implemented_error(self._int8_call)
|
| 1235 |
+
|
| 1236 |
+
def _float8_call(self, *args, **kwargs):
    # float8 forward pass; only quantizable layers implement it.
    raise self._not_implemented_error(self._float8_call)
|
| 1238 |
+
|
| 1239 |
+
def _not_implemented_error(self, attr, msg=None):
    """Build a `NotImplementedError` naming the missing method/attribute.

    Args:
        attr: The missing member — a callable (reported as a "method")
            or anything else (reported as an "attribute").
        msg: Optional extra text appended to the message.

    Returns:
        A `NotImplementedError` ready to be raised by the caller.
    """
    if callable(attr):
        label, kind = attr.__name__, "method"
    else:
        label, kind = str(attr), "attribute"
    suffix = "" if msg is None else " " + msg
    return NotImplementedError(
        f"Layer {self.__class__.__name__} does not have a `{label}` "
        f"{kind} implemented.{suffix}"
    )
|
| 1251 |
+
|
| 1252 |
+
def _quantization_mode_error(self, mode):
    """Build the error raised for an unrecognized quantization mode."""
    valid_modes = dtype_policies.QUANTIZATION_MODES
    return NotImplementedError(
        "Invalid quantization mode. Expected one of "
        f"{valid_modes}. "
        f"Received: quantization_mode={mode}"
    )
|
| 1258 |
+
|
| 1259 |
+
def save_own_variables(self, store):
    """Saves the state of the layer.

    You can override this method to take full control of how the state of
    the layer is saved upon calling `model.save()`.

    Args:
        store: Dict where the state of the model will be saved.
    """
    # Variables are keyed by their position (as a string): trainable
    # first, then non-trainable.
    variables = self._trainable_variables + self._non_trainable_variables
    for index, variable in enumerate(variables):
        store[str(index)] = variable
|
| 1271 |
+
|
| 1272 |
+
def load_own_variables(self, store):
    """Loads the state of the layer.

    You can override this method to take full control of how the state of
    the layer is loaded upon calling `keras.models.load_model()`.

    Args:
        store: Dict from which the state of the model will be loaded.

    Raises:
        ValueError: If the number of saved variables does not match the
            number of variables on this layer.
    """
    all_vars = self._trainable_variables + self._non_trainable_variables
    # Mismatched counts mean the saved state can't be mapped 1:1 onto
    # this layer; diagnose the unbuilt case separately.
    if len(store.keys()) != len(all_vars):
        if len(all_vars) == 0 and not self.built:
            raise ValueError(
                f"Layer '{self.name}' was never built "
                "and thus it doesn't have any variables. "
                f"However the weights file lists {len(store.keys())} "
                "variables for this layer.\n"
                "In most cases, this error indicates that either:\n\n"
                "1. The layer is owned by a parent layer that "
                "implements a `build()` method, but calling the "
                "parent's `build()` method did NOT create the state of "
                f"the child layer '{self.name}'. A `build()` method "
                "must create ALL state for the layer, including "
                "the state of any children layers.\n\n"
                "2. You need to implement "
                "the `def build_from_config(self, config)` method "
                f"on layer '{self.name}', to specify how to rebuild "
                "it during loading. "
                "In this case, you might also want to implement the "
                "method that generates the build config at saving time, "
                "`def get_build_config(self)`. "
                "The method `build_from_config()` is meant "
                "to create the state "
                "of the layer (i.e. its variables) upon deserialization.",
            )
        raise ValueError(
            f"Layer '{self.name}' expected {len(all_vars)} variables, "
            "but received "
            f"{len(store.keys())} variables during loading. "
            f"Expected: {[v.name for v in all_vars]}"
        )
    # Keys are stringified indices written by `save_own_variables`;
    # restore in the same order.
    for i, v in enumerate(all_vars):
        v.assign(store[f"{i}"])
|
| 1315 |
+
|
| 1316 |
+
def _track_variable(self, variable):
    """Register `variable` with the tracker and apply layer trainability."""
    # Bucket choice is based on the variable's own flag at tracking time.
    if variable.trainable:
        bucket = "trainable_variables"
    else:
        bucket = "non_trainable_variables"
    self._tracker.add_to_store(bucket, variable)
    # A frozen layer forces all of its variables to be non-trainable.
    if not self.trainable:
        variable.trainable = False
    self._post_track_variable(variable)
|
| 1324 |
+
|
| 1325 |
+
def _untrack_variable(self, variable):
    """Remove `variable` from the tracker, restoring the tracker lock state."""
    was_locked = self._tracker.locked
    self._tracker.unlock()
    self._tracker.untrack(variable)
    # Re-lock only if the tracker was locked before we touched it.
    if was_locked is True:
        self._tracker.lock()
    self._post_untrack_variable(variable)
|
| 1332 |
+
|
| 1333 |
+
def add_metric(self, *args, **kwargs):
    """Unsupported legacy API; always raises with a migration hint."""
    # Permanently disabled
    raise NotImplementedError(
        "Layer `add_metric()` method is deprecated"
        " add your metric in `Model.compile(metrics=[...]).`"
    )
|
| 1339 |
+
|
| 1340 |
+
def count_params(self):
    """Count the total number of scalars composing the weights.

    Returns:
        An integer count.

    Raises:
        ValueError: If the layer has not been built yet (its weights
            do not exist until `build()` runs).
    """
    if not self.built:
        # Fix: the last literal carried a spurious `f` prefix with no
        # placeholders (ruff F541); the message text is unchanged.
        raise ValueError(
            "You tried to call `count_params` "
            f"on layer '{self.name}', "
            "but the layer isn't built. "
            "You can build it manually via: "
            "`layer.build(input_shape)`."
        )
    return summary_utils.count_params(self.weights)
|
| 1355 |
+
|
| 1356 |
+
def _maybe_build(self, call_spec):
    """Build the layer, if needed, from the shapes of the current call.

    Strategy: prefer an explicit `build()` override; otherwise try to
    build implicitly by tracing `call()` on symbolic inputs.

    Args:
        call_spec: `CallSpec` describing the in-flight `__call__`.
    """
    if self.built:
        return

    shapes_dict = get_shapes_dict(call_spec)
    first_shape = next(iter(shapes_dict.values()), None)

    # If the layer has a build method, call it with our input shapes.
    if not utils.is_default(self.build):
        # Align shape kwargs with the signature of `build()`.
        shapes_dict = update_shapes_dict_for_target_fn(
            self.build,
            shapes_dict=shapes_dict,
            call_spec=call_spec,
            class_name=self.__class__.__name__,
        )
        self.build(**shapes_dict)
        # Check input spec again (after build, since self.input_spec
        # may have been updated
        self._assert_input_compatibility(call_spec.first_arg)
        return

    # Otherwise, attempt to build the layer by calling it on symbolic input.
    if might_have_unbuilt_state(self):
        try:
            backend.compute_output_spec(
                self.call, **call_spec.arguments_dict
            )
        except Exception as e:
            if call_spec.eager:
                # Will let the actual eager call do state-building
                return
            warnings.warn(
                f"Layer '{self.name}' looks like it has unbuilt state, but "
                "Keras is not able to trace the layer `call()` in order to "
                "build it automatically. Possible causes:\n"
                "1. The `call()` method of your layer may be crashing. Try "
                "to `__call__()` the layer eagerly on some test input "
                "first to see if it works. "
                "E.g. `x = np.random.random((3, 4)); y = layer(x)`\n"
                "2. If the `call()` method is correct, then you may need "
                "to implement the `def build(self, input_shape)` method on "
                "your layer. It should create all variables used by the "
                "layer (e.g. by calling `layer.build()` on all its "
                "children layers).\n"
                f"Exception encountered: ''{e}''"
            )
    # Mark the layer built via the default `build()` with the first shape.
    self.build(first_shape)
|
| 1403 |
+
|
| 1404 |
+
def _build_by_run_for_single_pos_arg(self, input_shape):
    """Try to build by tracing `call()` on a symbolic first argument.

    Args:
        input_shape: Shape (possibly nested structure of shapes) of the
            single positional input.

    Returns:
        True if the symbolic trace succeeded, False otherwise.
    """
    # Case: all inputs are in the first arg (possibly nested).
    input_tensors = tree.map_shape_structure(
        lambda s: backend.KerasTensor(s), input_shape
    )
    try:
        backend.compute_output_spec(self.call, input_tensors)
        return True
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate; any trace failure just means
        # "could not build this way".
        return False
|
| 1414 |
+
|
| 1415 |
+
def _build_by_run_for_kwargs(self, shapes_dict):
    """Try to build by tracing `call()` with symbolic keyword arguments.

    Args:
        shapes_dict: Mapping of `<kwarg>_shape` names to shapes.

    Returns:
        True if the symbolic trace succeeded, False otherwise.
    """
    # Case: inputs were recorded as multiple keyword arguments.
    if all(is_shape_tuple(s) for s in shapes_dict.values()):
        # Case: all input keyword arguments were plain tensors.
        input_tensors = {
            # We strip the `_shape` suffix to recover kwarg names.
            utils.removesuffix(k, "_shape"): backend.KerasTensor(shape)
            for k, shape in shapes_dict.items()
        }
        try:
            backend.compute_output_spec(self.call, **input_tensors)
            return True
        except Exception:
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt
            # and SystemExit still propagate.
            return False
    else:
        # Not supported: nested input keyword arguments.
        return False
|
| 1432 |
+
|
| 1433 |
+
def __repr__(self):
    """Debug string showing the concrete class, name and build status."""
    cls_name = self.__class__.__name__
    return f"<{cls_name} name={self.name}, built={self.built}>"
|
| 1438 |
+
|
| 1439 |
+
def __str__(self):
    """Layers stringify the same way they repr."""
    return repr(self)
|
| 1441 |
+
|
| 1442 |
+
def __setattr__(self, name, value):
    """Intercept attribute writes to auto-track layer state.

    Every assigned value (except the tracker itself) is routed through
    `self._tracker.track()` before being stored.
    """
    # Track Variables, Layers, Metrics, SeedGenerators.
    name, value = self._setattr_hook(name, value)
    if name != "_tracker":
        if not hasattr(self, "_tracker"):
            # Bootstrap: the tracker may not exist yet if assignment
            # happens before `__init__` completed.
            self._initialize_tracker()
        value = self._tracker.track(value)
    return super().__setattr__(name, value)
|
| 1450 |
+
|
| 1451 |
+
def __delattr__(self, name):
    """Delete an attribute; backend Variables are untracked and GC'd eagerly."""
    obj = getattr(self, name)
    if isinstance(obj, backend.Variable):
        import gc

        # It will take a short amount of time for the corresponding buffer
        # to be actually removed from the device.
        # https://stackoverflow.com/a/74631949
        self._untrack_variable(obj)
        super().__delattr__(name)
        gc.collect()
    else:
        super().__delattr__(name)
|
| 1464 |
+
|
| 1465 |
+
def _check_super_called(self):
    """Raise if `super().__init__()` hasn't run (no `_lock` attribute set)."""
    # `_lock` is only assigned by the base `__init__`; its absence (the
    # getattr default of True) means the subclass skipped the super call.
    if getattr(self, "_lock", True):
        raise RuntimeError(
            f"In layer '{self.__class__.__name__}', you forgot to call "
            "`super().__init__()` as the first statement "
            "in the `__init__()` method. Go add it!"
        )
|
| 1472 |
+
|
| 1473 |
+
def _assert_input_compatibility(self, arg_0):
    """Check the first call argument against `self.input_spec`, if any."""
    if self.input_spec:
        try:
            input_spec.assert_input_compatibility(
                self.input_spec, arg_0, layer_name=self.name
            )
        except SystemError:
            if backend.backend() == "torch":
                # TODO: The torch backend failed the ONNX CI with the error:
                # SystemError: <method '__int__' of 'torch._C.TensorBase'
                # objects> returned a result with an exception set
                # As a workaround, we are skipping this for now.
                pass
            else:
                raise
|
| 1488 |
+
|
| 1489 |
+
def _get_call_context(self):
    """Returns currently active `CallContext`."""
    ctx = global_state.get_global_attribute("current_call_ctx")
    if ctx is None:
        # No outer layer call in flight: this layer becomes the entry
        # layer of a new context and clears previously accumulated losses.
        ctx = CallContext(entry_layer=self)
        global_state.set_global_attribute("current_call_ctx", ctx)
        self._clear_losses()
    return ctx
|
| 1500 |
+
|
| 1501 |
+
def _maybe_reset_call_context(self):
    """Clear the global call context once its entry layer is done."""
    ctx = global_state.get_global_attribute("current_call_ctx")
    if ctx is None or ctx.entry_layer == self:
        global_state.set_global_attribute("current_call_ctx", None)
|
| 1505 |
+
|
| 1506 |
+
def _flatten_layers(self, include_self=True, recursive=True):
    """Return this layer's sublayers (optionally itself first), deduplicated.

    Args:
        include_self: Whether the result starts with this layer.
        recursive: Whether to descend into sublayers of sublayers.
    """
    result = [self] if include_self else []
    visited = set()
    pending = collections.deque(self._layers)
    while pending:
        current = pending.popleft()
        if id(current) in visited:
            continue
        visited.add(id(current))
        result.append(current)
        if recursive:
            # Visit children before the remaining siblings (note that
            # `extendleft` enqueues them in reversed order).
            pending.extendleft(current._layers)
    return result
|
| 1522 |
+
|
| 1523 |
+
def _set_mask_metadata(self, inputs, outputs, previous_mask):
    """Attach masks computed via `compute_mask()` to the output tensors.

    Skips entirely if every output already carries a mask, and never
    overwrites an existing per-tensor mask.
    """
    flat_outputs = tree.flatten(outputs)

    mask_already_computed = all(
        backend.get_keras_mask(x) is not None for x in flat_outputs
    )
    if mask_already_computed:
        return

    output_masks = self.compute_mask(inputs, previous_mask)
    if output_masks is None:
        return

    flat_masks = tree.flatten(output_masks)
    for tensor, mask in zip(flat_outputs, flat_masks):
        if backend.get_keras_mask(tensor) is None and mask is not None:
            if backend.backend() == "numpy":
                # Fix: the concatenated literals were missing a space
                # ("...at thistime..."); message now reads correctly.
                warnings.warn(
                    "The NumPy backend does not support masking at this "
                    "time. Masks will be ignored."
                )
            else:
                backend.set_keras_mask(tensor, mask)
|
| 1546 |
+
|
| 1547 |
+
@python_utils.default
def get_config(self):
    """Serialize the layer's base configuration.

    Includes the `trainable` flag, the serialized dtype policy, and the
    activity regularizer when one is set, merged over the parent config.
    """
    self._check_super_called()
    base_config = super().get_config()
    config = {
        "trainable": self.trainable,
        "dtype": dtype_policies.serialize(self.dtype_policy),
    }
    regularizer = self.activity_regularizer
    if regularizer is not None:
        config["activity_regularizer"] = regularizers.serialize(regularizer)
    return {**base_config, **config}
|
| 1560 |
+
|
| 1561 |
+
def _open_name_scope(self):
    """Enter this layer's name scope, lazily recording the parent path."""
    if self._parent_path is None:
        # Captured once, the first time the layer opens its scope.
        self._parent_path = current_path()
    return backend.name_scope(self.name, caller=self)
|
| 1565 |
+
|
| 1566 |
+
|
| 1567 |
+
def is_backend_tensor_or_symbolic(x, allow_none=False):
    """True for backend tensors and `KerasTensor`s (and `None` if allowed)."""
    if allow_none and x is None:
        return True
    if backend.is_tensor(x):
        return True
    return isinstance(x, backend.KerasTensor)
|
| 1571 |
+
|
| 1572 |
+
|
| 1573 |
+
class CallSpec:
    """Normalized description of one `call()` invocation.

    Binds `args`/`kwargs` against `signature` and classifies each bound
    argument as a tensor, a nested structure of tensors, or a non-tensor,
    so downstream code can derive shapes and eager/symbolic mode.
    """

    def __init__(self, signature, args, kwargs):
        # `training` and `mask` are special kwargs that are always available in
        # a layer, if user specifies them in their call without adding to spec,
        # we remove them to be able to bind variables. User is not using
        # `training` anyway so we can ignore.
        # TODO: If necessary use workaround for `mask`
        if "training" in kwargs and "training" not in signature.parameters:
            kwargs.pop("training")
            bound_args = signature.bind(*args, **kwargs)
        else:
            bound_args = signature.bind(*args, **kwargs)
        # Arguments the user explicitly passed (before defaults are applied).
        self.user_arguments_dict = {
            k: v for k, v in bound_args.arguments.items()
        }
        bound_args.apply_defaults()
        arg_dict = {}
        arg_names = []
        tensor_arg_dict = {}
        tensor_args = []
        tensor_arg_names = []
        nested_tensor_arg_names = []
        for name, value in bound_args.arguments.items():
            arg_dict[name] = value
            arg_names.append(name)
            if is_backend_tensor_or_symbolic(value):
                # Plain tensor argument.
                tensor_args.append(value)
                tensor_arg_names.append(name)
                tensor_arg_dict[name] = value
            elif tree.is_nested(value) and len(value) > 0:
                flat_values = tree.flatten(value)
                if all(
                    is_backend_tensor_or_symbolic(x, allow_none=True)
                    for x in flat_values
                ):
                    # Nested structure made entirely of tensors (or None).
                    tensor_args.append(value)
                    tensor_arg_names.append(name)
                    tensor_arg_dict[name] = value
                    nested_tensor_arg_names.append(name)
                elif any(is_backend_tensor_or_symbolic(x) for x in flat_values):
                    # Partially-tensor structures are ambiguous — reject.
                    raise ValueError(
                        "In a nested call() argument, "
                        "you cannot mix tensors and non-tensors. "
                        "Received invalid mixed argument: "
                        f"{name}={value}"
                    )
        self.arguments_dict = arg_dict
        self.argument_names = arg_names
        self.tensor_arguments_dict = tensor_arg_dict
        self.tensor_arguments_names = tensor_arg_names
        self.nested_tensor_argument_names = nested_tensor_arg_names
        self.first_arg = arg_dict[arg_names[0]]
        # Eager iff every tensor argument is a concrete backend tensor
        # (i.e. no symbolic KerasTensors among them).
        if all(
            backend.is_tensor(x) for x in self.tensor_arguments_dict.values()
        ):
            self.eager = True
        else:
            self.eager = False
|
| 1631 |
+
|
| 1632 |
+
|
| 1633 |
+
def get_arguments_dict(fn, args, kwargs):
    """Return a dict mapping argument names to their values.

    Args:
        fn: The callable whose signature drives the binding.
        args: Positional arguments to bind.
        kwargs: Keyword arguments to bind.

    Returns:
        Dict of parameter name -> bound value (explicitly passed only;
        defaults are not applied).

    Raises:
        TypeError: If `args`/`kwargs` don't match `fn`'s signature.
    """
    sig = inspect.signature(fn)
    bound_args = sig.bind(*args, **kwargs)
    # `BoundArguments.arguments` is already a name->value mapping; copy it
    # instead of rebuilding it entry by entry (PERF403).
    return dict(bound_args.arguments)
|
| 1641 |
+
|
| 1642 |
+
|
| 1643 |
+
def get_shapes_dict(call_spec):
    """Convert the call() arguments dict into a dict of input shape arguments.

    Example:

    ```
    >>> get_shapes_dict(call_spec)
    {"input_a_shape": (2, 3)}
    ```
    """
    shapes = {}
    for name, tensor in call_spec.tensor_arguments_dict.items():
        # Mask tensors never contribute shapes.
        if name == "mask" or name.endswith("_mask"):
            continue
        # Neither do the *args/**kwargs catch-alls.
        if name in ("args", "kwargs"):
            continue
        if name in call_spec.nested_tensor_argument_names:
            # Mirror the nesting structure, standardizing each leaf shape.
            shapes[f"{name}_shape"] = tree.map_structure(
                lambda t: backend.standardize_shape(t.shape), tensor
            )
        else:
            shapes[f"{name}_shape"] = backend.standardize_shape(tensor.shape)
    return shapes
|
| 1668 |
+
|
| 1669 |
+
|
| 1670 |
+
def update_shapes_dict_for_target_fn(
    target_fn,
    shapes_dict,
    call_spec,
    class_name,
):
    """Updates a `shapes_dict` for `build()` or `compute_output_shape()`.

    This function will align a dictionary of the shapes of all tensor
    passed to `call`, with the signatures of `build()` or
    `compute_output_shape()`.

    The alignment is as follows:

    - If `build()` or `compute_output_shape()` accept only one argument,
        forward the shape of the first positional argument from call without
        checking any argument names.
    - If `build()` or `compute_output_shape()` accept multiple arguments,
        enforce that all argument names match a call argument name, e.g.
        `foo_shape` would match call argument `foo`.

    Returns:
        An updated `shapes_dict` that can be used to invoke
        `target_fn(**shapes_dict)`.
    """
    if utils.is_default(target_fn):
        return None
    sig = inspect.signature(target_fn)
    expected_names = []
    # Gather all named (non-variadic) parameters of the target function.
    for name, param in sig.parameters.items():
        if param.kind in (
            param.POSITIONAL_OR_KEYWORD,
            param.POSITIONAL_ONLY,
            param.KEYWORD_ONLY,
        ):
            expected_names.append(name)

    # Single arg: don't check names, pass first shape.
    if len(expected_names) == 1:
        key = expected_names[0]
        values = tuple(shapes_dict.values())
        if values:
            input_shape = values[0]
        else:
            input_shape = None
        return {key: input_shape}

    # Multiple args: check that all names line up.
    kwargs = {}
    for name in expected_names:
        method_name = target_fn.__name__
        error_preamble = (
            f"For a `{method_name}()` method with more than one argument, all "
            "arguments should have a `_shape` suffix and match an argument "
            f"from `call()`. E.g. `{method_name}(self, foo_shape, bar_shape)` "
        )
        if not name.endswith("_shape"):
            raise ValueError(
                f"{error_preamble} For layer '{class_name}', "
                f"Received `{method_name}()` argument "
                f"`{name}`, which does not end in `_shape`."
            )
        expected_call_arg = utils.removesuffix(name, "_shape")
        if expected_call_arg not in call_spec.arguments_dict:
            raise ValueError(
                f"{error_preamble} For layer '{class_name}', "
                f"received `{method_name}()` argument "
                f"`{name}`, but `call()` does not have argument "
                f"`{expected_call_arg}`."
            )
        # Only forward shapes that were actually observed in this call.
        if name in shapes_dict:
            kwargs[name] = shapes_dict[name]

    return kwargs
|
| 1744 |
+
|
| 1745 |
+
|
| 1746 |
+
class CallContext:
    """Per-`__call__`-chain state: the entry layer and the `training` flag."""

    def __init__(self, entry_layer):
        self.entry_layer = entry_layer
        # Populated later by the layer call machinery.
        self.training = None
|
| 1750 |
+
|
| 1751 |
+
|
| 1752 |
+
def is_shape_tuple(s):
    """True if `s` is a list/tuple whose entries are all ints or `None`."""
    if not isinstance(s, (list, tuple)):
        return False
    return all(dim is None or isinstance(dim, int) for dim in s)
|
| 1756 |
+
|
| 1757 |
+
|
| 1758 |
+
def might_have_unbuilt_state(layer):
    """True if at least one direct sublayer of `layer` is not yet built."""
    return not all(sublayer.built for sublayer in layer._layers)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/__pycache__/gaussian_dropout.cpython-310.pyc
ADDED
|
Binary file (2.48 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/__pycache__/gaussian_noise.cpython-310.pyc
ADDED
|
Binary file (2.52 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/__pycache__/spatial_dropout.cpython-310.pyc
ADDED
|
Binary file (7.81 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/activity_regularization.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import regularizers
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.layer import Layer
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export("keras.layers.ActivityRegularization")
|
| 7 |
+
class ActivityRegularization(Layer):
|
| 8 |
+
"""Layer that applies an update to the cost function based input activity.
|
| 9 |
+
|
| 10 |
+
Args:
|
| 11 |
+
l1: L1 regularization factor (positive float).
|
| 12 |
+
l2: L2 regularization factor (positive float).
|
| 13 |
+
|
| 14 |
+
Input shape:
|
| 15 |
+
Arbitrary. Use the keyword argument `input_shape`
|
| 16 |
+
(tuple of integers, does not include the samples axis)
|
| 17 |
+
when using this layer as the first layer in a model.
|
| 18 |
+
|
| 19 |
+
Output shape:
|
| 20 |
+
Same shape as input.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
def __init__(self, l1=0.0, l2=0.0, **kwargs):
|
| 24 |
+
super().__init__(
|
| 25 |
+
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
|
| 26 |
+
)
|
| 27 |
+
self.supports_masking = True
|
| 28 |
+
self.l1 = l1
|
| 29 |
+
self.l2 = l2
|
| 30 |
+
self.built = True
|
| 31 |
+
|
| 32 |
+
def call(self, inputs):
|
| 33 |
+
return inputs
|
| 34 |
+
|
| 35 |
+
def compute_output_shape(self, input_shape):
|
| 36 |
+
return input_shape
|
| 37 |
+
|
| 38 |
+
def get_config(self):
|
| 39 |
+
base_config = super().get_config()
|
| 40 |
+
base_config.pop("activity_regularizer", None)
|
| 41 |
+
config = {"l1": self.l1, "l2": self.l2}
|
| 42 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/alpha_dropout.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.layer import Layer
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@keras_export("keras.layers.AlphaDropout")
|
| 8 |
+
class AlphaDropout(Layer):
|
| 9 |
+
"""Applies Alpha Dropout to the input.
|
| 10 |
+
|
| 11 |
+
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
|
| 12 |
+
to their original values, in order to ensure the self-normalizing property
|
| 13 |
+
even after this dropout.
|
| 14 |
+
Alpha Dropout fits well to Scaled Exponential Linear Units (SELU) by
|
| 15 |
+
randomly setting activations to the negative saturation value.
|
| 16 |
+
|
| 17 |
+
Args:
|
| 18 |
+
rate: Float between 0 and 1. The multiplicative noise will have
|
| 19 |
+
standard deviation `sqrt(rate / (1 - rate))`.
|
| 20 |
+
noise_shape: 1D integer tensor representing the shape of the
|
| 21 |
+
binary alpha dropout mask that will be multiplied with the input.
|
| 22 |
+
For instance, if your inputs have shape
|
| 23 |
+
`(batch_size, timesteps, features)` and
|
| 24 |
+
you want the alpha dropout mask to be the same for all timesteps,
|
| 25 |
+
you can use `noise_shape=(batch_size, 1, features)`.
|
| 26 |
+
seed: A Python integer to use as random seed.
|
| 27 |
+
|
| 28 |
+
Call arguments:
|
| 29 |
+
inputs: Input tensor (of any rank).
|
| 30 |
+
training: Python boolean indicating whether the layer should behave in
|
| 31 |
+
training mode (adding alpha dropout) or in inference mode
|
| 32 |
+
(doing nothing).
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
|
| 36 |
+
super().__init__(**kwargs)
|
| 37 |
+
if not 0 <= rate <= 1:
|
| 38 |
+
raise ValueError(
|
| 39 |
+
f"Invalid value received for argument "
|
| 40 |
+
"`rate`. Expected a float value between 0 and 1. "
|
| 41 |
+
f"Received: rate={rate}"
|
| 42 |
+
)
|
| 43 |
+
self.rate = rate
|
| 44 |
+
self.seed = seed
|
| 45 |
+
self.noise_shape = noise_shape
|
| 46 |
+
if rate > 0:
|
| 47 |
+
self.seed_generator = backend.random.SeedGenerator(seed)
|
| 48 |
+
self.supports_masking = True
|
| 49 |
+
self.built = True
|
| 50 |
+
|
| 51 |
+
def call(self, inputs, training=False):
|
| 52 |
+
if training and self.rate > 0:
|
| 53 |
+
noise_shape = self._get_concrete_noise_shape(
|
| 54 |
+
inputs, self.noise_shape
|
| 55 |
+
)
|
| 56 |
+
alpha = 1.6732632423543772848170429916717
|
| 57 |
+
scale = 1.0507009873554804934193349852946
|
| 58 |
+
alpha_p = -alpha * scale
|
| 59 |
+
|
| 60 |
+
kept_idx = ops.greater_equal(
|
| 61 |
+
ops.random.uniform(noise_shape, seed=self.seed_generator),
|
| 62 |
+
self.rate,
|
| 63 |
+
)
|
| 64 |
+
kept_idx = ops.cast(kept_idx, inputs.dtype)
|
| 65 |
+
|
| 66 |
+
# Compute affine transformation parameters
|
| 67 |
+
a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
|
| 68 |
+
b = -a * alpha_p * self.rate
|
| 69 |
+
|
| 70 |
+
# Apply mask
|
| 71 |
+
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
|
| 72 |
+
return a * x + b
|
| 73 |
+
|
| 74 |
+
return inputs
|
| 75 |
+
|
| 76 |
+
def compute_output_shape(self, input_shape):
|
| 77 |
+
return input_shape
|
| 78 |
+
|
| 79 |
+
def _get_concrete_noise_shape(self, inputs, noise_shape):
|
| 80 |
+
if noise_shape is None:
|
| 81 |
+
return ops.shape(inputs)
|
| 82 |
+
|
| 83 |
+
concrete_inputs_shape = ops.shape(inputs)
|
| 84 |
+
concrete_noise_shape = []
|
| 85 |
+
for i, value in enumerate(noise_shape):
|
| 86 |
+
concrete_noise_shape.append(
|
| 87 |
+
concrete_inputs_shape[i] if value is None else value
|
| 88 |
+
)
|
| 89 |
+
return concrete_noise_shape
|
| 90 |
+
|
| 91 |
+
def get_config(self):
|
| 92 |
+
base_config = super().get_config()
|
| 93 |
+
config = {
|
| 94 |
+
"rate": self.rate,
|
| 95 |
+
"seed": self.seed,
|
| 96 |
+
"noise_shape": self.noise_shape,
|
| 97 |
+
}
|
| 98 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/dropout.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.layer import Layer
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export("keras.layers.Dropout")
|
| 7 |
+
class Dropout(Layer):
|
| 8 |
+
"""Applies dropout to the input.
|
| 9 |
+
|
| 10 |
+
The `Dropout` layer randomly sets input units to 0 with a frequency of
|
| 11 |
+
`rate` at each step during training time, which helps prevent overfitting.
|
| 12 |
+
Inputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum over
|
| 13 |
+
all inputs is unchanged.
|
| 14 |
+
|
| 15 |
+
Note that the `Dropout` layer only applies when `training` is set to `True`
|
| 16 |
+
in `call()`, such that no values are dropped during inference.
|
| 17 |
+
When using `model.fit`, `training` will be appropriately set to `True`
|
| 18 |
+
automatically. In other contexts, you can set the argument explicitly
|
| 19 |
+
to `True` when calling the layer.
|
| 20 |
+
|
| 21 |
+
(This is in contrast to setting `trainable=False` for a `Dropout` layer.
|
| 22 |
+
`trainable` does not affect the layer's behavior, as `Dropout` does
|
| 23 |
+
not have any variables/weights that can be frozen during training.)
|
| 24 |
+
|
| 25 |
+
Args:
|
| 26 |
+
rate: Float between 0 and 1. Fraction of the input units to drop.
|
| 27 |
+
noise_shape: 1D integer tensor representing the shape of the
|
| 28 |
+
binary dropout mask that will be multiplied with the input.
|
| 29 |
+
For instance, if your inputs have shape
|
| 30 |
+
`(batch_size, timesteps, features)` and
|
| 31 |
+
you want the dropout mask to be the same for all timesteps,
|
| 32 |
+
you can use `noise_shape=(batch_size, 1, features)`.
|
| 33 |
+
seed: A Python integer to use as random seed.
|
| 34 |
+
|
| 35 |
+
Call arguments:
|
| 36 |
+
inputs: Input tensor (of any rank).
|
| 37 |
+
training: Python boolean indicating whether the layer should behave in
|
| 38 |
+
training mode (adding dropout) or in inference mode (doing nothing).
|
| 39 |
+
"""
|
| 40 |
+
|
| 41 |
+
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
|
| 42 |
+
super().__init__(**kwargs)
|
| 43 |
+
if not 0 <= rate <= 1:
|
| 44 |
+
raise ValueError(
|
| 45 |
+
f"Invalid value received for argument "
|
| 46 |
+
"`rate`. Expected a float value between 0 and 1. "
|
| 47 |
+
f"Received: rate={rate}"
|
| 48 |
+
)
|
| 49 |
+
self.rate = rate
|
| 50 |
+
self.seed = seed
|
| 51 |
+
self.noise_shape = noise_shape
|
| 52 |
+
if rate > 0:
|
| 53 |
+
self.seed_generator = backend.random.SeedGenerator(seed)
|
| 54 |
+
self.supports_masking = True
|
| 55 |
+
self.built = True
|
| 56 |
+
|
| 57 |
+
def call(self, inputs, training=False):
|
| 58 |
+
if training and self.rate > 0:
|
| 59 |
+
return backend.random.dropout(
|
| 60 |
+
inputs,
|
| 61 |
+
self.rate,
|
| 62 |
+
noise_shape=self.noise_shape,
|
| 63 |
+
seed=self.seed_generator,
|
| 64 |
+
)
|
| 65 |
+
return inputs
|
| 66 |
+
|
| 67 |
+
def compute_output_shape(self, input_shape):
|
| 68 |
+
return input_shape
|
| 69 |
+
|
| 70 |
+
def get_config(self):
|
| 71 |
+
base_config = super().get_config()
|
| 72 |
+
config = {
|
| 73 |
+
"rate": self.rate,
|
| 74 |
+
"seed": self.seed,
|
| 75 |
+
"noise_shape": self.noise_shape,
|
| 76 |
+
}
|
| 77 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/gaussian_dropout.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from keras.src import backend
|
| 4 |
+
from keras.src import layers
|
| 5 |
+
from keras.src import ops
|
| 6 |
+
from keras.src.api_export import keras_export
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export("keras.layers.GaussianDropout")
|
| 10 |
+
class GaussianDropout(layers.Layer):
|
| 11 |
+
"""Apply multiplicative 1-centered Gaussian noise.
|
| 12 |
+
|
| 13 |
+
As it is a regularization layer, it is only active at training time.
|
| 14 |
+
|
| 15 |
+
Args:
|
| 16 |
+
rate: Float, drop probability (as with `Dropout`).
|
| 17 |
+
The multiplicative noise will have
|
| 18 |
+
standard deviation `sqrt(rate / (1 - rate))`.
|
| 19 |
+
seed: Integer, optional random seed to enable deterministic behavior.
|
| 20 |
+
|
| 21 |
+
Call arguments:
|
| 22 |
+
inputs: Input tensor (of any rank).
|
| 23 |
+
training: Python boolean indicating whether the layer should behave in
|
| 24 |
+
training mode (adding dropout) or in inference mode (doing nothing).
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
def __init__(self, rate, seed=None, **kwargs):
|
| 28 |
+
super().__init__(**kwargs)
|
| 29 |
+
if not 0 <= rate <= 1:
|
| 30 |
+
raise ValueError(
|
| 31 |
+
f"Invalid value received for argument "
|
| 32 |
+
"`rate`. Expected a float value between 0 and 1. "
|
| 33 |
+
f"Received: rate={rate}"
|
| 34 |
+
)
|
| 35 |
+
self.rate = rate
|
| 36 |
+
self.seed = seed
|
| 37 |
+
if rate > 0:
|
| 38 |
+
self.seed_generator = backend.random.SeedGenerator(seed)
|
| 39 |
+
self.supports_masking = True
|
| 40 |
+
self.built = True
|
| 41 |
+
|
| 42 |
+
def call(self, inputs, training=False):
|
| 43 |
+
if training and self.rate > 0:
|
| 44 |
+
stddev = math.sqrt(self.rate / (1.0 - self.rate))
|
| 45 |
+
return inputs * backend.random.normal(
|
| 46 |
+
shape=ops.shape(inputs),
|
| 47 |
+
mean=1.0,
|
| 48 |
+
stddev=stddev,
|
| 49 |
+
dtype=self.compute_dtype,
|
| 50 |
+
seed=self.seed_generator,
|
| 51 |
+
)
|
| 52 |
+
return inputs
|
| 53 |
+
|
| 54 |
+
def compute_output_shape(self, input_shape):
|
| 55 |
+
return input_shape
|
| 56 |
+
|
| 57 |
+
def get_config(self):
|
| 58 |
+
base_config = super().get_config()
|
| 59 |
+
config = {
|
| 60 |
+
"rate": self.rate,
|
| 61 |
+
"seed": self.seed,
|
| 62 |
+
}
|
| 63 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/gaussian_noise.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import layers
|
| 3 |
+
from keras.src import ops
|
| 4 |
+
from keras.src.api_export import keras_export
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@keras_export("keras.layers.GaussianNoise")
|
| 8 |
+
class GaussianNoise(layers.Layer):
|
| 9 |
+
"""Apply additive zero-centered Gaussian noise.
|
| 10 |
+
|
| 11 |
+
This is useful to mitigate overfitting
|
| 12 |
+
(you could see it as a form of random data augmentation).
|
| 13 |
+
Gaussian Noise (GS) is a natural choice as corruption process
|
| 14 |
+
for real valued inputs.
|
| 15 |
+
|
| 16 |
+
As it is a regularization layer, it is only active at training time.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
stddev: Float, standard deviation of the noise distribution.
|
| 20 |
+
seed: Integer, optional random seed to enable deterministic behavior.
|
| 21 |
+
|
| 22 |
+
Call arguments:
|
| 23 |
+
inputs: Input tensor (of any rank).
|
| 24 |
+
training: Python boolean indicating whether the layer should behave in
|
| 25 |
+
training mode (adding noise) or in inference mode (doing nothing).
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
def __init__(self, stddev, seed=None, **kwargs):
|
| 29 |
+
super().__init__(**kwargs)
|
| 30 |
+
if not 0 <= stddev <= 1:
|
| 31 |
+
raise ValueError(
|
| 32 |
+
f"Invalid value received for argument "
|
| 33 |
+
"`stddev`. Expected a float value between 0 and 1. "
|
| 34 |
+
f"Received: stddev={stddev}"
|
| 35 |
+
)
|
| 36 |
+
self.stddev = stddev
|
| 37 |
+
self.seed = seed
|
| 38 |
+
if stddev > 0:
|
| 39 |
+
self.seed_generator = backend.random.SeedGenerator(seed)
|
| 40 |
+
self.supports_masking = True
|
| 41 |
+
self.built = True
|
| 42 |
+
|
| 43 |
+
def call(self, inputs, training=False):
|
| 44 |
+
if training and self.stddev > 0:
|
| 45 |
+
return inputs + backend.random.normal(
|
| 46 |
+
shape=ops.shape(inputs),
|
| 47 |
+
mean=0.0,
|
| 48 |
+
stddev=self.stddev,
|
| 49 |
+
dtype=self.compute_dtype,
|
| 50 |
+
seed=self.seed_generator,
|
| 51 |
+
)
|
| 52 |
+
return inputs
|
| 53 |
+
|
| 54 |
+
def compute_output_shape(self, input_shape):
|
| 55 |
+
return input_shape
|
| 56 |
+
|
| 57 |
+
def get_config(self):
|
| 58 |
+
base_config = super().get_config()
|
| 59 |
+
config = {
|
| 60 |
+
"stddev": self.stddev,
|
| 61 |
+
"seed": self.seed,
|
| 62 |
+
}
|
| 63 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/spatial_dropout.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.input_spec import InputSpec
|
| 5 |
+
from keras.src.layers.regularization.dropout import Dropout
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class BaseSpatialDropout(Dropout):
    """Common base for the `SpatialDropoutND` layers.

    Reuses `Dropout`'s rate validation and seed handling, but builds the
    dropout mask from a subclass-provided `_get_noise_shape()` so that
    entire feature maps are kept or dropped together.
    """

    def __init__(self, rate, seed=None, name=None, dtype=None):
        super().__init__(rate, seed=seed, name=name, dtype=dtype)

    def call(self, inputs, training=False):
        # Apply dropout only in training mode with a non-zero rate; the
        # broadcastable noise shape zeroes whole feature maps at once.
        if training and self.rate > 0:
            return backend.random.dropout(
                inputs,
                self.rate,
                noise_shape=self._get_noise_shape(inputs),
                seed=self.seed_generator,
            )
        return inputs

    def get_config(self):
        # Intentionally does not call super().get_config(): the parent's
        # `noise_shape` entry is not serialized because here the noise shape
        # is derived from the input at call time.
        return {
            "rate": self.rate,
            "seed": self.seed,
            "name": self.name,
            "dtype": self.dtype,
        }
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@keras_export("keras.layers.SpatialDropout1D")
|
| 32 |
+
class SpatialDropout1D(BaseSpatialDropout):
|
| 33 |
+
"""Spatial 1D version of Dropout.
|
| 34 |
+
|
| 35 |
+
This layer performs the same function as Dropout, however, it drops
|
| 36 |
+
entire 1D feature maps instead of individual elements. If adjacent frames
|
| 37 |
+
within feature maps are strongly correlated (as is normally the case in
|
| 38 |
+
early convolution layers) then regular dropout will not regularize the
|
| 39 |
+
activations and will otherwise just result in an effective learning rate
|
| 40 |
+
decrease. In this case, `SpatialDropout1D` will help promote independence
|
| 41 |
+
between feature maps and should be used instead.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
rate: Float between 0 and 1. Fraction of the input units to drop.
|
| 45 |
+
|
| 46 |
+
Call arguments:
|
| 47 |
+
inputs: A 3D tensor.
|
| 48 |
+
training: Python boolean indicating whether the layer
|
| 49 |
+
should behave in training mode (applying dropout)
|
| 50 |
+
or in inference mode (pass-through).
|
| 51 |
+
|
| 52 |
+
Input shape:
|
| 53 |
+
3D tensor with shape: `(samples, timesteps, channels)`
|
| 54 |
+
|
| 55 |
+
Output shape: Same as input.
|
| 56 |
+
|
| 57 |
+
Reference:
|
| 58 |
+
|
| 59 |
+
- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)
|
| 60 |
+
"""
|
| 61 |
+
|
| 62 |
+
def __init__(self, rate, seed=None, name=None, dtype=None):
|
| 63 |
+
super().__init__(rate, seed=seed, name=name, dtype=dtype)
|
| 64 |
+
self.input_spec = InputSpec(ndim=3)
|
| 65 |
+
|
| 66 |
+
def _get_noise_shape(self, inputs):
|
| 67 |
+
input_shape = ops.shape(inputs)
|
| 68 |
+
return (input_shape[0], 1, input_shape[2])
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@keras_export("keras.layers.SpatialDropout2D")
|
| 72 |
+
class SpatialDropout2D(BaseSpatialDropout):
|
| 73 |
+
"""Spatial 2D version of Dropout.
|
| 74 |
+
|
| 75 |
+
This version performs the same function as Dropout, however, it drops
|
| 76 |
+
entire 2D feature maps instead of individual elements. If adjacent pixels
|
| 77 |
+
within feature maps are strongly correlated (as is normally the case in
|
| 78 |
+
early convolution layers) then regular dropout will not regularize the
|
| 79 |
+
activations and will otherwise just result in an effective learning rate
|
| 80 |
+
decrease. In this case, `SpatialDropout2D` will help promote independence
|
| 81 |
+
between feature maps and should be used instead.
|
| 82 |
+
|
| 83 |
+
Args:
|
| 84 |
+
rate: Float between 0 and 1. Fraction of the input units to drop.
|
| 85 |
+
data_format: `"channels_first"` or `"channels_last"`.
|
| 86 |
+
In `"channels_first"` mode, the channels dimension (the depth)
|
| 87 |
+
is at index 1, in `"channels_last"` mode is it at index 3.
|
| 88 |
+
It defaults to the `image_data_format` value found in your
|
| 89 |
+
Keras config file at `~/.keras/keras.json`.
|
| 90 |
+
If you never set it, then it will be `"channels_last"`.
|
| 91 |
+
|
| 92 |
+
Call arguments:
|
| 93 |
+
inputs: A 4D tensor.
|
| 94 |
+
training: Python boolean indicating whether the layer
|
| 95 |
+
should behave in training mode (applying dropout)
|
| 96 |
+
or in inference mode (pass-through).
|
| 97 |
+
|
| 98 |
+
Input shape:
|
| 99 |
+
4D tensor with shape: `(samples, channels, rows, cols)` if
|
| 100 |
+
data_format='channels_first'
|
| 101 |
+
or 4D tensor with shape: `(samples, rows, cols, channels)` if
|
| 102 |
+
data_format='channels_last'.
|
| 103 |
+
|
| 104 |
+
Output shape: Same as input.
|
| 105 |
+
|
| 106 |
+
Reference:
|
| 107 |
+
|
| 108 |
+
- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)
|
| 109 |
+
"""
|
| 110 |
+
|
| 111 |
+
def __init__(
|
| 112 |
+
self, rate, data_format=None, seed=None, name=None, dtype=None
|
| 113 |
+
):
|
| 114 |
+
super().__init__(rate, seed=seed, name=name, dtype=dtype)
|
| 115 |
+
self.data_format = backend.standardize_data_format(data_format)
|
| 116 |
+
self.input_spec = InputSpec(ndim=4)
|
| 117 |
+
|
| 118 |
+
def _get_noise_shape(self, inputs):
|
| 119 |
+
input_shape = ops.shape(inputs)
|
| 120 |
+
if self.data_format == "channels_first":
|
| 121 |
+
return (input_shape[0], input_shape[1], 1, 1)
|
| 122 |
+
elif self.data_format == "channels_last":
|
| 123 |
+
return (input_shape[0], 1, 1, input_shape[3])
|
| 124 |
+
|
| 125 |
+
def get_config(self):
|
| 126 |
+
base_config = super().get_config()
|
| 127 |
+
config = {
|
| 128 |
+
"data_format": self.data_format,
|
| 129 |
+
}
|
| 130 |
+
return {**base_config, **config}
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
@keras_export("keras.layers.SpatialDropout3D")
|
| 134 |
+
class SpatialDropout3D(BaseSpatialDropout):
|
| 135 |
+
"""Spatial 3D version of Dropout.
|
| 136 |
+
|
| 137 |
+
This version performs the same function as Dropout, however, it drops
|
| 138 |
+
entire 3D feature maps instead of individual elements. If adjacent voxels
|
| 139 |
+
within feature maps are strongly correlated (as is normally the case in
|
| 140 |
+
early convolution layers) then regular dropout will not regularize the
|
| 141 |
+
activations and will otherwise just result in an effective learning rate
|
| 142 |
+
decrease. In this case, SpatialDropout3D will help promote independence
|
| 143 |
+
between feature maps and should be used instead.
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
rate: Float between 0 and 1. Fraction of the input units to drop.
|
| 147 |
+
data_format: `"channels_first"` or `"channels_last"`.
|
| 148 |
+
In `"channels_first"` mode, the channels dimension (the depth)
|
| 149 |
+
is at index 1, in `"channels_last"` mode is it at index 4.
|
| 150 |
+
It defaults to the `image_data_format` value found in your
|
| 151 |
+
Keras config file at `~/.keras/keras.json`.
|
| 152 |
+
If you never set it, then it will be `"channels_last"`.
|
| 153 |
+
|
| 154 |
+
Call arguments:
|
| 155 |
+
inputs: A 5D tensor.
|
| 156 |
+
training: Python boolean indicating whether the layer
|
| 157 |
+
should behave in training mode (applying dropout)
|
| 158 |
+
or in inference mode (pass-through).
|
| 159 |
+
|
| 160 |
+
Input shape:
|
| 161 |
+
5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if
|
| 162 |
+
data_format='channels_first'
|
| 163 |
+
or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if
|
| 164 |
+
data_format='channels_last'.
|
| 165 |
+
|
| 166 |
+
Output shape: Same as input.
|
| 167 |
+
|
| 168 |
+
Reference:
|
| 169 |
+
|
| 170 |
+
- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)
|
| 171 |
+
"""
|
| 172 |
+
|
| 173 |
+
def __init__(
|
| 174 |
+
self, rate, data_format=None, seed=None, name=None, dtype=None
|
| 175 |
+
):
|
| 176 |
+
super().__init__(rate, seed=seed, name=name, dtype=dtype)
|
| 177 |
+
self.data_format = backend.standardize_data_format(data_format)
|
| 178 |
+
self.input_spec = InputSpec(ndim=5)
|
| 179 |
+
|
| 180 |
+
def _get_noise_shape(self, inputs):
|
| 181 |
+
input_shape = ops.shape(inputs)
|
| 182 |
+
if self.data_format == "channels_first":
|
| 183 |
+
return (input_shape[0], input_shape[1], 1, 1, 1)
|
| 184 |
+
elif self.data_format == "channels_last":
|
| 185 |
+
return (input_shape[0], 1, 1, 1, input_shape[4])
|
| 186 |
+
|
| 187 |
+
def get_config(self):
|
| 188 |
+
base_config = super().get_config()
|
| 189 |
+
config = {
|
| 190 |
+
"data_format": self.data_format,
|
| 191 |
+
}
|
| 192 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (202 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping1d.cpython-310.pyc
ADDED
|
Binary file (3.01 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping2d.cpython-310.pyc
ADDED
|
Binary file (6.1 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping3d.cpython-310.pyc
ADDED
|
Binary file (7.7 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/flatten.cpython-310.pyc
ADDED
|
Binary file (3.33 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/permute.cpython-310.pyc
ADDED
|
Binary file (2.69 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/repeat_vector.cpython-310.pyc
ADDED
|
Binary file (1.99 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/reshape.cpython-310.pyc
ADDED
|
Binary file (2.99 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling1d.cpython-310.pyc
ADDED
|
Binary file (2.26 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling2d.cpython-310.pyc
ADDED
|
Binary file (5.12 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling3d.cpython-310.pyc
ADDED
|
Binary file (4.57 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding1d.cpython-310.pyc
ADDED
|
Binary file (3.82 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding2d.cpython-310.pyc
ADDED
|
Binary file (4.64 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding3d.cpython-310.pyc
ADDED
|
Binary file (4.86 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping1d.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
from keras.src.layers.input_spec import InputSpec
|
| 3 |
+
from keras.src.layers.layer import Layer
|
| 4 |
+
from keras.src.utils import argument_validation
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@keras_export("keras.layers.Cropping1D")
|
| 8 |
+
class Cropping1D(Layer):
|
| 9 |
+
"""Cropping layer for 1D input (e.g. temporal sequence).
|
| 10 |
+
|
| 11 |
+
It crops along the time dimension (axis 1).
|
| 12 |
+
|
| 13 |
+
Example:
|
| 14 |
+
|
| 15 |
+
>>> input_shape = (2, 3, 2)
|
| 16 |
+
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
|
| 17 |
+
>>> x
|
| 18 |
+
[[[ 0 1]
|
| 19 |
+
[ 2 3]
|
| 20 |
+
[ 4 5]]
|
| 21 |
+
[[ 6 7]
|
| 22 |
+
[ 8 9]
|
| 23 |
+
[10 11]]]
|
| 24 |
+
>>> y = keras.layers.Cropping1D(cropping=1)(x)
|
| 25 |
+
>>> y
|
| 26 |
+
[[[2 3]]
|
| 27 |
+
[[8 9]]]
|
| 28 |
+
|
| 29 |
+
Args:
|
| 30 |
+
cropping: Int, or tuple of int (length 2), or dictionary.
|
| 31 |
+
- If int: how many units should be trimmed off at the beginning and
|
| 32 |
+
end of the cropping dimension (axis 1).
|
| 33 |
+
- If tuple of 2 ints: how many units should be trimmed off at the
|
| 34 |
+
beginning and end of the cropping dimension
|
| 35 |
+
(`(left_crop, right_crop)`).
|
| 36 |
+
|
| 37 |
+
Input shape:
|
| 38 |
+
3D tensor with shape `(batch_size, axis_to_crop, features)`
|
| 39 |
+
|
| 40 |
+
Output shape:
|
| 41 |
+
3D tensor with shape `(batch_size, cropped_axis, features)`
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
def __init__(self, cropping=(1, 1), **kwargs):
|
| 45 |
+
super().__init__(**kwargs)
|
| 46 |
+
self.cropping = argument_validation.standardize_tuple(
|
| 47 |
+
cropping, 2, "cropping", allow_zero=True
|
| 48 |
+
)
|
| 49 |
+
self.input_spec = InputSpec(ndim=3)
|
| 50 |
+
|
| 51 |
+
def compute_output_shape(self, input_shape):
|
| 52 |
+
if input_shape[1] is not None:
|
| 53 |
+
length = input_shape[1] - self.cropping[0] - self.cropping[1]
|
| 54 |
+
if length <= 0:
|
| 55 |
+
raise ValueError(
|
| 56 |
+
"`cropping` parameter of `Cropping1D` layer must be "
|
| 57 |
+
"smaller than the input length. Received: input_shape="
|
| 58 |
+
f"{input_shape}, cropping={self.cropping}"
|
| 59 |
+
)
|
| 60 |
+
else:
|
| 61 |
+
length = None
|
| 62 |
+
return (input_shape[0], length, input_shape[2])
|
| 63 |
+
|
| 64 |
+
def call(self, inputs):
|
| 65 |
+
if (
|
| 66 |
+
inputs.shape[1] is not None
|
| 67 |
+
and sum(self.cropping) >= inputs.shape[1]
|
| 68 |
+
):
|
| 69 |
+
raise ValueError(
|
| 70 |
+
"`cropping` parameter of `Cropping1D` layer must be "
|
| 71 |
+
"smaller than the input length. Received: inputs.shape="
|
| 72 |
+
f"{inputs.shape}, cropping={self.cropping}"
|
| 73 |
+
)
|
| 74 |
+
if self.cropping[1] == 0:
|
| 75 |
+
return inputs[:, self.cropping[0] :, :]
|
| 76 |
+
else:
|
| 77 |
+
return inputs[:, self.cropping[0] : -self.cropping[1], :]
|
| 78 |
+
|
| 79 |
+
def get_config(self):
|
| 80 |
+
config = {"cropping": self.cropping}
|
| 81 |
+
base_config = super().get_config()
|
| 82 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping2d.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.input_spec import InputSpec
|
| 4 |
+
from keras.src.layers.layer import Layer
|
| 5 |
+
from keras.src.utils import argument_validation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.layers.Cropping2D")
|
| 9 |
+
class Cropping2D(Layer):
|
| 10 |
+
"""Cropping layer for 2D input (e.g. picture).
|
| 11 |
+
|
| 12 |
+
It crops along spatial dimensions, i.e. height and width.
|
| 13 |
+
|
| 14 |
+
Example:
|
| 15 |
+
|
| 16 |
+
>>> input_shape = (2, 28, 28, 3)
|
| 17 |
+
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
|
| 18 |
+
>>> y = keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)
|
| 19 |
+
>>> y.shape
|
| 20 |
+
(2, 24, 20, 3)
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
|
| 24 |
+
- If int: the same symmetric cropping is applied to height and
|
| 25 |
+
width.
|
| 26 |
+
- If tuple of 2 ints: interpreted as two different symmetric
|
| 27 |
+
cropping values for height and width:
|
| 28 |
+
`(symmetric_height_crop, symmetric_width_crop)`.
|
| 29 |
+
- If tuple of 2 tuples of 2 ints: interpreted as
|
| 30 |
+
`((top_crop, bottom_crop), (left_crop, right_crop))`.
|
| 31 |
+
data_format: A string, one of `"channels_last"` (default) or
|
| 32 |
+
`"channels_first"`. The ordering of the dimensions in the inputs.
|
| 33 |
+
`"channels_last"` corresponds to inputs with shape
|
| 34 |
+
`(batch_size, height, width, channels)` while `"channels_first"`
|
| 35 |
+
corresponds to inputs with shape
|
| 36 |
+
`(batch_size, channels, height, width)`.
|
| 37 |
+
When unspecified, uses `image_data_format` value found in your Keras
|
| 38 |
+
config file at `~/.keras/keras.json` (if exists). Defaults to
|
| 39 |
+
`"channels_last"`.
|
| 40 |
+
|
| 41 |
+
Input shape:
|
| 42 |
+
4D tensor with shape:
|
| 43 |
+
- If `data_format` is `"channels_last"`:
|
| 44 |
+
`(batch_size, height, width, channels)`
|
| 45 |
+
- If `data_format` is `"channels_first"`:
|
| 46 |
+
`(batch_size, channels, height, width)`
|
| 47 |
+
|
| 48 |
+
Output shape:
|
| 49 |
+
4D tensor with shape:
|
| 50 |
+
- If `data_format` is `"channels_last"`:
|
| 51 |
+
`(batch_size, cropped_height, cropped_width, channels)`
|
| 52 |
+
- If `data_format` is `"channels_first"`:
|
| 53 |
+
`(batch_size, channels, cropped_height, cropped_width)`
|
| 54 |
+
"""
|
| 55 |
+
|
| 56 |
+
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
|
| 57 |
+
super().__init__(**kwargs)
|
| 58 |
+
self.data_format = backend.standardize_data_format(data_format)
|
| 59 |
+
if isinstance(cropping, int):
|
| 60 |
+
if cropping < 0:
|
| 61 |
+
raise ValueError(
|
| 62 |
+
"`cropping` cannot be negative. "
|
| 63 |
+
f"Received: cropping={cropping}."
|
| 64 |
+
)
|
| 65 |
+
self.cropping = ((cropping, cropping), (cropping, cropping))
|
| 66 |
+
elif hasattr(cropping, "__len__"):
|
| 67 |
+
if len(cropping) != 2:
|
| 68 |
+
raise ValueError(
|
| 69 |
+
"`cropping` should have two elements. "
|
| 70 |
+
f"Received: cropping={cropping}."
|
| 71 |
+
)
|
| 72 |
+
height_cropping = argument_validation.standardize_tuple(
|
| 73 |
+
cropping[0], 2, "1st entry of cropping", allow_zero=True
|
| 74 |
+
)
|
| 75 |
+
width_cropping = argument_validation.standardize_tuple(
|
| 76 |
+
cropping[1], 2, "2nd entry of cropping", allow_zero=True
|
| 77 |
+
)
|
| 78 |
+
self.cropping = (height_cropping, width_cropping)
|
| 79 |
+
else:
|
| 80 |
+
raise ValueError(
|
| 81 |
+
"`cropping` should be either an int, a tuple of 2 ints "
|
| 82 |
+
"(symmetric_height_crop, symmetric_width_crop), "
|
| 83 |
+
"or a tuple of 2 tuples of 2 ints "
|
| 84 |
+
"((top_crop, bottom_crop), (left_crop, right_crop)). "
|
| 85 |
+
f"Received: cropping={cropping}."
|
| 86 |
+
)
|
| 87 |
+
self.input_spec = InputSpec(ndim=4)
|
| 88 |
+
|
| 89 |
+
def compute_output_shape(self, input_shape):
|
| 90 |
+
if self.data_format == "channels_first":
|
| 91 |
+
if (
|
| 92 |
+
input_shape[2] is not None
|
| 93 |
+
and sum(self.cropping[0]) >= input_shape[2]
|
| 94 |
+
) or (
|
| 95 |
+
input_shape[3] is not None
|
| 96 |
+
and sum(self.cropping[1]) >= input_shape[3]
|
| 97 |
+
):
|
| 98 |
+
raise ValueError(
|
| 99 |
+
"Values in `cropping` argument should be smaller than the "
|
| 100 |
+
"corresponding spatial dimension of the input. Received: "
|
| 101 |
+
f"input_shape={input_shape}, cropping={self.cropping}"
|
| 102 |
+
)
|
| 103 |
+
return (
|
| 104 |
+
input_shape[0],
|
| 105 |
+
input_shape[1],
|
| 106 |
+
(
|
| 107 |
+
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
|
| 108 |
+
if input_shape[2] is not None
|
| 109 |
+
else None
|
| 110 |
+
),
|
| 111 |
+
(
|
| 112 |
+
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
|
| 113 |
+
if input_shape[3] is not None
|
| 114 |
+
else None
|
| 115 |
+
),
|
| 116 |
+
)
|
| 117 |
+
else:
|
| 118 |
+
if (
|
| 119 |
+
input_shape[1] is not None
|
| 120 |
+
and sum(self.cropping[0]) >= input_shape[1]
|
| 121 |
+
) or (
|
| 122 |
+
input_shape[2] is not None
|
| 123 |
+
and sum(self.cropping[1]) >= input_shape[2]
|
| 124 |
+
):
|
| 125 |
+
raise ValueError(
|
| 126 |
+
"Values in `cropping` argument should be smaller than the "
|
| 127 |
+
"corresponding spatial dimension of the input. Received: "
|
| 128 |
+
f"input_shape={input_shape}, cropping={self.cropping}"
|
| 129 |
+
)
|
| 130 |
+
return (
|
| 131 |
+
input_shape[0],
|
| 132 |
+
(
|
| 133 |
+
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
|
| 134 |
+
if input_shape[1] is not None
|
| 135 |
+
else None
|
| 136 |
+
),
|
| 137 |
+
(
|
| 138 |
+
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
|
| 139 |
+
if input_shape[2] is not None
|
| 140 |
+
else None
|
| 141 |
+
),
|
| 142 |
+
input_shape[3],
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
def call(self, inputs):
|
| 146 |
+
if self.data_format == "channels_first":
|
| 147 |
+
if (
|
| 148 |
+
inputs.shape[2] is not None
|
| 149 |
+
and sum(self.cropping[0]) >= inputs.shape[2]
|
| 150 |
+
) or (
|
| 151 |
+
inputs.shape[3] is not None
|
| 152 |
+
and sum(self.cropping[1]) >= inputs.shape[3]
|
| 153 |
+
):
|
| 154 |
+
raise ValueError(
|
| 155 |
+
"Values in `cropping` argument should be smaller than the "
|
| 156 |
+
"corresponding spatial dimension of the input. Received: "
|
| 157 |
+
f"inputs.shape={inputs.shape}, cropping={self.cropping}"
|
| 158 |
+
)
|
| 159 |
+
if self.cropping[0][1] == self.cropping[1][1] == 0:
|
| 160 |
+
return inputs[
|
| 161 |
+
:, :, self.cropping[0][0] :, self.cropping[1][0] :
|
| 162 |
+
]
|
| 163 |
+
elif self.cropping[0][1] == 0:
|
| 164 |
+
return inputs[
|
| 165 |
+
:,
|
| 166 |
+
:,
|
| 167 |
+
self.cropping[0][0] :,
|
| 168 |
+
self.cropping[1][0] : -self.cropping[1][1],
|
| 169 |
+
]
|
| 170 |
+
elif self.cropping[1][1] == 0:
|
| 171 |
+
return inputs[
|
| 172 |
+
:,
|
| 173 |
+
:,
|
| 174 |
+
self.cropping[0][0] : -self.cropping[0][1],
|
| 175 |
+
self.cropping[1][0] :,
|
| 176 |
+
]
|
| 177 |
+
return inputs[
|
| 178 |
+
:,
|
| 179 |
+
:,
|
| 180 |
+
self.cropping[0][0] : -self.cropping[0][1],
|
| 181 |
+
self.cropping[1][0] : -self.cropping[1][1],
|
| 182 |
+
]
|
| 183 |
+
else:
|
| 184 |
+
if (
|
| 185 |
+
inputs.shape[1] is not None
|
| 186 |
+
and sum(self.cropping[0]) >= inputs.shape[1]
|
| 187 |
+
) or (
|
| 188 |
+
inputs.shape[2] is not None
|
| 189 |
+
and sum(self.cropping[1]) >= inputs.shape[2]
|
| 190 |
+
):
|
| 191 |
+
raise ValueError(
|
| 192 |
+
"Values in `cropping` argument should be smaller than the "
|
| 193 |
+
"corresponding spatial dimension of the input. Received: "
|
| 194 |
+
f"inputs.shape={inputs.shape}, cropping={self.cropping}"
|
| 195 |
+
)
|
| 196 |
+
if self.cropping[0][1] == self.cropping[1][1] == 0:
|
| 197 |
+
return inputs[
|
| 198 |
+
:, self.cropping[0][0] :, self.cropping[1][0] :, :
|
| 199 |
+
]
|
| 200 |
+
elif self.cropping[0][1] == 0:
|
| 201 |
+
return inputs[
|
| 202 |
+
:,
|
| 203 |
+
self.cropping[0][0] :,
|
| 204 |
+
self.cropping[1][0] : -self.cropping[1][1],
|
| 205 |
+
:,
|
| 206 |
+
]
|
| 207 |
+
elif self.cropping[1][1] == 0:
|
| 208 |
+
return inputs[
|
| 209 |
+
:,
|
| 210 |
+
self.cropping[0][0] : -self.cropping[0][1],
|
| 211 |
+
self.cropping[1][0] :,
|
| 212 |
+
:,
|
| 213 |
+
]
|
| 214 |
+
return inputs[
|
| 215 |
+
:,
|
| 216 |
+
self.cropping[0][0] : -self.cropping[0][1],
|
| 217 |
+
self.cropping[1][0] : -self.cropping[1][1],
|
| 218 |
+
:,
|
| 219 |
+
]
|
| 220 |
+
|
| 221 |
+
def get_config(self):
|
| 222 |
+
config = {"cropping": self.cropping, "data_format": self.data_format}
|
| 223 |
+
base_config = super().get_config()
|
| 224 |
+
return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping3d.py
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.input_spec import InputSpec
|
| 4 |
+
from keras.src.layers.layer import Layer
|
| 5 |
+
from keras.src.utils import argument_validation
|
| 6 |
+
|
| 7 |
+
|
@keras_export("keras.layers.Cropping3D")
class Cropping3D(Layer):
    """Cropping layer for 3D data (e.g. spatial or spatio-temporal).

    Example:

    >>> input_shape = (2, 28, 28, 10, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> y = keras.layers.Cropping3D(cropping=(2, 4, 2))(x)
    >>> y.shape
    (2, 24, 20, 6, 3)

    Args:
        cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
            - If int: the same symmetric cropping is applied to depth, height,
              and width.
            - If tuple of 3 ints: interpreted as three different symmetric
              cropping values for depth, height, and width:
              `(symmetric_dim1_crop, symmetric_dim2_crop,
              symmetric_dim3_crop)`.
            - If tuple of 3 tuples of 2 ints: interpreted as
              `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
              right_dim2_crop), (left_dim3_crop, right_dim3_crop))`.
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            When unspecified, uses `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json` (if exists). Defaults
            to `"channels_last"`.

    Input shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, first_axis_to_crop, second_axis_to_crop,
          third_axis_to_crop, channels)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, channels, first_axis_to_crop, second_axis_to_crop,
          third_axis_to_crop)`

    Output shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, first_cropped_axis, second_cropped_axis,
          third_cropped_axis, channels)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, channels, first_cropped_axis, second_cropped_axis,
          third_cropped_axis)`
    """

    def __init__(
        self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        if isinstance(cropping, int):
            if cropping < 0:
                raise ValueError(
                    "`cropping` cannot be negative. "
                    f"Received: cropping={cropping}."
                )
            # A single int means the same symmetric crop on all 3 dims.
            self.cropping = (
                (cropping, cropping),
                (cropping, cropping),
                (cropping, cropping),
            )
        elif hasattr(cropping, "__len__"):
            if len(cropping) != 3:
                raise ValueError(
                    f"`cropping` should have 3 elements. Received: {cropping}."
                )
            # Each entry may be an int (symmetric) or a (left, right) pair;
            # `standardize_tuple` normalizes both to a 2-tuple.
            dim1_cropping = argument_validation.standardize_tuple(
                cropping[0], 2, "1st entry of cropping", allow_zero=True
            )
            dim2_cropping = argument_validation.standardize_tuple(
                cropping[1], 2, "2nd entry of cropping", allow_zero=True
            )
            dim3_cropping = argument_validation.standardize_tuple(
                cropping[2], 2, "3rd entry of cropping", allow_zero=True
            )
            self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
        else:
            # Fixed typo in the original message: the third pair's right
            # crop was labeled `right_dim2_crop` instead of `right_dim3_crop`.
            raise ValueError(
                "`cropping` should be either an int, a tuple of 3 ints "
                "(symmetric_dim1_crop, symmetric_dim2_crop, "
                "symmetric_dim3_crop), "
                "or a tuple of 3 tuples of 2 ints "
                "((left_dim1_crop, right_dim1_crop),"
                " (left_dim2_crop, right_dim2_crop),"
                " (left_dim3_crop, right_dim3_crop)). "
                f"Received: {cropping}."
            )
        self.input_spec = InputSpec(ndim=5)

    def compute_output_shape(self, input_shape):
        """Shrink each spatial dim by the total crop; `None` dims pass
        through unchanged. Raises if a known dim would be cropped away."""
        if self.data_format == "channels_first":
            spatial_dims = list(input_shape[2:5])
        else:
            spatial_dims = list(input_shape[1:4])

        for index in range(0, 3):
            if spatial_dims[index] is None:
                continue
            spatial_dims[index] -= sum(self.cropping[index])
            if spatial_dims[index] <= 0:
                raise ValueError(
                    "Values in `cropping` argument should be smaller than the "
                    "corresponding spatial dimension of the input. Received: "
                    f"input_shape={input_shape}, cropping={self.cropping}"
                )

        if self.data_format == "channels_first":
            return (input_shape[0], input_shape[1], *spatial_dims)
        else:
            return (input_shape[0], *spatial_dims, input_shape[4])

    def call(self, inputs):
        """Crop the 3 spatial dims of `inputs` per `self.cropping`."""
        if self.data_format == "channels_first":
            spatial_dims = list(inputs.shape[2:5])
        else:
            spatial_dims = list(inputs.shape[1:4])

        # Validate that every statically-known spatial dim keeps at least
        # one element after cropping (dynamic `None` dims are skipped).
        for index in range(0, 3):
            if spatial_dims[index] is None:
                continue
            spatial_dims[index] -= sum(self.cropping[index])
            if spatial_dims[index] <= 0:
                raise ValueError(
                    "Values in `cropping` argument should be smaller than the "
                    "corresponding spatial dimension of the input. Received: "
                    f"inputs.shape={inputs.shape}, cropping={self.cropping}"
                )

        # Build one slice per spatial dim. A right-crop of 0 must become
        # `slice(start, None)` — `slice(start, 0)` would yield an empty dim.
        # This replaces the original's 16 hand-written slicing branches.
        spatial_slices = tuple(
            slice(start, -end if end else None)
            for (start, end) in self.cropping
        )
        if self.data_format == "channels_first":
            return inputs[(slice(None), slice(None)) + spatial_slices]
        return inputs[(slice(None),) + spatial_slices + (slice(None),)]

    def get_config(self):
        """Return the serializable config of the layer."""
        config = {"cropping": self.cropping, "data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/flatten.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from keras.src import backend
|
| 4 |
+
from keras.src import ops
|
| 5 |
+
from keras.src.api_export import keras_export
|
| 6 |
+
from keras.src.backend.common.keras_tensor import KerasTensor
|
| 7 |
+
from keras.src.layers.input_spec import InputSpec
|
| 8 |
+
from keras.src.layers.layer import Layer
|
| 9 |
+
|
| 10 |
+
|
@keras_export("keras.layers.Flatten")
class Flatten(Layer):
    """Flattens the input. Does not affect the batch size.

    Note: If inputs are shaped `(batch,)` without a feature axis, then
    flattening adds an extra channel dimension and output shape is `(batch, 1)`.

    Args:
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch, ..., channels)` while `"channels_first"` corresponds to
            inputs with shape `(batch, channels, ...)`.
            When unspecified, uses `image_data_format` value found in your Keras
            config file at `~/.keras/keras.json` (if exists). Defaults to
            `"channels_last"`.

    Example:

    >>> x = keras.Input(shape=(10, 64))
    >>> y = keras.layers.Flatten()(x)
    >>> y.shape
    (None, 640)
    """

    def __init__(self, data_format=None, **kwargs):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        self.input_spec = InputSpec(min_ndim=1)
        # Cached flag so `call` avoids a string comparison per invocation.
        self._channels_first = self.data_format == "channels_first"

    def call(self, inputs):
        input_shape = inputs.shape
        rank = len(input_shape)

        if self._channels_first and rank > 1:
            # Switch to channels-last format.
            inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1))

        # `None` (unknown) dims become -1 so `reshape` infers them at
        # runtime; `compute_output_shape` yields at most (batch, features).
        output_shape = tuple(
            dim if dim is not None else -1
            for dim in self.compute_output_shape(input_shape)
        )
        return ops.reshape(inputs, output_shape)

    def compute_output_shape(self, input_shape):
        """Collapse all non-batch dims into a single feature dim."""
        non_batch_dims = input_shape[1:]
        if len(non_batch_dims) == 0:
            # Rank-1 input `(batch,)`: add a feature axis of size 1.
            flattened_dim = 1
        elif any(d is None for d in non_batch_dims):
            # NB: we cannot use the shorter `None in non_batch_dims` here b/c
            # torchdynamo errors when calling `__contains__` op with
            # a constant (in this case `None`) operand since it assumes
            # that the elements in the collection are also `ConstantVariable`s
            # but tensor shapes can be `SymNodeVariable`s (e.g. `SymInt`)
            flattened_dim = None
        else:
            flattened_dim = math.prod(non_batch_dims)
        return (input_shape[0], flattened_dim)

    def compute_output_spec(self, inputs):
        # Propagate sparsity: flattening a sparse tensor stays sparse.
        output_shape = self.compute_output_shape(inputs.shape)
        return KerasTensor(
            shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
        )

    def get_config(self):
        """Return the serializable config of the layer."""
        config = {"data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/permute.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.backend.common.keras_tensor import KerasTensor
|
| 4 |
+
from keras.src.layers.input_spec import InputSpec
|
| 5 |
+
from keras.src.layers.layer import Layer
|
| 6 |
+
|
| 7 |
+
|
@keras_export("keras.layers.Permute")
class Permute(Layer):
    """Permutes the dimensions of the input according to a given pattern.

    Useful e.g. connecting RNNs and convnets.

    Args:
        dims: Tuple of integers. Permutation pattern does not include the
            batch dimension. Indexing starts at 1.
            For instance, `(1, 3, 2)` permutes the second and third dimensions
            of the input.

    Input shape:
        Arbitrary.

    Output shape:
        Same as the input shape, but with the dimensions re-ordered according
        to the specified pattern.

    Example:

    >>> x = keras.Input(shape=(10, 64))
    >>> y = keras.layers.Permute((2, 1))(x)
    >>> y.shape
    (None, 64, 10)
    """

    def __init__(self, dims, **kwargs):
        super().__init__(**kwargs)
        self.dims = tuple(dims)
        # The 1-based pattern must be a permutation of 1..len(dims).
        if sorted(dims) != list(range(1, len(dims) + 1)):
            raise ValueError(
                "Invalid permutation argument `dims` for Permute Layer. "
                "The set of indices in `dims` must be consecutive and start "
                f"from 1. Received dims={dims}"
            )
        self.input_spec = InputSpec(ndim=len(self.dims) + 1)

    def compute_output_shape(self, input_shape):
        """Reorder the non-batch dims of `input_shape` per `self.dims`."""
        # dims are 1-based, so they index directly past the batch axis.
        return (input_shape[0], *(input_shape[d] for d in self.dims))

    def compute_output_spec(self, inputs):
        return KerasTensor(
            shape=self.compute_output_shape(inputs.shape),
            dtype=inputs.dtype,
            sparse=inputs.sparse,
        )

    def call(self, inputs):
        # Prepend axis 0 to keep the batch dimension in place.
        return ops.transpose(inputs, axes=(0, *self.dims))

    def get_config(self):
        """Return the serializable config of the layer."""
        return {**super().get_config(), "dims": self.dims}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/repeat_vector.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.input_spec import InputSpec
|
| 4 |
+
from keras.src.layers.layer import Layer
|
| 5 |
+
|
| 6 |
+
|
@keras_export("keras.layers.RepeatVector")
class RepeatVector(Layer):
    """Repeats the input n times.

    Example:

    >>> x = keras.Input(shape=(32,))
    >>> y = keras.layers.RepeatVector(3)(x)
    >>> y.shape
    (None, 3, 32)

    Args:
        n: Integer, repetition factor.

    Input shape:
        2D tensor with shape `(batch_size, features)`.

    Output shape:
        3D tensor with shape `(batch_size, n, features)`.
    """

    def __init__(self, n, **kwargs):
        super().__init__(**kwargs)
        self.n = n
        if not isinstance(n, int):
            raise TypeError(
                f"Expected an integer value for `n`, got {type(n)}."
            )
        self.input_spec = InputSpec(ndim=2)

    def compute_output_shape(self, input_shape):
        """Insert the repetition axis between batch and features."""
        return (input_shape[0], self.n, input_shape[1])

    def call(self, inputs):
        # Expand (batch, features) -> (batch, 1, features), then tile the
        # middle axis `n` times.
        shape = ops.shape(inputs)
        expanded = ops.reshape(inputs, (shape[0], 1, shape[1]))
        return ops.repeat(expanded, self.n, axis=1)

    def get_config(self):
        """Return the serializable config of the layer."""
        return {**super().get_config(), "n": self.n}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/reshape.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.backend.common.keras_tensor import KerasTensor
|
| 4 |
+
from keras.src.layers.layer import Layer
|
| 5 |
+
from keras.src.ops import operation_utils
|
| 6 |
+
|
| 7 |
+
|
@keras_export("keras.layers.Reshape")
class Reshape(Layer):
    """Layer that reshapes inputs into the given shape.

    Args:
        target_shape: Target shape. Tuple of integers, does not include the
            samples dimension (batch size).

    Input shape:
        Arbitrary, although all dimensions in the input shape must be
        known/fixed. Use the keyword argument `input_shape` (tuple of integers,
        does not include the samples/batch size axis) when using this layer as
        the first layer in a model.

    Output shape:
        `(batch_size, *target_shape)`

    Example:

    >>> x = keras.Input(shape=(12,))
    >>> y = keras.layers.Reshape((3, 4))(x)
    >>> y.shape
    (None, 3, 4)

    >>> # also supports shape inference using `-1` as dimension
    >>> y = keras.layers.Reshape((-1, 2, 2))(x)
    >>> y.shape
    (None, 3, 2, 2)
    """

    def __init__(self, target_shape, **kwargs):
        super().__init__(**kwargs)
        self.target_shape = tuple(target_shape)

    def compute_output_shape(self, input_shape):
        # Delegates -1 / None resolution (and validation against the input's
        # element count) to the shared reshape utility.
        return (
            input_shape[0],
            *operation_utils.compute_reshape_output_shape(
                input_shape[1:], self.target_shape, "target_shape"
            ),
        )

    def compute_output_spec(self, inputs):
        output_shape = self.compute_output_shape(inputs.shape)
        return KerasTensor(
            shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
        )

    def build(self, input_shape):
        # Resolve the target shape once at build time: any dimension still
        # unknown (`None`) becomes -1 so the backend `reshape` can infer it.
        sample_output_shape = operation_utils.compute_reshape_output_shape(
            input_shape[1:], self.target_shape, "target_shape"
        )
        self._resolved_target_shape = tuple(
            -1 if d is None else d for d in sample_output_shape
        )
        self.built = True

    def call(self, inputs):
        # Preserve the (possibly dynamic) batch dimension verbatim.
        return ops.reshape(
            inputs, (ops.shape(inputs)[0],) + self._resolved_target_shape
        )

    def get_config(self):
        """Return the serializable config of the layer."""
        config = {"target_shape": self.target_shape}
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling1d.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import ops
|
| 2 |
+
from keras.src.api_export import keras_export
|
| 3 |
+
from keras.src.layers.input_spec import InputSpec
|
| 4 |
+
from keras.src.layers.layer import Layer
|
| 5 |
+
|
| 6 |
+
|
@keras_export("keras.layers.UpSampling1D")
class UpSampling1D(Layer):
    """Upsampling layer for 1D inputs.

    Repeats each temporal step `size` times along the time axis.

    Example:

    >>> input_shape = (2, 2, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> x
    [[[ 0  1  2]
      [ 3  4  5]]
     [[ 6  7  8]
      [ 9 10 11]]]
    >>> y = keras.layers.UpSampling1D(size=2)(x)
    >>> y
    [[[ 0.  1.  2.]
      [ 0.  1.  2.]
      [ 3.  4.  5.]
      [ 3.  4.  5.]]
     [[ 6.  7.  8.]
      [ 6.  7.  8.]
      [ 9. 10. 11.]
      [ 9. 10. 11.]]]

    Args:
        size: Integer. Upsampling factor.

    Input shape:
        3D tensor with shape: `(batch_size, steps, features)`.

    Output shape:
        3D tensor with shape: `(batch_size, upsampled_steps, features)`.
    """

    def __init__(self, size=2, **kwargs):
        super().__init__(**kwargs)
        # Coerce to int so e.g. a float factor of 2.0 works uniformly.
        self.size = int(size)
        self.input_spec = InputSpec(ndim=3)

    def compute_output_shape(self, input_shape):
        # A dynamic (None) time dimension stays dynamic.
        size = (
            self.size * input_shape[1] if input_shape[1] is not None else None
        )
        # NOTE(review): returns a list while sibling reshaping layers return
        # tuples — kept as-is to avoid changing the observable return type.
        return [input_shape[0], size, input_shape[2]]

    def call(self, inputs):
        # Element-wise repeat along the time axis duplicates each step
        # `size` times in place (not a tiling of the whole sequence).
        return ops.repeat(x=inputs, repeats=self.size, axis=1)

    def get_config(self):
        """Return the serializable config of the layer."""
        config = {"size": self.size}
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling2d.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.input_spec import InputSpec
|
| 5 |
+
from keras.src.layers.layer import Layer
|
| 6 |
+
from keras.src.utils import argument_validation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export("keras.layers.UpSampling2D")
|
| 10 |
+
class UpSampling2D(Layer):
|
| 11 |
+
"""Upsampling layer for 2D inputs.
|
| 12 |
+
|
| 13 |
+
The implementation uses interpolative resizing, given the resize method
|
| 14 |
+
(specified by the `interpolation` argument). Use `interpolation=nearest`
|
| 15 |
+
to repeat the rows and columns of the data.
|
| 16 |
+
|
| 17 |
+
Example:
|
| 18 |
+
|
| 19 |
+
>>> input_shape = (2, 2, 1, 3)
|
| 20 |
+
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
|
| 21 |
+
>>> print(x)
|
| 22 |
+
[[[[ 0 1 2]]
|
| 23 |
+
[[ 3 4 5]]]
|
| 24 |
+
[[[ 6 7 8]]
|
| 25 |
+
[[ 9 10 11]]]]
|
| 26 |
+
>>> y = keras.layers.UpSampling2D(size=(1, 2))(x)
|
| 27 |
+
>>> print(y)
|
| 28 |
+
[[[[ 0 1 2]
|
| 29 |
+
[ 0 1 2]]
|
| 30 |
+
[[ 3 4 5]
|
| 31 |
+
[ 3 4 5]]]
|
| 32 |
+
[[[ 6 7 8]
|
| 33 |
+
[ 6 7 8]]
|
| 34 |
+
[[ 9 10 11]
|
| 35 |
+
[ 9 10 11]]]]
|
| 36 |
+
|
| 37 |
+
Args:
|
| 38 |
+
size: Int, or tuple of 2 integers.
|
| 39 |
+
The upsampling factors for rows and columns.
|
| 40 |
+
data_format: A string,
|
| 41 |
+
one of `"channels_last"` (default) or `"channels_first"`.
|
| 42 |
+
The ordering of the dimensions in the inputs.
|
| 43 |
+
`"channels_last"` corresponds to inputs with shape
|
| 44 |
+
`(batch_size, height, width, channels)` while `"channels_first"`
|
| 45 |
+
corresponds to inputs with shape
|
| 46 |
+
`(batch_size, channels, height, width)`.
|
| 47 |
+
When unspecified, uses
|
| 48 |
+
`image_data_format` value found in your Keras config file at
|
| 49 |
+
`~/.keras/keras.json` (if exists) else `"channels_last"`.
|
| 50 |
+
Defaults to `"channels_last"`.
|
| 51 |
+
interpolation: A string, one of `"bicubic"`, `"bilinear"`, `"lanczos3"`,
|
| 52 |
+
`"lanczos5"`, `"nearest"`.
|
| 53 |
+
|
| 54 |
+
Input shape:
|
| 55 |
+
4D tensor with shape:
|
| 56 |
+
- If `data_format` is `"channels_last"`:
|
| 57 |
+
`(batch_size, rows, cols, channels)`
|
| 58 |
+
- If `data_format` is `"channels_first"`:
|
| 59 |
+
`(batch_size, channels, rows, cols)`
|
| 60 |
+
|
| 61 |
+
Output shape:
|
| 62 |
+
4D tensor with shape:
|
| 63 |
+
- If `data_format` is `"channels_last"`:
|
| 64 |
+
`(batch_size, upsampled_rows, upsampled_cols, channels)`
|
| 65 |
+
- If `data_format` is `"channels_first"`:
|
| 66 |
+
`(batch_size, channels, upsampled_rows, upsampled_cols)`
|
| 67 |
+
"""
|
| 68 |
+
|
| 69 |
+
def __init__(
|
| 70 |
+
self, size=(2, 2), data_format=None, interpolation="nearest", **kwargs
|
| 71 |
+
):
|
| 72 |
+
super().__init__(**kwargs)
|
| 73 |
+
self.data_format = backend.standardize_data_format(data_format)
|
| 74 |
+
self.size = argument_validation.standardize_tuple(size, 2, "size")
|
| 75 |
+
self.interpolation = interpolation.lower()
|
| 76 |
+
self.input_spec = InputSpec(ndim=4)
|
| 77 |
+
|
| 78 |
+
def compute_output_shape(self, input_shape):
|
| 79 |
+
if self.data_format == "channels_first":
|
| 80 |
+
height = (
|
| 81 |
+
self.size[0] * input_shape[2]
|
| 82 |
+
if input_shape[2] is not None
|
| 83 |
+
else None
|
| 84 |
+
)
|
| 85 |
+
width = (
|
| 86 |
+
self.size[1] * input_shape[3]
|
| 87 |
+
if input_shape[3] is not None
|
| 88 |
+
else None
|
| 89 |
+
)
|
| 90 |
+
return (input_shape[0], input_shape[1], height, width)
|
| 91 |
+
else:
|
| 92 |
+
height = (
|
| 93 |
+
self.size[0] * input_shape[1]
|
| 94 |
+
if input_shape[1] is not None
|
| 95 |
+
else None
|
| 96 |
+
)
|
| 97 |
+
width = (
|
| 98 |
+
self.size[1] * input_shape[2]
|
| 99 |
+
if input_shape[2] is not None
|
| 100 |
+
else None
|
| 101 |
+
)
|
| 102 |
+
return (input_shape[0], height, width, input_shape[3])
|
| 103 |
+
|
| 104 |
+
def call(self, inputs):
|
| 105 |
+
return self._resize_images(
|
| 106 |
+
inputs,
|
| 107 |
+
self.size[0],
|
| 108 |
+
self.size[1],
|
| 109 |
+
self.data_format,
|
| 110 |
+
interpolation=self.interpolation,
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
def get_config(self):
|
| 114 |
+
config = {
|
| 115 |
+
"size": self.size,
|
| 116 |
+
"data_format": self.data_format,
|
| 117 |
+
"interpolation": self.interpolation,
|
| 118 |
+
}
|
| 119 |
+
base_config = super().get_config()
|
| 120 |
+
return {**base_config, **config}
|
| 121 |
+
|
| 122 |
+
def _resize_images(
|
| 123 |
+
self,
|
| 124 |
+
x,
|
| 125 |
+
height_factor,
|
| 126 |
+
width_factor,
|
| 127 |
+
data_format,
|
| 128 |
+
interpolation="nearest",
|
| 129 |
+
):
|
| 130 |
+
"""Resizes the images contained in a 4D tensor.
|
| 131 |
+
|
| 132 |
+
Args:
|
| 133 |
+
x: Tensor or variable to resize.
|
| 134 |
+
height_factor: Positive integer.
|
| 135 |
+
width_factor: Positive integer.
|
| 136 |
+
data_format: One of `"channels_first"`, `"channels_last"`.
|
| 137 |
+
interpolation: A string, one of `"bicubic"`, `"bilinear"`,
|
| 138 |
+
`"lanczos3"`, `"lanczos5"`, or `"nearest"`.
|
| 139 |
+
|
| 140 |
+
Returns:
|
| 141 |
+
A tensor.
|
| 142 |
+
"""
|
| 143 |
+
if data_format not in {"channels_last", "channels_first"}:
|
| 144 |
+
raise ValueError(f"Invalid `data_format` argument: {data_format}")
|
| 145 |
+
|
| 146 |
+
if data_format == "channels_first":
|
| 147 |
+
x = ops.transpose(x, [0, 2, 3, 1])
|
| 148 |
+
# https://github.com/keras-team/keras/issues/294
|
| 149 |
+
# Use `ops.repeat` for `nearest` interpolation to enable XLA
|
| 150 |
+
if interpolation == "nearest":
|
| 151 |
+
x = ops.repeat(x, height_factor, axis=1)
|
| 152 |
+
x = ops.repeat(x, width_factor, axis=2)
|
| 153 |
+
else:
|
| 154 |
+
# multiply the height and width factor on each dim
|
| 155 |
+
# by hand (versus using element-wise multiplication
|
| 156 |
+
# by np.array([height_factor, width_factor]) then
|
| 157 |
+
# list-ifying the tensor by calling `.tolist()`)
|
| 158 |
+
# since when running under torchdynamo, `new_shape`
|
| 159 |
+
# will be traced as a symbolic variable (specifically
|
| 160 |
+
# a `FakeTensor`) which does not have a `tolist()` method.
|
| 161 |
+
shape = ops.shape(x)
|
| 162 |
+
new_shape = (
|
| 163 |
+
shape[1] * height_factor,
|
| 164 |
+
shape[2] * width_factor,
|
| 165 |
+
)
|
| 166 |
+
x = ops.image.resize(x, new_shape, interpolation=interpolation)
|
| 167 |
+
if data_format == "channels_first":
|
| 168 |
+
x = ops.transpose(x, [0, 3, 1, 2])
|
| 169 |
+
|
| 170 |
+
return x
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling3d.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.input_spec import InputSpec
|
| 5 |
+
from keras.src.layers.layer import Layer
|
| 6 |
+
from keras.src.utils import argument_validation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export("keras.layers.UpSampling3D")
class UpSampling3D(Layer):
    """Upsampling layer for 3D inputs.

    Repeats the 1st, 2nd and 3rd dimensions
    of the data by `size[0]`, `size[1]` and `size[2]` respectively.

    Example:

    >>> input_shape = (2, 1, 2, 1, 3)
    >>> x = np.ones(input_shape)
    >>> y = keras.layers.UpSampling3D(size=(2, 2, 2))(x)
    >>> y.shape
    (2, 2, 4, 2, 3)

    Args:
        size: Int, or tuple of 3 integers.
            The upsampling factors for dim1, dim2 and dim3.
        data_format: A string,
            one of `"channels_last"` (default) or `"channels_first"`.
            The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            When unspecified, uses
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json` (if exists) else `"channels_last"`.
            Defaults to `"channels_last"`.

    Input shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, dim1, dim2, dim3, channels)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, channels, dim1, dim2, dim3)`

    Output shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3,
            channels)`
        - If `data_format` is `"channels_first"`:
            `(batch_size, channels, upsampled_dim1, upsampled_dim2,
            upsampled_dim3)`
    """

    def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        self.size = argument_validation.standardize_tuple(size, 3, "size")
        # Only 5D (batched volume) inputs are accepted.
        self.input_spec = InputSpec(ndim=5)

    @staticmethod
    def _scale_dim(dim, factor):
        """Multiply a static dimension by `factor`; unknown (`None`)
        dimensions stay unknown."""
        return None if dim is None else factor * dim

    def compute_output_shape(self, input_shape):
        """Return `input_shape` with the three spatial dims scaled by
        `self.size`."""
        if self.data_format == "channels_first":
            # Layout: (batch, channels, dim1, dim2, dim3).
            return (
                input_shape[0],
                input_shape[1],
                self._scale_dim(input_shape[2], self.size[0]),
                self._scale_dim(input_shape[3], self.size[1]),
                self._scale_dim(input_shape[4], self.size[2]),
            )
        # Layout: (batch, dim1, dim2, dim3, channels).
        return (
            input_shape[0],
            self._scale_dim(input_shape[1], self.size[0]),
            self._scale_dim(input_shape[2], self.size[1]),
            self._scale_dim(input_shape[3], self.size[2]),
            input_shape[4],
        )

    def call(self, inputs):
        return self._resize_volumes(
            inputs, self.size[0], self.size[1], self.size[2], self.data_format
        )

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = {"size": self.size, "data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}

    def _resize_volumes(
        self, x, depth_factor, height_factor, width_factor, data_format
    ):
        """Resizes the volume contained in a 5D tensor.

        Args:
            x: Tensor or variable to resize.
            depth_factor: Positive integer.
            height_factor: Positive integer.
            width_factor: Positive integer.
            data_format: One of `"channels_first"`, `"channels_last"`.

        Returns:
            Resized tensor.

        Raises:
            ValueError: If `data_format` is not one of the two supported
                layouts.
        """
        if data_format == "channels_first":
            first_spatial_axis = 2
        elif data_format == "channels_last":
            first_spatial_axis = 1
        else:
            raise ValueError(f"Invalid data_format: {data_format}")
        # Integer upsampling is a plain element repeat along each of the
        # three consecutive spatial axes.
        output = ops.repeat(x, depth_factor, axis=first_spatial_axis)
        output = ops.repeat(output, height_factor, axis=first_spatial_axis + 1)
        output = ops.repeat(output, width_factor, axis=first_spatial_axis + 2)
        return output
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding1d.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.input_spec import InputSpec
|
| 5 |
+
from keras.src.layers.layer import Layer
|
| 6 |
+
from keras.src.utils import argument_validation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export("keras.layers.ZeroPadding1D")
class ZeroPadding1D(Layer):
    """Zero-padding layer for 1D input (e.g. temporal sequence).

    Example:

    >>> input_shape = (2, 2, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> x
    [[[ 0  1  2]
      [ 3  4  5]]
     [[ 6  7  8]
      [ 9 10 11]]]
    >>> y = keras.layers.ZeroPadding1D(padding=2)(x)
    >>> y
    [[[ 0  0  0]
      [ 0  0  0]
      [ 0  1  2]
      [ 3  4  5]
      [ 0  0  0]
      [ 0  0  0]]
     [[ 0  0  0]
      [ 0  0  0]
      [ 6  7  8]
      [ 9 10 11]
      [ 0  0  0]
      [ 0  0  0]]]

    Args:
        padding: Int, or tuple of int (length 2), or dictionary.
            - If int: how many zeros to add at the beginning and end of
              the padding dimension (axis 1).
            - If tuple of 2 ints: how many zeros to add at the beginning and
              the end of the padding dimension (`(left_pad, right_pad)`).
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, axis_to_pad, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch_size, channels, axis_to_pad)`.
            When unspecified, uses `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json` (if exists). Defaults
            to `"channels_last"`.

    Input shape:
        3D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, axis_to_pad, features)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, features, axis_to_pad)`

    Output shape:
        3D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, padded_axis, features)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, features, padded_axis)`
    """

    def __init__(self, padding=1, data_format=None, **kwargs):
        super().__init__(**kwargs)
        # Canonicalize `padding` into a `(left, right)` pair; a bare int is
        # expanded symmetrically and zero entries are allowed.
        self.padding = argument_validation.standardize_tuple(
            padding, 2, "padding", allow_zero=True
        )
        self.data_format = backend.standardize_data_format(data_format)
        self.input_spec = InputSpec(ndim=3)

    def compute_output_shape(self, input_shape):
        """Grow the padded axis by the total amount of padding."""
        padded_axis = 2 if self.data_format == "channels_first" else 1
        result = list(input_shape)
        if result[padded_axis] is not None:
            result[padded_axis] += self.padding[0] + self.padding[1]
        return tuple(result)

    def call(self, inputs):
        """Pad the temporal axis of `inputs` with zeros."""
        no_pad = (0, 0)
        if self.data_format == "channels_first":
            pad_config = (no_pad, no_pad, self.padding)
        else:
            pad_config = (no_pad, self.padding, no_pad)
        return ops.pad(inputs, pad_config)

    def get_config(self):
        """Return the serializable configuration of this layer."""
        return {
            **super().get_config(),
            "padding": self.padding,
            "data_format": self.data_format,
        }
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding2d.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.input_spec import InputSpec
|
| 5 |
+
from keras.src.layers.layer import Layer
|
| 6 |
+
from keras.src.utils import argument_validation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export("keras.layers.ZeroPadding2D")
class ZeroPadding2D(Layer):
    """Zero-padding layer for 2D input (e.g. picture).

    This layer can add rows and columns of zeros at the top, bottom, left and
    right side of an image tensor.

    Example:

    >>> input_shape = (1, 1, 2, 2)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> x
    [[[[0 1]
       [2 3]]]]
    >>> y = keras.layers.ZeroPadding2D(padding=1)(x)
    >>> y
    [[[[0 0]
       [0 0]
       [0 0]
       [0 0]]
      [[0 0]
       [0 1]
       [2 3]
       [0 0]]
      [[0 0]
       [0 0]
       [0 0]
       [0 0]]]]

    Args:
        padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
            - If int: the same symmetric padding is applied to height and
              width.
            - If tuple of 2 ints: interpreted as two different symmetric
              padding values for height and width:
              `(symmetric_height_pad, symmetric_width_pad)`.
            - If tuple of 2 tuples of 2 ints: interpreted as
              `((top_pad, bottom_pad), (left_pad, right_pad))`.
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, height, width, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch_size, channels, height, width)`.
            When unspecified, uses `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json` (if exists). Defaults
            to `"channels_last"`.

    Input shape:
        4D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, height, width, channels)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, channels, height, width)`

    Output shape:
        4D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, padded_height, padded_width, channels)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, channels, padded_height, padded_width)`
    """

    def __init__(self, padding=(1, 1), data_format=None, **kwargs):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        if isinstance(padding, int):
            # A single int means the same symmetric padding on both axes.
            self.padding = ((padding, padding), (padding, padding))
        elif hasattr(padding, "__len__"):
            if len(padding) != 2:
                raise ValueError(
                    "`padding` should have two elements. "
                    f"Received: padding={padding}."
                )
            height_padding = argument_validation.standardize_tuple(
                padding[0], 2, "1st entry of padding", allow_zero=True
            )
            width_padding = argument_validation.standardize_tuple(
                padding[1], 2, "2nd entry of padding", allow_zero=True
            )
            self.padding = (height_padding, width_padding)
        else:
            # Fixed: the message previously used cropping-layer terminology
            # ("symmetric_height_crop", "top_crop", ...); this is a padding
            # layer, so describe the accepted forms with "pad" names as the
            # class docstring does.
            raise ValueError(
                "`padding` should be either an int, a tuple of 2 ints "
                "(symmetric_height_pad, symmetric_width_pad), "
                "or a tuple of 2 tuples of 2 ints "
                "((top_pad, bottom_pad), (left_pad, right_pad)). "
                f"Received: padding={padding}."
            )
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        """Grow each spatial dim by the total padding applied to it."""
        output_shape = list(input_shape)
        spatial_dims_offset = 2 if self.data_format == "channels_first" else 1
        for index in range(0, 2):
            if output_shape[index + spatial_dims_offset] is not None:
                output_shape[index + spatial_dims_offset] += (
                    self.padding[index][0] + self.padding[index][1]
                )
        return tuple(output_shape)

    def call(self, inputs):
        """Pad the two spatial axes of `inputs` with zeros."""
        if self.data_format == "channels_first":
            all_dims_padding = ((0, 0), (0, 0), *self.padding)
        else:
            all_dims_padding = ((0, 0), *self.padding, (0, 0))
        return ops.pad(inputs, all_dims_padding)

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = {"padding": self.padding, "data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding3d.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.layers.input_spec import InputSpec
|
| 5 |
+
from keras.src.layers.layer import Layer
|
| 6 |
+
from keras.src.utils import argument_validation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export("keras.layers.ZeroPadding3D")
class ZeroPadding3D(Layer):
    """Zero-padding layer for 3D data (spatial or spatio-temporal).

    Example:

    >>> input_shape = (1, 1, 2, 2, 3)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> y = keras.layers.ZeroPadding3D(padding=2)(x)
    >>> y.shape
    (1, 5, 6, 6, 3)

    Args:
        padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
            - If int: the same symmetric padding is applied to depth, height,
              and width.
            - If tuple of 3 ints: interpreted as three different symmetric
              padding values for depth, height, and width:
              `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
            - If tuple of 3 tuples of 2 ints: interpreted as
              `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
              right_dim2_pad), (left_dim3_pad, right_dim3_pad))`.
        data_format: A string, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            When unspecified, uses `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json` (if exists). Defaults
            to `"channels_last"`.

    Input shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, first_axis_to_pad, second_axis_to_pad,
          third_axis_to_pad, depth)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,
          third_axis_to_pad)`

    Output shape:
        5D tensor with shape:
        - If `data_format` is `"channels_last"`:
          `(batch_size, first_padded_axis, second_padded_axis,
          third_padded_axis, depth)`
        - If `data_format` is `"channels_first"`:
          `(batch_size, depth, first_padded_axis, second_padded_axis,
          third_padded_axis)`
    """

    def __init__(
        self, padding=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.data_format = backend.standardize_data_format(data_format)
        if isinstance(padding, int):
            # A single int means the same symmetric padding on all 3 axes.
            self.padding = (
                (padding, padding),
                (padding, padding),
                (padding, padding),
            )
        elif hasattr(padding, "__len__"):
            if len(padding) != 3:
                raise ValueError(
                    f"`padding` should have 3 elements. Received: {padding}."
                )
            dim1_padding = argument_validation.standardize_tuple(
                padding[0], 2, "1st entry of padding", allow_zero=True
            )
            dim2_padding = argument_validation.standardize_tuple(
                padding[1], 2, "2nd entry of padding", allow_zero=True
            )
            dim3_padding = argument_validation.standardize_tuple(
                padding[2], 2, "3rd entry of padding", allow_zero=True
            )
            self.padding = (dim1_padding, dim2_padding, dim3_padding)
        else:
            # Fixed: the last pair previously read "right_dim2_pad" twice;
            # the third entry pads dim3, so it is "right_dim3_pad".
            raise ValueError(
                "`padding` should be either an int, a tuple of 3 ints "
                "(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), "
                "or a tuple of 3 tuples of 2 ints "
                "((left_dim1_pad, right_dim1_pad),"
                " (left_dim2_pad, right_dim2_pad),"
                " (left_dim3_pad, right_dim3_pad)). "
                f"Received: padding={padding}."
            )
        self.input_spec = InputSpec(ndim=5)

    def compute_output_shape(self, input_shape):
        """Grow each spatial dim by the total padding applied to it."""
        output_shape = list(input_shape)
        spatial_dims_offset = 2 if self.data_format == "channels_first" else 1
        for index in range(0, 3):
            if output_shape[index + spatial_dims_offset] is not None:
                output_shape[index + spatial_dims_offset] += (
                    self.padding[index][0] + self.padding[index][1]
                )
        return tuple(output_shape)

    def call(self, inputs):
        """Pad the three spatial axes of `inputs` with zeros."""
        if self.data_format == "channels_first":
            all_dims_padding = ((0, 0), (0, 0), *self.padding)
        else:
            all_dims_padding = ((0, 0), *self.padding, (0, 0))
        return ops.pad(inputs, all_dims_padding)

    def get_config(self):
        """Return the serializable configuration of this layer."""
        config = {"padding": self.padding, "data_format": self.data_format}
        base_config = super().get_config()
        return {**base_config, **config}
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (196 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/bidirectional.cpython-310.pyc
ADDED
|
Binary file (9.87 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm.cpython-310.pyc
ADDED
|
Binary file (21.7 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm1d.cpython-310.pyc
ADDED
|
Binary file (7.87 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm2d.cpython-310.pyc
ADDED
|
Binary file (7.95 kB). View file
|
|
|