diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..59f241cbaf23b1cf21c76294cf9695ae69691f70 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__init__.py @@ -0,0 +1,234 @@ +from keras.src.api_export import keras_export +from keras.src.layers.activations.activation import Activation +from keras.src.layers.activations.elu import ELU +from keras.src.layers.activations.leaky_relu import LeakyReLU +from keras.src.layers.activations.prelu import PReLU +from keras.src.layers.activations.relu import ReLU +from keras.src.layers.activations.softmax import Softmax +from keras.src.layers.attention.additive_attention import AdditiveAttention +from keras.src.layers.attention.attention import Attention +from keras.src.layers.attention.grouped_query_attention import ( + GroupedQueryAttention, +) +from keras.src.layers.attention.multi_head_attention import MultiHeadAttention +from keras.src.layers.convolutional.conv1d import Conv1D +from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose +from keras.src.layers.convolutional.conv2d import Conv2D +from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose +from keras.src.layers.convolutional.conv3d import Conv3D +from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose +from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D +from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D +from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D +from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D +from keras.src.layers.core.dense import Dense +from keras.src.layers.core.einsum_dense import EinsumDense +from keras.src.layers.core.embedding import 
Embedding +from keras.src.layers.core.identity import Identity +from keras.src.layers.core.input_layer import Input +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.core.lambda_layer import Lambda +from keras.src.layers.core.masking import Masking +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.layers.merging.add import Add +from keras.src.layers.merging.add import add +from keras.src.layers.merging.average import Average +from keras.src.layers.merging.average import average +from keras.src.layers.merging.concatenate import Concatenate +from keras.src.layers.merging.concatenate import concatenate +from keras.src.layers.merging.dot import Dot +from keras.src.layers.merging.dot import dot +from keras.src.layers.merging.maximum import Maximum +from keras.src.layers.merging.maximum import maximum +from keras.src.layers.merging.minimum import Minimum +from keras.src.layers.merging.minimum import minimum +from keras.src.layers.merging.multiply import Multiply +from keras.src.layers.merging.multiply import multiply +from keras.src.layers.merging.subtract import Subtract +from keras.src.layers.merging.subtract import subtract +from keras.src.layers.normalization.batch_normalization import ( + BatchNormalization, +) +from keras.src.layers.normalization.group_normalization import ( + GroupNormalization, +) +from keras.src.layers.normalization.layer_normalization import ( + LayerNormalization, +) +from keras.src.layers.normalization.spectral_normalization import ( + SpectralNormalization, +) +from keras.src.layers.normalization.unit_normalization import UnitNormalization +from keras.src.layers.pooling.average_pooling1d import AveragePooling1D +from keras.src.layers.pooling.average_pooling2d import AveragePooling2D +from keras.src.layers.pooling.average_pooling3d import AveragePooling3D +from 
keras.src.layers.pooling.global_average_pooling1d import ( + GlobalAveragePooling1D, +) +from keras.src.layers.pooling.global_average_pooling2d import ( + GlobalAveragePooling2D, +) +from keras.src.layers.pooling.global_average_pooling3d import ( + GlobalAveragePooling3D, +) +from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D +from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D +from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D +from keras.src.layers.pooling.max_pooling1d import MaxPooling1D +from keras.src.layers.pooling.max_pooling2d import MaxPooling2D +from keras.src.layers.pooling.max_pooling3d import MaxPooling3D +from keras.src.layers.preprocessing.category_encoding import CategoryEncoding +from keras.src.layers.preprocessing.discretization import Discretization +from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing +from keras.src.layers.preprocessing.hashing import Hashing +from keras.src.layers.preprocessing.image_preprocessing.auto_contrast import ( + AutoContrast, +) +from keras.src.layers.preprocessing.image_preprocessing.center_crop import ( + CenterCrop, +) +from keras.src.layers.preprocessing.image_preprocessing.equalization import ( + Equalization, +) +from keras.src.layers.preprocessing.image_preprocessing.max_num_bounding_box import ( + MaxNumBoundingBoxes, +) +from keras.src.layers.preprocessing.image_preprocessing.mix_up import MixUp +from keras.src.layers.preprocessing.image_preprocessing.rand_augment import ( + RandAugment, +) +from keras.src.layers.preprocessing.image_preprocessing.random_brightness import ( + RandomBrightness, +) +from keras.src.layers.preprocessing.image_preprocessing.random_color_degeneration import ( + RandomColorDegeneration, +) +from keras.src.layers.preprocessing.image_preprocessing.random_color_jitter import ( + RandomColorJitter, +) +from keras.src.layers.preprocessing.image_preprocessing.random_contrast import ( + 
RandomContrast, +) +from keras.src.layers.preprocessing.image_preprocessing.random_crop import ( + RandomCrop, +) +from keras.src.layers.preprocessing.image_preprocessing.random_flip import ( + RandomFlip, +) +from keras.src.layers.preprocessing.image_preprocessing.random_grayscale import ( + RandomGrayscale, +) +from keras.src.layers.preprocessing.image_preprocessing.random_hue import ( + RandomHue, +) +from keras.src.layers.preprocessing.image_preprocessing.random_posterization import ( + RandomPosterization, +) +from keras.src.layers.preprocessing.image_preprocessing.random_rotation import ( + RandomRotation, +) +from keras.src.layers.preprocessing.image_preprocessing.random_saturation import ( + RandomSaturation, +) +from keras.src.layers.preprocessing.image_preprocessing.random_sharpness import ( + RandomSharpness, +) +from keras.src.layers.preprocessing.image_preprocessing.random_shear import ( + RandomShear, +) +from keras.src.layers.preprocessing.image_preprocessing.random_translation import ( + RandomTranslation, +) +from keras.src.layers.preprocessing.image_preprocessing.random_zoom import ( + RandomZoom, +) +from keras.src.layers.preprocessing.image_preprocessing.resizing import Resizing +from keras.src.layers.preprocessing.image_preprocessing.solarization import ( + Solarization, +) +from keras.src.layers.preprocessing.index_lookup import IndexLookup +from keras.src.layers.preprocessing.integer_lookup import IntegerLookup +from keras.src.layers.preprocessing.mel_spectrogram import MelSpectrogram +from keras.src.layers.preprocessing.normalization import Normalization +from keras.src.layers.preprocessing.pipeline import Pipeline +from keras.src.layers.preprocessing.rescaling import Rescaling +from keras.src.layers.preprocessing.stft_spectrogram import STFTSpectrogram +from keras.src.layers.preprocessing.string_lookup import StringLookup +from keras.src.layers.preprocessing.text_vectorization import TextVectorization +from 
keras.src.layers.regularization.activity_regularization import ( + ActivityRegularization, +) +from keras.src.layers.regularization.alpha_dropout import AlphaDropout +from keras.src.layers.regularization.dropout import Dropout +from keras.src.layers.regularization.gaussian_dropout import GaussianDropout +from keras.src.layers.regularization.gaussian_noise import GaussianNoise +from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D +from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D +from keras.src.layers.reshaping.cropping1d import Cropping1D +from keras.src.layers.reshaping.cropping2d import Cropping2D +from keras.src.layers.reshaping.cropping3d import Cropping3D +from keras.src.layers.reshaping.flatten import Flatten +from keras.src.layers.reshaping.permute import Permute +from keras.src.layers.reshaping.repeat_vector import RepeatVector +from keras.src.layers.reshaping.reshape import Reshape +from keras.src.layers.reshaping.up_sampling1d import UpSampling1D +from keras.src.layers.reshaping.up_sampling2d import UpSampling2D +from keras.src.layers.reshaping.up_sampling3d import UpSampling3D +from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D +from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D +from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D +from keras.src.layers.rnn.bidirectional import Bidirectional +from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D +from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D +from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D +from keras.src.layers.rnn.gru import GRU +from keras.src.layers.rnn.gru import GRUCell +from keras.src.layers.rnn.lstm import LSTM +from keras.src.layers.rnn.lstm import LSTMCell +from keras.src.layers.rnn.rnn import RNN +from keras.src.layers.rnn.simple_rnn import SimpleRNN +from keras.src.layers.rnn.simple_rnn import 
SimpleRNNCell +from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells +from keras.src.layers.rnn.time_distributed import TimeDistributed +from keras.src.saving import serialization_lib + + +@keras_export("keras.layers.serialize") +def serialize(layer): + """Returns the layer configuration as a Python dict. + + Args: + layer: A `keras.layers.Layer` instance to serialize. + + Returns: + Python dict which contains the configuration of the layer. + """ + return serialization_lib.serialize_keras_object(layer) + + +@keras_export("keras.layers.deserialize") +def deserialize(config, custom_objects=None): + """Returns a Keras layer object via its configuration. + + Args: + config: A python dict containing a serialized layer configuration. + custom_objects: Optional dictionary mapping names (strings) to custom + objects (classes and functions) to be considered during + deserialization. + + Returns: + A Keras layer instance. + """ + obj = serialization_lib.deserialize_keras_object( + config, + custom_objects=custom_objects, + ) + if not isinstance(obj, Layer): + raise ValueError( + "`keras.layers.deserialize` was passed a `config` object that is " + f"not a `keras.layers.Layer`. 
Received: {config}" + ) + return obj diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eef06174b7bbc663014638d746869f8d145985b2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/input_spec.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/input_spec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bd112b9e836c685435aef7c0d16125f41551a84 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/input_spec.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/layer.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45dc2b2d269c8f0e45b602fa1f9562fff3e42bc8 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/__pycache__/layer.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/input_spec.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/input_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..25e4c8d9cda4f6eeb9128b93193816115fb03e31 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/input_spec.py @@ -0,0 +1,250 @@ +from keras.src 
import backend +from keras.src import tree +from keras.src.api_export import keras_export + + +@keras_export(["keras.InputSpec", "keras.layers.InputSpec"]) +class InputSpec: + """Specifies the rank, dtype and shape of every input to a layer. + + Layers can expose (if appropriate) an `input_spec` attribute: + an instance of `InputSpec`, or a nested structure of `InputSpec` instances + (one per input tensor). These objects enable the layer to run input + compatibility checks for input structure, input rank, input shape, and + input dtype for the first argument of `Layer.__call__`. + + A `None` entry in a shape is compatible with any dimension. + + Args: + dtype: Expected dtype of the input. + shape: Shape tuple, expected shape of the input + (may include `None` for dynamic axes). + Includes the batch size. + ndim: Integer, expected rank of the input. + max_ndim: Integer, maximum rank of the input. + min_ndim: Integer, minimum rank of the input. + axes: Dictionary mapping integer axes to + a specific dimension value. + allow_last_axis_squeeze: If `True`, allow inputs of rank N+1 as long + as the last axis of the input is 1, as well as inputs of rank N-1 + as long as the last axis of the spec is 1. + name: Expected key corresponding to this input when passing data as + a dictionary. + optional: Boolean, whether the input is optional or not. + An optional input can accept `None` values. + + Example: + + ```python + class MyLayer(Layer): + def __init__(self): + super().__init__() + # The layer will accept inputs with + # shape (*, 28, 28) & (*, 28, 28, 1) + # and raise an appropriate error message otherwise. 
+ self.input_spec = InputSpec( + shape=(None, 28, 28, 1), + allow_last_axis_squeeze=True) + ``` + """ + + def __init__( + self, + dtype=None, + shape=None, + ndim=None, + max_ndim=None, + min_ndim=None, + axes=None, + allow_last_axis_squeeze=False, + name=None, + optional=False, + ): + self.dtype = ( + backend.standardize_dtype(dtype) if dtype is not None else None + ) + if shape is not None: + self.shape = backend.standardize_shape(shape) + self.ndim = len(shape) + else: + self.ndim = ndim + self.shape = None + self.max_ndim = max_ndim + self.min_ndim = min_ndim + self.name = name + self.optional = optional + self.allow_last_axis_squeeze = allow_last_axis_squeeze + try: + axes = axes or {} + self.axes = {int(k): axes[k] for k in axes} + except (ValueError, TypeError): + raise TypeError( + "Argument `axes` must be a dict with integer keys. " + f"Received: axes={axes}" + ) + + if self.axes and (self.ndim is not None or self.max_ndim is not None): + max_dim = (self.ndim if self.ndim else self.max_ndim) - 1 + max_axis = max(self.axes) + if max_axis > max_dim: + raise ValueError( + "Axis {} is greater than the maximum " + "allowed value: {}".format(max_axis, max_dim) + ) + + def __repr__(self): + spec = [ + ("dtype=" + str(self.dtype)) if self.dtype else "", + ("shape=" + str(self.shape)) if self.shape else "", + ("ndim=" + str(self.ndim)) if self.ndim else "", + ("max_ndim=" + str(self.max_ndim)) if self.max_ndim else "", + ("min_ndim=" + str(self.min_ndim)) if self.min_ndim else "", + ("axes=" + str(self.axes)) if self.axes else "", + ] + return f"InputSpec({', '.join(x for x in spec if x)})" + + def get_config(self): + return { + "dtype": self.dtype, + "shape": self.shape, + "ndim": self.ndim, + "max_ndim": self.max_ndim, + "min_ndim": self.min_ndim, + "axes": self.axes, + } + + @classmethod + def from_config(cls, config): + return cls(**config) + + +def assert_input_compatibility(input_spec, inputs, layer_name): + """Checks compatibility between the layer and 
provided inputs. + + This checks that the tensor(s) `inputs` verify the input assumptions + of a layer (if any). If not, a clear and actional exception gets raised. + + Args: + input_spec: An InputSpec instance, list of InputSpec instances, a nested + structure of InputSpec instances, or None. + inputs: Input tensor, list of input tensors, or a nested structure of + input tensors. + layer_name: String, name of the layer (for error message formatting). + + Raises: + ValueError: in case of mismatch between + the provided inputs and the expectations of the layer. + """ + if not input_spec: + return + + input_spec = tree.flatten(input_spec) + if isinstance(inputs, dict): + # Flatten `inputs` by reference order if input spec names are provided + names = [spec.name for spec in input_spec] + if all(names): + list_inputs = [] + for name in names: + if name not in inputs: + raise ValueError( + f'Missing data for input "{name}". ' + "You passed a data dictionary with keys " + f"{list(inputs.keys())}. " + f"Expected the following keys: {names}" + ) + list_inputs.append(inputs[name]) + inputs = list_inputs + + inputs = tree.flatten(inputs) + if len(inputs) != len(input_spec): + raise ValueError( + f'Layer "{layer_name}" expects {len(input_spec)} input(s),' + f" but it received {len(inputs)} input tensors. " + f"Inputs received: {inputs}" + ) + for input_index, (x, spec) in enumerate(zip(inputs, input_spec)): + if spec is None: + continue + if x is None and spec.optional: + continue + + # Having a shape/dtype is the only commonality of the various + # tensor-like objects that may be passed. The most common kind of + # invalid type we are guarding for is a Layer instance (Functional API), + # which does not have a `shape` attribute. + if not hasattr(x, "shape"): + raise ValueError( + f"Inputs to a layer should be tensors. Got '{x}' " + f"(of type {type(x)}) as input for layer '{layer_name}'." + ) + + shape = backend.standardize_shape(x.shape) + ndim = len(shape) + # Check ndim. 
+ if spec.ndim is not None and not spec.allow_last_axis_squeeze: + if ndim != spec.ndim: + raise ValueError( + f'Input {input_index} of layer "{layer_name}" ' + "is incompatible with the layer: " + f"expected ndim={spec.ndim}, found ndim={ndim}. " + f"Full shape received: {shape}" + ) + if spec.max_ndim is not None: + if ndim is not None and ndim > spec.max_ndim: + raise ValueError( + f'Input {input_index} of layer "{layer_name}" ' + "is incompatible with the layer: " + f"expected max_ndim={spec.max_ndim}, " + f"found ndim={ndim}" + ) + if spec.min_ndim is not None: + if ndim is not None and ndim < spec.min_ndim: + raise ValueError( + f'Input {input_index} of layer "{layer_name}" ' + "is incompatible with the layer: " + f"expected min_ndim={spec.min_ndim}, " + f"found ndim={ndim}. " + f"Full shape received: {shape}" + ) + # Check dtype. + if spec.dtype is not None: + dtype = backend.standardize_dtype(x.dtype) + if dtype != spec.dtype: + raise ValueError( + f'Input {input_index} of layer "{layer_name}" ' + "is incompatible with the layer: " + f"expected dtype={spec.dtype}, " + f"found dtype={dtype}" + ) + + # Check specific shape axes. + if spec.axes: + for axis, value in spec.axes.items(): + if value is not None and shape[axis] not in { + value, + None, + }: + raise ValueError( + f'Input {input_index} of layer "{layer_name}" is ' + f"incompatible with the layer: expected axis {axis} " + f"of input shape to have value {value}, " + "but received input with " + f"shape {shape}" + ) + # Check shape. 
+ if spec.shape is not None: + spec_shape = spec.shape + if spec.allow_last_axis_squeeze: + if shape and shape[-1] == 1: + shape = shape[:-1] + if spec_shape and spec_shape[-1] == 1: + spec_shape = spec_shape[:-1] + for spec_dim, dim in zip(spec_shape, shape): + if spec_dim is not None and dim is not None: + if spec_dim != dim: + raise ValueError( + f'Input {input_index} of layer "{layer_name}" is ' + "incompatible with the layer: " + f"expected shape={spec.shape}, " + f"found shape={shape}" + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/layer.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..8e36bb20456bfeb837cef5fdff04028fa2eca781 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/layer.py @@ -0,0 +1,1759 @@ +"""Layer is an Operation with state. + +Takes care of: + +- Weights / variables (and tracking thereof) +- deferred build +- trainable argument value inference +- masking +- autocasting + +And some more magic: + +- add_loss +- metric tracking +- RNG seed tracking +- activity regularization +""" + +import collections +import inspect +import warnings +from functools import wraps + +from keras.src import backend +from keras.src import constraints +from keras.src import dtype_policies +from keras.src import initializers +from keras.src import regularizers +from keras.src import tree +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.backend import KerasTensor +from keras.src.backend.common import global_state +from keras.src.backend.common.name_scope import current_path +from keras.src.backend.common.symbolic_scope import in_symbolic_scope +from keras.src.distribution import distribution_lib +from keras.src.dtype_policies import DTypePolicyMap +from keras.src.layers import input_spec +from keras.src.metrics.metric import 
Metric +from keras.src.ops.operation import Operation +from keras.src.saving.keras_saveable import KerasSaveable +from keras.src.utils import python_utils +from keras.src.utils import summary_utils +from keras.src.utils import traceback_utils +from keras.src.utils import tracking + +if backend.backend() == "tensorflow": + from keras.src.backend.tensorflow.layer import TFLayer as BackendLayer +elif backend.backend() == "jax": + from keras.src.backend.jax.layer import JaxLayer as BackendLayer +elif backend.backend() == "torch": + from keras.src.backend.torch.layer import TorchLayer as BackendLayer +elif backend.backend() == "numpy": + from keras.src.backend.numpy.layer import NumpyLayer as BackendLayer +elif backend.backend() == "openvino": + from keras.src.backend.openvino.layer import OpenvinoLayer as BackendLayer +else: + raise RuntimeError( + f"Backend '{backend.backend()}' must implement a layer mixin class." + ) + + +@keras_export(["keras.Layer", "keras.layers.Layer"]) +class Layer(BackendLayer, Operation, KerasSaveable): + """This is the class from which all layers inherit. + + A layer is a callable object that takes as input one or more tensors and + that outputs one or more tensors. It involves *computation*, defined + in the `call()` method, and a *state* (weight variables). State can be + created: + + * in `__init__()`, for instance via `self.add_weight()`; + * in the optional `build()` method, which is invoked by the first + `__call__()` to the layer, and supplies the shape(s) of the input(s), + which may not have been known at initialization time. + + Layers are recursively composable: If you assign a Layer instance as an + attribute of another Layer, the outer layer will start tracking the weights + created by the inner layer. Nested layers should be instantiated in the + `__init__()` method or `build()` method. + + Users will just instantiate a layer and then treat it as a callable. 
+ + Args: + trainable: Boolean, whether the layer's variables should be trainable. + name: String name of the layer. + dtype: The dtype of the layer's computations and weights. Can also be a + `keras.DTypePolicy`, + which allows the computation and + weight dtype to differ. Defaults to `None`. `None` means to use + `keras.config.dtype_policy()`, + which is a `float32` policy unless set to different value + (via `keras.config.set_dtype_policy()`). + + Attributes: + name: The name of the layer (string). + dtype: Dtype of the layer's weights. Alias of `layer.variable_dtype`. + variable_dtype: Dtype of the layer's weights. + compute_dtype: The dtype of the layer's computations. + Layers automatically cast inputs to this dtype, which causes + the computations and output to also be in this dtype. + When mixed precision is used with a + `keras.DTypePolicy`, this will be different + than `variable_dtype`. + trainable_weights: List of variables to be included in backprop. + non_trainable_weights: List of variables that should not be + included in backprop. + weights: The concatenation of the lists trainable_weights and + non_trainable_weights (in this order). + trainable: Whether the layer should be trained (boolean), i.e. + whether its potentially-trainable weights should be returned + as part of `layer.trainable_weights`. + input_spec: Optional (list of) `InputSpec` object(s) specifying the + constraints on inputs that can be accepted by the layer. + + We recommend that descendants of `Layer` implement the following methods: + + * `__init__()`: Defines custom layer attributes, and creates layer weights + that do not depend on input shapes, using `add_weight()`, + or other state. + * `build(self, input_shape)`: This method can be used to create weights that + depend on the shape(s) of the input(s), using `add_weight()`, or other + state. `__call__()` will automatically build the layer + (if it has not been built yet) by calling `build()`. 
+ * `call(self, *args, **kwargs)`: Called in `__call__` after making + sure `build()` has been called. `call()` performs the logic of applying + the layer to the input arguments. + Two reserved keyword arguments you can optionally use in `call()` are: + 1. `training` (boolean, whether the call is in inference mode or + training mode). + 2. `mask` (boolean tensor encoding masked timesteps in the input, + used e.g. in RNN layers). + A typical signature for this method is `call(self, inputs)`, and user + could optionally add `training` and `mask` if the layer need them. + * `get_config(self)`: Returns a dictionary containing the configuration + used to initialize this layer. If the keys differ from the arguments + in `__init__()`, then override `from_config(self)` as well. + This method is used when saving + the layer or a model that contains this layer. + + Examples: + + Here's a basic example: a layer with two variables, `w` and `b`, + that returns `y = w . x + b`. + It shows how to implement `build()` and `call()`. + Variables set as attributes of a layer are tracked as weights + of the layers (in `layer.weights`). + + ```python + class SimpleDense(Layer): + def __init__(self, units=32): + super().__init__() + self.units = units + + # Create the state of the layer (weights) + def build(self, input_shape): + self.kernel = self.add_weight( + shape=(input_shape[-1], self.units), + initializer="glorot_uniform", + trainable=True, + name="kernel", + ) + self.bias = self.add_weight( + shape=(self.units,), + initializer="zeros", + trainable=True, + name="bias", + ) + + # Defines the computation + def call(self, inputs): + return ops.matmul(inputs, self.kernel) + self.bias + + # Instantiates the layer. + linear_layer = SimpleDense(4) + + # This will also call `build(input_shape)` and create the weights. 
+ y = linear_layer(ops.ones((2, 2))) + assert len(linear_layer.weights) == 2 + + # These weights are trainable, so they're listed in `trainable_weights`: + assert len(linear_layer.trainable_weights) == 2 + ``` + + Besides trainable weights, updated via backpropagation during training, + layers can also have non-trainable weights. These weights are meant to + be updated manually during `call()`. Here's a example layer that computes + the running sum of its inputs: + + ```python + class ComputeSum(Layer): + + def __init__(self, input_dim): + super(ComputeSum, self).__init__() + # Create a non-trainable weight. + self.total = self.add_weight( + shape=(), + initializer="zeros", + trainable=False, + name="total", + ) + + def call(self, inputs): + self.total.assign(self.total + ops.sum(inputs)) + return self.total + + my_sum = ComputeSum(2) + x = ops.ones((2, 2)) + y = my_sum(x) + + assert my_sum.weights == [my_sum.total] + assert my_sum.non_trainable_weights == [my_sum.total] + assert my_sum.trainable_weights == [] + ``` + """ + + def __new__(cls, *args, **kwargs): + obj = super().__new__(cls, *args, **kwargs) + + # Wrap the user-provided `build` method in the `build_wrapper` + # to add name scope support and serialization support. + original_build_method = obj.build + + @wraps(original_build_method) + def build_wrapper(*args, **kwargs): + with obj._open_name_scope(): + obj._path = current_path() + original_build_method(*args, **kwargs) + # Record build config. + signature = inspect.signature(original_build_method) + obj._build_shapes_dict = signature.bind(*args, **kwargs).arguments + # Set built, post build actions, and lock state. + obj.built = True + obj._post_build() + obj._lock_state() + + obj.build = build_wrapper + + # Wrap the user-provided `quantize` method in the `quantize_wrapper` + # to add tracker support. 
+ original_quantize_method = obj.quantize + + @wraps(original_quantize_method) + def quantize_wrapper(mode, **kwargs): + obj._check_quantize_args(mode, obj.compute_dtype) + obj._tracker.unlock() + try: + original_quantize_method(mode, **kwargs) + except Exception: + raise + finally: + obj._tracker.lock() + + obj.quantize = quantize_wrapper + + return obj + + def __init__( + self, + *, + activity_regularizer=None, + trainable=True, + dtype=None, + autocast=True, + name=None, + **kwargs, + ): + BackendLayer.__init__(self) + self._lock = False + Operation.__init__(self, dtype=dtype, name=name) + self.activity_regularizer = regularizers.get(activity_regularizer) + input_dim_arg = kwargs.pop("input_dim", None) + if input_dim_arg is not None: + input_shape_arg = (input_dim_arg,) + else: + input_shape_arg = kwargs.pop("input_shape", None) + if input_shape_arg is not None: + warnings.warn( + "Do not pass an `input_shape`/`input_dim` argument to " + "a layer. When using Sequential models, " + "prefer using an `Input(shape)` object as the " + "first layer in the model instead.", + stacklevel=2, + ) + self._input_shape_arg = input_shape_arg + if kwargs: + raise ValueError( + "Unrecognized keyword arguments " + f"passed to {self.__class__.__name__}: {kwargs}" + ) + + self._path = None # Will be determined in `build_wrapper` + self.built = False + self.autocast = autocast + self._input_spec = None + self._called = False + self.supports_jit = True + + self._trainable = trainable + self._losses = [] + self._loss_ids = set() + self._losses_override = [] + + self._call_signature = inspect.signature(self.call) + call_signature_parameters = [ + p.name for p in self._call_signature.parameters.values() + ] + self._call_has_training_arg = "training" in call_signature_parameters + self._call_has_mask_arg = "mask" in call_signature_parameters + + self._supports_masking = not utils.is_default(self.compute_mask) + # Whether to automatically convert (+ auto-cast) inputs to `call()`. 
+ self._convert_input_args = True + # Whether to allow non-tensors as positional arguments in `call()`. + self._allow_non_tensor_positional_args = False + # Dict of shapes that were used to call `build()`. + self._build_shapes_dict = None + # Parent path + self._parent_path = None + self._initialize_tracker() + + @tracking.no_automatic_dependency_tracking + def _initialize_tracker(self): + if hasattr(self, "_tracker"): + return + + trainable_variables = [] + non_trainable_variables = [] + layers = [] + metrics = [] + seed_generators = [] + self._tracker = tracking.Tracker( + { + "trainable_variables": ( + lambda x: isinstance(x, backend.Variable) and x.trainable, + trainable_variables, + ), + "non_trainable_variables": ( + lambda x: isinstance(x, backend.Variable) + and not x.trainable, + non_trainable_variables, + ), + "metrics": (lambda x: isinstance(x, Metric), metrics), + "layers": ( + lambda x: isinstance(x, Layer) + and not isinstance(x, Metric), + layers, + ), + "seed_generators": ( + lambda x: isinstance(x, backend.random.SeedGenerator), + seed_generators, + ), + }, + exclusions={"non_trainable_variables": ["trainable_variables"]}, + ) + if backend.backend() == "tensorflow": + # Remove attribute tracking for lists (TF-specific attribute) + _self_setattr_tracking = getattr( + self, "_self_setattr_tracking", True + ) + self._self_setattr_tracking = False + + self._trainable_variables = trainable_variables + self._non_trainable_variables = non_trainable_variables + self._layers = layers + self._metrics = metrics + self._seed_generators = seed_generators + + if backend.backend() == "tensorflow": + # Reset attribute tracking (TF-specific) + self._self_setattr_tracking = _self_setattr_tracking + + @property + def path(self): + """The path of the layer. + + If the layer has not been built yet, it will be `None`. 
+ """ + return self._path + + @property + def input_spec(self): + return self._input_spec + + @input_spec.setter + def input_spec(self, value): + self._input_spec = value + + @utils.default + def build(self, input_shape): + self._check_super_called() + if utils.is_default(self.build) and might_have_unbuilt_state(self): + warnings.warn( + f"`build()` was called on layer '{self.name}', however " + "the layer does not have a `build()` method implemented " + "and it looks like it has unbuilt state. This will cause " + "the layer to be marked as built, despite not being " + "actually built, which may cause failures down the line. " + "Make sure to implement a proper `build()` method." + ) + self.built = True + + def _lock_state(self): + """Prevent further state updates, called automatically in `build()`.""" + if not self._tracker.locked: + self._tracker.lock( + msg=( + "You cannot add new elements of state " + "(variables or sub-layers) " + "to a layer that is already built. All state " + "must be created in the `__init__()` method or " + "in the `build()` method." + ) + ) + + def get_build_config(self): + """Returns a dictionary with the layer's input shape. + + This method returns a config dict that can be used by + `build_from_config(config)` to create all states (e.g. Variables and + Lookup tables) needed by the layer. + + By default, the config only contains the input shape that the layer + was built with. If you're writing a custom layer that creates state in + an unusual way, you should override this method to make sure this state + is already created when Keras attempts to load its value upon model + loading. + + Returns: + A dict containing the input shape associated with the layer. 
+ """ + if self._build_shapes_dict is not None: + if len(self._build_shapes_dict) == 1: + return { + "input_shape": tuple(self._build_shapes_dict.values())[0], + } + else: + return {"shapes_dict": self._build_shapes_dict} + + def build_from_config(self, config): + """Builds the layer's states with the supplied config dict. + + By default, this method calls the `build(config["input_shape"])` method, + which creates weights based on the layer's input shape in the supplied + config. If your config contains other information needed to load the + layer's state, you should override this method. + + Args: + config: Dict containing the input shape associated with this layer. + """ + if config: + if "input_shape" in config: + self.build(config["input_shape"]) + elif "shapes_dict" in config: + self.build(**config["shapes_dict"]) + self.built = True + + def _obj_type(self): + return "Layer" + + def add_variable( + self, + shape, + initializer, + dtype=None, + trainable=True, + autocast=True, + regularizer=None, + constraint=None, + name=None, + ): + """Add a weight variable to the layer. + + Alias of `add_weight()`. + """ + return self.add_weight( + shape=shape, + initializer=initializer, + dtype=dtype, + trainable=trainable, + autocast=autocast, + regularizer=regularizer, + constraint=constraint, + name=name, + ) + + def add_weight( + self, + shape=None, + initializer=None, + dtype=None, + trainable=True, + autocast=True, + regularizer=None, + constraint=None, + aggregation="none", + name=None, + ): + """Add a weight variable to the layer. + + Args: + shape: Shape tuple for the variable. Must be fully-defined + (no `None` entries). Defaults to `()` (scalar) if unspecified. + initializer: Initializer object to use to populate the initial + variable value, or string name of a built-in initializer + (e.g. `"random_normal"`). If unspecified, defaults to + `"glorot_uniform"` for floating-point variables and to `"zeros"` + for all other types (e.g. int, bool). 
            dtype: Dtype of the variable to create, e.g. `"float32"`. If
                unspecified, defaults to the layer's variable dtype
                (which itself defaults to `"float32"` if unspecified).
            trainable: Boolean, whether the variable should be trainable via
                backprop or whether its updates are managed manually. Defaults
                to `True`.
            autocast: Boolean, whether to autocast the layer's variables when
                accessing them. Defaults to `True`.
            regularizer: Regularizer object to call to apply penalty on the
                weight. These penalties are summed into the loss function
                during optimization. Defaults to `None`.
            constraint: Constraint object to call on the variable after any
                optimizer update, or string name of a built-in constraint.
                Defaults to `None`.
            aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
                `"sum"` or `"only_first_replica"`. Annotates the variable with
                the type of multi-replica aggregation to be used for this
                variable when writing custom data parallel training loops.
                Defaults to `"none"`.
            name: String name of the variable. Useful for debugging purposes.
+ """ + self._check_super_called() + if shape is None: + shape = () + if dtype is not None: + dtype = backend.standardize_dtype(dtype) + else: + dtype = self.variable_dtype + if initializer is None: + if "float" in dtype: + initializer = "glorot_uniform" + else: + initializer = "zeros" + initializer = initializers.get(initializer) + with backend.name_scope(self.name, caller=self): + variable = backend.Variable( + initializer=initializer, + shape=shape, + dtype=dtype, + trainable=trainable, + autocast=autocast, + aggregation=aggregation, + name=name, + ) + # Will be added to layer.losses + variable.regularizer = regularizers.get(regularizer) + variable.constraint = constraints.get(constraint) + self._track_variable(variable) + return variable + + @property + def trainable(self): + """Settable boolean, whether this layer should be trainable or not.""" + return self._trainable + + @trainable.setter + def trainable(self, value): + """Sets trainable attribute for the layer and its sublayers. + + When this value is changed during training (e.g. with a + `Callback`) you need to call the parent + `Model.make_train_function` with `force=True` in order to + recompile the training graph. + + Args: + value: Boolean with the desired state for the layer's trainable + attribute. + """ + value = bool(value) + self._trainable = value + for v in self._trainable_variables: + v.trainable = value + for layer in self._layers: + layer.trainable = value + + @property + def variables(self): + """List of all layer state, including random seeds. + + This extends `layer.weights` to include all state used by the layer + including `SeedGenerator`s. + + Note that metrics variables are not included here, use + `metrics_variables` to visit all the metric variables. + """ + # Return all `Variables` associate with the layer including metrics + # and random seeds. Also deduplicate them. 
+ variables = [] + seen_ids = set() + for v in self._trainable_variables + self._non_trainable_variables: + if id(v) not in seen_ids: + variables.append(v) + seen_ids.add(id(v)) + for sg in self._seed_generators: + variables.append(sg.state) + for layer in self._layers: + for v in layer.variables: + if id(v) not in seen_ids: + variables.append(v) + seen_ids.add(id(v)) + return variables + + @property + def trainable_variables(self): + """List of all trainable layer state. + + This is equivalent to `layer.trainable_weights`. + """ + if not self.trainable: + return [] + return [v for v in self.variables if v.trainable] + + @property + def non_trainable_variables(self): + """List of all non-trainable layer state. + + This extends `layer.non_trainable_weights` to include all state used by + the layer including state for metrics and `SeedGenerator`s. + """ + if not self.trainable: + return self.variables + return [v for v in self.variables if not v.trainable] + + @property + def weights(self): + """List of all weight variables of the layer. + + Unlike, `layer.variables` this excludes metric state and random seeds. + """ + # Return only `Variables` directly owned by layers and sub-layers. + # Also deduplicate them. + weights = [] + seen_ids = set() + for w in self._trainable_variables + self._non_trainable_variables: + if id(w) not in seen_ids: + weights.append(w) + seen_ids.add(id(w)) + for layer in self._layers: + for w in layer.weights: + if id(w) not in seen_ids: + weights.append(w) + seen_ids.add(id(w)) + return weights + + @property + def trainable_weights(self): + """List of all trainable weight variables of the layer. + + These are the weights that get updated by the optimizer during training. + """ + if not self.trainable: + return [] + return [v for v in self.weights if v.trainable] + + @property + def non_trainable_weights(self): + """List of all non-trainable weight variables of the layer. 
+ + These are the weights that should not be updated by the optimizer during + training. Unlike, `layer.non_trainable_variables` this excludes metric + state and random seeds. + """ + if not self.trainable: + return self.weights + return [v for v in self.weights if not v.trainable] + + @property + def metrics(self): + """List of all metrics.""" + metrics = list(self._metrics) + for layer in self._layers: + metrics.extend(layer.metrics) + return metrics + + @property + def metrics_variables(self): + """List of all metric variables.""" + vars = [] + for metric in self.metrics: + vars.extend(metric.variables) + return vars + + def get_weights(self): + """Return the values of `layer.weights` as a list of NumPy arrays.""" + return [v.numpy() for v in self.weights] + + def set_weights(self, weights): + """Sets the values of `layer.weights` from a list of NumPy arrays.""" + layer_weights = self.weights + if len(layer_weights) != len(weights): + raise ValueError( + f"You called `set_weights(weights)` on layer '{self.name}' " + f"with a weight list of length {len(weights)}, but the layer " + f"was expecting {len(layer_weights)} weights." + ) + for variable, value in zip(layer_weights, weights): + if variable.shape != value.shape: + raise ValueError( + f"Layer {self.name} weight shape {variable.shape} " + "is not compatible with provided weight " + f"shape {value.shape}." 
+ ) + variable.assign(value) + + @property + def dtype_policy(self): + return self._dtype_policy + + @dtype_policy.setter + def dtype_policy(self, value): + policy = dtype_policies.get(value) + if isinstance(self._dtype_policy, DTypePolicyMap) and self.path: + if self.path in self._dtype_policy: + del self._dtype_policy[self.path] + self._dtype_policy[self.path] = policy + else: + self._dtype_policy = policy + if policy.quantization_mode is not None: + if self.built and not getattr(self, "_is_quantized", False): + self.quantize(policy.quantization_mode) + + @property + def dtype(self): + """Alias of `layer.variable_dtype`.""" + return self.variable_dtype + + @property + def compute_dtype(self): + """The dtype of the computations performed by the layer.""" + if isinstance(self._dtype_policy, DTypePolicyMap) and self.path: + policy = self._dtype_policy[self.path] + else: + policy = self._dtype_policy + return policy.compute_dtype + + @property + def variable_dtype(self): + """The dtype of the state (weights) of the layer.""" + if isinstance(self._dtype_policy, DTypePolicyMap) and self.path: + policy = self._dtype_policy[self.path] + else: + policy = self._dtype_policy + return policy.variable_dtype + + @property + def quantization_mode(self): + """The quantization mode of this layer, `None` if not quantized.""" + if isinstance(self._dtype_policy, DTypePolicyMap) and self.path: + policy = self._dtype_policy[self.path] + else: + policy = self._dtype_policy + return policy.quantization_mode + + @property + def input_dtype(self): + """The dtype layer inputs should be converted to.""" + return self.compute_dtype + + @property + def supports_masking(self): + """Whether this layer supports computing a mask using `compute_mask`.""" + return self._supports_masking + + @supports_masking.setter + def supports_masking(self, value): + self._supports_masking = value + + @utils.default + def compute_mask(self, inputs, previous_mask): + return previous_mask + + 
@traceback_utils.filter_traceback + def __call__(self, *args, **kwargs): + self._check_super_called() + self._called = True + + ##################################### + # 1. Convert any array arguments to tensors of correct dtype. + def maybe_convert(x): + return self.dtype_policy.convert_input( + x, self.autocast, self.input_dtype + ) + + # Used to avoid expensive `tree` operations in the most common case. + if ( + kwargs + or len(args) != 1 + or not backend.is_tensor(args[0]) + or backend.standardize_dtype(args[0].dtype) != self.input_dtype + ) and self._convert_input_args: + args = tree.map_structure(maybe_convert, args) + kwargs = tree.map_structure(maybe_convert, kwargs) + + ########################################################## + # 2. Enforce that only tensors can be passed positionally. + if not self._allow_non_tensor_positional_args: + for arg in tree.flatten(args): + if ( + not isinstance(arg, KerasTensor) + and not backend.is_tensor(arg) + and arg is not None + ): + raise ValueError( + "Only input tensors may be passed as " + "positional arguments. The following argument value " + f"should be passed as a keyword argument: {arg} " + f"(of type {type(arg)})" + ) + + # Caches info about `call()` signature, args, kwargs. + call_spec = CallSpec(self._call_signature, args, kwargs) + + ############################################ + # 3. Check input spec for 1st positional arg. + # TODO: consider extending this to all args and kwargs. + self._assert_input_compatibility(call_spec.first_arg) + + ################ + # 4. Call build + with self._open_name_scope(): + self._maybe_build(call_spec) + + ########################## + # 5. Infer training value + # Training phase for `Layer.call` is set via (in order of priority): + # (1) The `training` argument passed to this `Layer.call`, if not None + # (2) The training argument of an outer `Layer.call`. 
+ # (4) Any non-None default value for `training` in the call signature + # (5) False (treating the layer as if it's in inference) + + # Maintains info about the `Layer.call` stack + # across nested calls. + call_context = self._get_call_context() + + # This is the value explicitly passed by the user + training = call_spec.user_arguments_dict.get("training", None) + if training is None: + # Wasn't passed explicitly: use context value + training = call_context.training + if training is None: + # Get signature default value + training = call_spec.arguments_dict.get("training", None) + call_context.training = training + if self._call_has_training_arg and training is not None: + # Only populate arg if it has a concrete value + kwargs["training"] = training + + ############################## + # 6. Populate mask argument(s) + if len(call_spec.tensor_arguments_dict) == 1: + if ( + "mask" in call_spec.argument_names + and call_spec.arguments_dict["mask"] is None + ): + arg_name = list(call_spec.tensor_arguments_dict.keys())[0] + only_tensor_arg = call_spec.tensor_arguments_dict[arg_name] + mask = tree.map_structure( + backend.get_keras_mask, + only_tensor_arg, + ) + kwargs["mask"] = mask + elif len(call_spec.tensor_arguments_dict) > 1: + for k, v in call_spec.tensor_arguments_dict.items(): + expected_mask_arg_name = f"{k}_mask" + if expected_mask_arg_name in call_spec.argument_names: + if call_spec.arguments_dict[expected_mask_arg_name] is None: + mask = tree.map_structure(backend.get_keras_mask, v) + kwargs[expected_mask_arg_name] = mask + + # We need to cache the `previous_mask` before `__call__` because the + # mask might be removed during the call, such as `MultiHeadAttention`. + previous_mask = tree.map_structure( + backend.get_keras_mask, call_spec.first_arg + ) + + #################### + # 7. Call the layer. 
+ try: + with self._open_name_scope(): + current_scope = backend.get_autocast_scope() + new_scope = None + if current_scope is not None: + # Clear or update the current scope if necessary. + if not self.autocast: + new_scope = backend.AutocastScope(None) + elif not backend.is_float_dtype(self.compute_dtype): + # Some preprocessing layers might have a non-float + # dtype, we should not autocast in this case. + new_scope = backend.AutocastScope(None) + elif current_scope.dtype != self.compute_dtype: + new_scope = backend.AutocastScope(self.compute_dtype) + elif self.compute_dtype != self.variable_dtype: + # Enter a new scope if our dtypes are "mixed". + new_scope = backend.AutocastScope(self.compute_dtype) + + if new_scope is not None: + with new_scope: + outputs = super().__call__(*args, **kwargs) + else: + outputs = super().__call__(*args, **kwargs) + # Change the layout for the layer output if needed. + # This is useful for relayout intermediate tensor in the model + # to achieve the optimal performance. + distribution = distribution_lib.distribution() + if distribution is not None: + current_layer_path = current_path() + current_layer_path += "/output" + layout = distribution.get_tensor_layout(current_layer_path) + if layout: + outputs = distribution_lib.distribute_tensor( + outputs, layout + ) + + if not self.built: + self.built = True + # Record activity regularizer loss. + if self.activity_regularizer is not None: + for output in tree.flatten(outputs): + if backend.is_tensor(output): + self.add_loss(self.activity_regularizer(output)) + + # Set `previous_mask` on outputs if available. It is provided only + # for the first positional input arg and its mask. + # TODO: consider extending this to all args and kwargs. 
+ if self.supports_masking: + self._set_mask_metadata( + call_spec.first_arg, outputs, previous_mask + ) + elif any(m is not None for m in tree.flatten(previous_mask)): + warnings.warn( + f"Layer '{self.name}' (of type {self.__class__.__name__}) " + "was passed an input with a mask attached to it. " + "However, this layer does not support masking and will " + "therefore destroy the mask information. Downstream " + "layers will not see the mask." + ) + finally: + # Destroy call context if we created it + self._maybe_reset_call_context() + return outputs + + def call(self, *args, **kwargs): + raise self._not_implemented_error(self.call) + + @traceback_utils.filter_traceback + def stateless_call( + self, + trainable_variables, + non_trainable_variables, + *args, + return_losses=False, + **kwargs, + ): + """Call the layer without any side effects. + + Args: + trainable_variables: List of trainable variables of the model. + non_trainable_variables: List of non-trainable variables of the + model. + *args: Positional arguments to be passed to `call()`. + return_losses: If `True`, `stateless_call()` will return the list of + losses created during `call()` as part of its return values. + **kwargs: Keyword arguments to be passed to `call()`. + + Returns: + A tuple. By default, returns `(outputs, non_trainable_variables)`. + If `return_losses = True`, then returns + `(outputs, non_trainable_variables, losses)`. + + Note: `non_trainable_variables` include not only non-trainable weights + such as `BatchNormalization` statistics, but also RNG seed state + (if there are any random operations part of the layer, such as dropout), + and `Metric` state (if there are any metrics attached to the layer). + These are all elements of state of the layer. + + Example: + + ```python + model = ... + data = ... 
+ trainable_variables = model.trainable_variables + non_trainable_variables = model.non_trainable_variables + # Call the model with zero side effects + outputs, non_trainable_variables = model.stateless_call( + trainable_variables, + non_trainable_variables, + data, + ) + # Attach the updated state to the model + # (until you do this, the model is still in its pre-call state). + for ref_var, value in zip( + model.non_trainable_variables, non_trainable_variables + ): + ref_var.assign(value) + ``` + """ + self._check_super_called() + + if not self.built: + raise ValueError( + f"To call stateless_call, {self.__class__.__name__} must be " + "built (i.e. its variables must have been already created). " + "You can build it by calling it on some data." + ) + if len(trainable_variables) != len(self.trainable_variables): + raise ValueError( + "Argument `trainable_variables` must be a list of tensors " + "corresponding 1:1 to " + f"{self.__class__.__name__}().trainable_variables. " + f"Received list with length {len(trainable_variables)}, " + f"but expected {len(self.trainable_variables)} variables." + ) + if len(non_trainable_variables) != len(self.non_trainable_variables): + raise ValueError( + "Argument `non_trainable_variables` must be a list of tensors " + "corresponding 1:1 to " + f"{self.__class__.__name__}().non_trainable_variables. " + f"Received list with length {len(non_trainable_variables)}, " + f"but expected {len(self.non_trainable_variables)} variables." 
+ ) + + # Gather variable mapping + trainable_mapping = zip(self.trainable_variables, trainable_variables) + non_trainable_mapping = zip( + self.non_trainable_variables, non_trainable_variables + ) + mapping = list(trainable_mapping) + list(non_trainable_mapping) + + # Call in stateless scope + losses = None + with backend.StatelessScope( + state_mapping=mapping, collect_losses=return_losses + ) as scope: + if self.dtype_policy.quantization_mode is not None: + outputs = self.quantized_call(*args, **kwargs) + else: + outputs = self.call(*args, **kwargs) + if return_losses: + losses = self.losses + + # Gather updated non-trainable variables + non_trainable_variables = [] + for v in self.non_trainable_variables: + new_v = scope.get_current_value(v) + non_trainable_variables.append(new_v) + + if return_losses: + return outputs, non_trainable_variables, losses + return outputs, non_trainable_variables + + def compute_output_spec(self, *args, **kwargs): + if utils.is_default(self.compute_output_shape): + return super().compute_output_spec(*args, **kwargs) + else: + # Use compute_output_shape() to return the right output spec + call_spec = CallSpec(self._call_signature, args, kwargs) + shapes_dict = get_shapes_dict(call_spec) + shapes_dict = update_shapes_dict_for_target_fn( + self.compute_output_shape, + shapes_dict=shapes_dict, + call_spec=call_spec, + class_name=self.__class__.__name__, + ) + output_shape = self.compute_output_shape(**shapes_dict) + + if ( + isinstance(output_shape, list) + and output_shape + and isinstance(output_shape[0], (int, type(None))) + ): + output_shape = tuple(output_shape) + if not isinstance(output_shape, (list, tuple, dict)): + try: + output_shape = tuple(output_shape) + except: + raise ValueError( + "Method `compute_output_shape()` of layer " + f"{self.__class__.__name__} is returning " + "a type that cannot be interpreted as a shape. " + "It should return a shape tuple. 
" + f"Received: {output_shape}" + ) + if ( + isinstance(output_shape, tuple) + and output_shape + and isinstance(output_shape[0], (int, type(None))) + ): + return KerasTensor(output_shape, dtype=self.compute_dtype) + # Case: nested. Could be a tuple/list of shapes, or a dict of + # shapes. Could be deeply nested. + return tree.map_shape_structure( + lambda s: KerasTensor(s, dtype=self.compute_dtype), output_shape + ) + + @utils.default + def compute_output_shape(self, *args, **kwargs): + raise self._not_implemented_error( + self.compute_output_shape, + "Should implement `def compute_output_shape(self, input_shape)`.", + ) + + def add_loss(self, loss): + """Can be called inside of the `call()` method to add a scalar loss. + + Example: + + ```python + class MyLayer(Layer): + ... + def call(self, x): + self.add_loss(ops.sum(x)) + return x + ``` + """ + # Eager only. + losses = tree.flatten(loss) + for x in losses: + if not backend.is_tensor(x): + raise ValueError( + "`add_loss()` can only be called from inside `build()` or " + f"`call()`, on a tensor input. Received invalid value: {x}" + ) + if backend.in_stateless_scope(): + scope = backend.get_stateless_scope() + if scope.collect_losses: + for x in losses: + scope.add_loss(loss) + self._loss_ids.add(id(loss)) + else: + self._losses.extend(losses) + + def _get_own_losses(self): + if backend.in_stateless_scope(): + losses = [] + scope = backend.get_stateless_scope() + for loss in scope.losses: + if id(loss) in self._loss_ids: + losses.append(loss) + return losses + else: + return self._losses[:] + + def _get_regularization_losses(self): + weight_regularization_losses = [] + for variable in self.trainable_weights: + if variable.regularizer is None: + continue + if backend.in_stateless_scope() and not in_symbolic_scope(): + # If in symbolic scope, we might get `None` from + # `get_current_value` in `backend.compute_output_spec`. So we + # assign `variable` instead. 
+ v = backend.get_stateless_scope().get_current_value(variable) + else: + v = variable + weight_regularization_losses.append(variable.regularizer(v)) + return weight_regularization_losses + + @property + def losses(self): + """List of scalar losses from `add_loss`, regularizers and sublayers.""" + if self._losses_override: + return self._losses_override + losses = self._get_own_losses() + for layer in self._flatten_layers(include_self=False): + losses.extend(layer._get_own_losses()) + weight_regularization_losses = self._get_regularization_losses() + losses.extend(weight_regularization_losses) + return losses + + def _clear_losses(self): + if backend.in_stateless_scope(): + scope = backend.get_stateless_scope() + if scope.collect_losses: + for x in scope.losses: + if id(x) in self._loss_ids: + scope.losses.remove(x) + self._losses.clear() + self._loss_ids.clear() + for layer in self._layers: + layer._clear_losses() + + # Quantization-related (int8 and float8) methods + + def quantized_build(self, input_shape, mode): + raise self._not_implemented_error(self.quantized_build) + + def quantize(self, mode, type_check=True): + raise self._not_implemented_error(self.quantize) + + def _check_quantize_args(self, mode, compute_dtype): + if not self.built: + raise ValueError( + "Cannot quantize a layer that isn't yet built. " + f"Layer '{self.name}' (of type '{self.__class__.__name__}') " + "is not built yet." + ) + if getattr(self, "_is_quantized", False): + raise ValueError( + f"Layer '{self.name}' is already quantized with " + f"dtype_policy='{self.dtype_policy.name}'. " + f"Received: mode={mode}" + ) + if mode not in dtype_policies.QUANTIZATION_MODES: + raise ValueError( + "Invalid quantization mode. " + f"Expected one of {dtype_policies.QUANTIZATION_MODES}. " + f"Received: mode={mode}" + ) + if mode == "int8" and compute_dtype == "float16": + raise ValueError( + f"Quantization mode='{mode}' doesn't work well with " + "compute_dtype='float16'. 
Consider loading model/layer with " + "another dtype policy such as 'mixed_bfloat16' or " + "'mixed_float16' before calling `quantize()`." + ) + + def quantized_call(self, *args, **kwargs): + if self.quantization_mode == "int8": + return self._int8_call(*args, **kwargs) + elif self.quantization_mode == "float8": + return self._float8_call(*args, **kwargs) + else: + raise self._quantization_mode_error(self.quantization_mode) + + def _int8_call(self, *args, **kwargs): + raise self._not_implemented_error(self._int8_call) + + def _float8_call(self, *args, **kwargs): + raise self._not_implemented_error(self._float8_call) + + def _not_implemented_error(self, attr, msg=None): + if callable(attr): + attr_name = attr.__name__ + attr_type = "method" + else: + attr_name = str(attr) + attr_type = "attribute" + msg = " " + msg if msg is not None else "" + return NotImplementedError( + f"Layer {self.__class__.__name__} does not have a `{attr_name}` " + f"{attr_type} implemented.{msg}" + ) + + def _quantization_mode_error(self, mode): + return NotImplementedError( + "Invalid quantization mode. Expected one of " + f"{dtype_policies.QUANTIZATION_MODES}. " + f"Received: quantization_mode={mode}" + ) + + def save_own_variables(self, store): + """Saves the state of the layer. + + You can override this method to take full control of how the state of + the layer is saved upon calling `model.save()`. + + Args: + store: Dict where the state of the model will be saved. + """ + all_vars = self._trainable_variables + self._non_trainable_variables + for i, v in enumerate(all_vars): + store[f"{i}"] = v + + def load_own_variables(self, store): + """Loads the state of the layer. + + You can override this method to take full control of how the state of + the layer is loaded upon calling `keras.models.load_model()`. + + Args: + store: Dict from which the state of the model will be loaded. 
+ """ + all_vars = self._trainable_variables + self._non_trainable_variables + if len(store.keys()) != len(all_vars): + if len(all_vars) == 0 and not self.built: + raise ValueError( + f"Layer '{self.name}' was never built " + "and thus it doesn't have any variables. " + f"However the weights file lists {len(store.keys())} " + "variables for this layer.\n" + "In most cases, this error indicates that either:\n\n" + "1. The layer is owned by a parent layer that " + "implements a `build()` method, but calling the " + "parent's `build()` method did NOT create the state of " + f"the child layer '{self.name}'. A `build()` method " + "must create ALL state for the layer, including " + "the state of any children layers.\n\n" + "2. You need to implement " + "the `def build_from_config(self, config)` method " + f"on layer '{self.name}', to specify how to rebuild " + "it during loading. " + "In this case, you might also want to implement the " + "method that generates the build config at saving time, " + "`def get_build_config(self)`. " + "The method `build_from_config()` is meant " + "to create the state " + "of the layer (i.e. its variables) upon deserialization.", + ) + raise ValueError( + f"Layer '{self.name}' expected {len(all_vars)} variables, " + "but received " + f"{len(store.keys())} variables during loading. 
" + f"Expected: {[v.name for v in all_vars]}" + ) + for i, v in enumerate(all_vars): + v.assign(store[f"{i}"]) + + def _track_variable(self, variable): + if variable.trainable: + self._tracker.add_to_store("trainable_variables", variable) + else: + self._tracker.add_to_store("non_trainable_variables", variable) + if not self.trainable: + variable.trainable = False + self._post_track_variable(variable) + + def _untrack_variable(self, variable): + previous_lock_state = self._tracker.locked + self._tracker.unlock() + self._tracker.untrack(variable) + if previous_lock_state is True: + self._tracker.lock() + self._post_untrack_variable(variable) + + def add_metric(self, *args, **kwargs): + # Permanently disabled + raise NotImplementedError( + "Layer `add_metric()` method is deprecated" + " add your metric in `Model.compile(metrics=[...]).`" + ) + + def count_params(self): + """Count the total number of scalars composing the weights. + + Returns: + An integer count. + """ + if not self.built: + raise ValueError( + "You tried to call `count_params` " + f"on layer '{self.name}', " + "but the layer isn't built. " + "You can build it manually via: " + f"`layer.build(input_shape)`." + ) + return summary_utils.count_params(self.weights) + + def _maybe_build(self, call_spec): + if self.built: + return + + shapes_dict = get_shapes_dict(call_spec) + first_shape = next(iter(shapes_dict.values()), None) + + # If the layer has a build method, call it with our input shapes. + if not utils.is_default(self.build): + shapes_dict = update_shapes_dict_for_target_fn( + self.build, + shapes_dict=shapes_dict, + call_spec=call_spec, + class_name=self.__class__.__name__, + ) + self.build(**shapes_dict) + # Check input spec again (after build, since self.input_spec + # may have been updated + self._assert_input_compatibility(call_spec.first_arg) + return + + # Otherwise, attempt to build the layer by calling it on symbolic input. 
+ if might_have_unbuilt_state(self): + try: + backend.compute_output_spec( + self.call, **call_spec.arguments_dict + ) + except Exception as e: + if call_spec.eager: + # Will let the actual eager call do state-building + return + warnings.warn( + f"Layer '{self.name}' looks like it has unbuilt state, but " + "Keras is not able to trace the layer `call()` in order to " + "build it automatically. Possible causes:\n" + "1. The `call()` method of your layer may be crashing. Try " + "to `__call__()` the layer eagerly on some test input " + "first to see if it works. " + "E.g. `x = np.random.random((3, 4)); y = layer(x)`\n" + "2. If the `call()` method is correct, then you may need " + "to implement the `def build(self, input_shape)` method on " + "your layer. It should create all variables used by the " + "layer (e.g. by calling `layer.build()` on all its " + "children layers).\n" + f"Exception encountered: ''{e}''" + ) + self.build(first_shape) + + def _build_by_run_for_single_pos_arg(self, input_shape): + # Case: all inputs are in the first arg (possibly nested). + input_tensors = tree.map_shape_structure( + lambda s: backend.KerasTensor(s), input_shape + ) + try: + backend.compute_output_spec(self.call, input_tensors) + return True + except: + return False + + def _build_by_run_for_kwargs(self, shapes_dict): + # Case: inputs were recorded as multiple keyword arguments. + if all(is_shape_tuple(s) for s in shapes_dict.values()): + # Case: all input keyword arguments were plain tensors. + input_tensors = { + # We strip the `_shape` suffix to recover kwarg names. + utils.removesuffix(k, "_shape"): backend.KerasTensor(shape) + for k, shape in shapes_dict.items() + } + try: + backend.compute_output_spec(self.call, **input_tensors) + return True + except: + return False + else: + # Not supported: nested input keyword arguments. 
+ return False + + def __repr__(self): + return ( + f"<{self.__class__.__name__} " + f"name={self.name}, built={self.built}>" + ) + + def __str__(self): + return self.__repr__() + + def __setattr__(self, name, value): + # Track Variables, Layers, Metrics, SeedGenerators. + name, value = self._setattr_hook(name, value) + if name != "_tracker": + if not hasattr(self, "_tracker"): + self._initialize_tracker() + value = self._tracker.track(value) + return super().__setattr__(name, value) + + def __delattr__(self, name): + obj = getattr(self, name) + if isinstance(obj, backend.Variable): + import gc + + # It will take a short amount of time for the corresponding buffer + # to be actually removed from the device. + # https://stackoverflow.com/a/74631949 + self._untrack_variable(obj) + super().__delattr__(name) + gc.collect() + else: + super().__delattr__(name) + + def _check_super_called(self): + if getattr(self, "_lock", True): + raise RuntimeError( + f"In layer '{self.__class__.__name__}', you forgot to call " + "`super().__init__()` as the first statement " + "in the `__init__()` method. Go add it!" + ) + + def _assert_input_compatibility(self, arg_0): + if self.input_spec: + try: + input_spec.assert_input_compatibility( + self.input_spec, arg_0, layer_name=self.name + ) + except SystemError: + if backend.backend() == "torch": + # TODO: The torch backend failed the ONNX CI with the error: + # SystemError: returned a result with an exception set + # As a workaround, we are skipping this for now. + pass + else: + raise + + def _get_call_context(self): + """Returns currently active `CallContext`.""" + layer_call_ctx = global_state.get_global_attribute("current_call_ctx") + if layer_call_ctx is None: + # Enter new call context. 
+ layer_call_ctx = CallContext(entry_layer=self) + global_state.set_global_attribute( + "current_call_ctx", layer_call_ctx + ) + self._clear_losses() + return layer_call_ctx + + def _maybe_reset_call_context(self): + layer_call_ctx = global_state.get_global_attribute("current_call_ctx") + if layer_call_ctx is None or layer_call_ctx.entry_layer == self: + global_state.set_global_attribute("current_call_ctx", None) + + def _flatten_layers(self, include_self=True, recursive=True): + layers = [] + if include_self: + layers.append(self) + seen_object_ids = set() + deque = collections.deque(self._layers) + while deque: + layer = deque.popleft() + if id(layer) in seen_object_ids: + continue + seen_object_ids.add(id(layer)) + layers.append(layer) + # Introspect recursively through sublayers. + if recursive: + deque.extendleft(layer._layers) + return layers + + def _set_mask_metadata(self, inputs, outputs, previous_mask): + flat_outputs = tree.flatten(outputs) + + mask_already_computed = all( + backend.get_keras_mask(x) is not None for x in flat_outputs + ) + if mask_already_computed: + return + + output_masks = self.compute_mask(inputs, previous_mask) + if output_masks is None: + return + + flat_masks = tree.flatten(output_masks) + for tensor, mask in zip(flat_outputs, flat_masks): + if backend.get_keras_mask(tensor) is None and mask is not None: + if backend.backend() == "numpy": + warnings.warn( + "The NumPy backend does not support masking at this" + "time. Masks will be ignored." 
+ ) + else: + backend.set_keras_mask(tensor, mask) + + @python_utils.default + def get_config(self): + self._check_super_called() + base_config = super().get_config() + config = { + "trainable": self.trainable, + "dtype": dtype_policies.serialize(self.dtype_policy), + } + if self.activity_regularizer is not None: + config["activity_regularizer"] = regularizers.serialize( + self.activity_regularizer + ) + return {**base_config, **config} + + def _open_name_scope(self): + if self._parent_path is None: + self._parent_path = current_path() + return backend.name_scope(self.name, caller=self) + + +def is_backend_tensor_or_symbolic(x, allow_none=False): + if allow_none and x is None: + return True + return backend.is_tensor(x) or isinstance(x, backend.KerasTensor) + + +class CallSpec: + def __init__(self, signature, args, kwargs): + # `training` and `mask` are special kwargs that are always available in + # a layer, if user specifies them in their call without adding to spec, + # we remove them to be able to bind variables. User is not using + # `training` anyway so we can ignore. 
class CallSpec:
    """Normalized view of the arguments of one `call()` invocation.

    Binds `args`/`kwargs` against `signature`, applies defaults, and
    records which arguments are tensors (plain or nested) so that shape
    inference and build logic can reason about them.
    """

    def __init__(self, signature, args, kwargs):
        # `training` and `mask` are special kwargs that are always available
        # in a layer, if user specifies them in their call without adding to
        # spec, we remove them to be able to bind variables. User is not
        # using `training` anyway so we can ignore.
        # TODO: If necessary use workaround for `mask`
        # CLEANUP: the original duplicated `signature.bind(...)` in both
        # branches of this check; a single bind after the pop is equivalent.
        if "training" in kwargs and "training" not in signature.parameters:
            kwargs.pop("training")
        bound_args = signature.bind(*args, **kwargs)
        # Arguments explicitly provided by the user (pre-defaults).
        self.user_arguments_dict = dict(bound_args.arguments)
        bound_args.apply_defaults()
        arg_dict = {}
        arg_names = []
        tensor_arg_dict = {}
        tensor_args = []
        tensor_arg_names = []
        nested_tensor_arg_names = []
        for name, value in bound_args.arguments.items():
            arg_dict[name] = value
            arg_names.append(name)
            if is_backend_tensor_or_symbolic(value):
                tensor_args.append(value)
                tensor_arg_names.append(name)
                tensor_arg_dict[name] = value
            elif tree.is_nested(value) and len(value) > 0:
                flat_values = tree.flatten(value)
                if all(
                    is_backend_tensor_or_symbolic(x, allow_none=True)
                    for x in flat_values
                ):
                    tensor_args.append(value)
                    tensor_arg_names.append(name)
                    tensor_arg_dict[name] = value
                    nested_tensor_arg_names.append(name)
                elif any(is_backend_tensor_or_symbolic(x) for x in flat_values):
                    raise ValueError(
                        "In a nested call() argument, "
                        "you cannot mix tensors and non-tensors. "
                        "Received invalid mixed argument: "
                        f"{name}={value}"
                    )
        self.arguments_dict = arg_dict
        self.argument_names = arg_names
        self.tensor_arguments_dict = tensor_arg_dict
        self.tensor_arguments_names = tensor_arg_names
        self.nested_tensor_argument_names = nested_tensor_arg_names
        self.first_arg = arg_dict[arg_names[0]]
        # Eager iff every tensor argument is a concrete backend tensor
        # (no symbolic KerasTensor placeholders).
        self.eager = all(
            backend.is_tensor(x) for x in self.tensor_arguments_dict.values()
        )


def get_arguments_dict(fn, args, kwargs):
    """Return a dict mapping argument names to their values.

    Only arguments actually provided are included (defaults are not
    applied).
    """
    sig = inspect.signature(fn)
    bound_args = sig.bind(*args, **kwargs)
    return dict(bound_args.arguments)


def get_shapes_dict(call_spec):
    """Convert the call() arguments dict into a dict of input shape arguments.

    Mask tensors and catch-all `*args`/`**kwargs` entries are excluded.

    Example:

    ```
    >>> get_shapes_dict(call_spec)
    {"input_a_shape": (2, 3)}
    ```
    """
    shapes_dict = {}
    for k, v in call_spec.tensor_arguments_dict.items():
        if k == "mask" or k.endswith("_mask"):
            # Do not include mask tensors in shapes dict
            continue
        if k == "kwargs" or k == "args":
            # Do not include catch-alls in shapes dict
            continue
        if k in call_spec.nested_tensor_argument_names:
            shapes_dict[f"{k}_shape"] = tree.map_structure(
                lambda x: backend.standardize_shape(x.shape), v
            )
        else:
            shapes_dict[f"{k}_shape"] = backend.standardize_shape(v.shape)
    return shapes_dict
def update_shapes_dict_for_target_fn(
    target_fn,
    shapes_dict,
    call_spec,
    class_name,
):
    """Updates a `shapes_dict` for `build()` or `compute_output_shape()`.

    Aligns the dictionary of shapes of all tensors passed to `call` with
    the signature of `build()` or `compute_output_shape()`.

    The alignment is as follows:

    - If the target accepts only one argument, forward the shape of the
      first positional argument from call without checking argument names.
    - If the target accepts multiple arguments, enforce that every
      argument name matches a call argument name, e.g. `foo_shape` must
      match call argument `foo`.

    Returns:
        An updated `shapes_dict` usable as `target_fn(**shapes_dict)`, or
        None if `target_fn` is the default implementation.
    """
    if utils.is_default(target_fn):
        return None
    sig = inspect.signature(target_fn)
    accepted_kinds = (
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
        inspect.Parameter.POSITIONAL_ONLY,
        inspect.Parameter.KEYWORD_ONLY,
    )
    expected_names = [
        name
        for name, param in sig.parameters.items()
        if param.kind in accepted_kinds
    ]

    # Single arg: don't check names, pass first shape.
    if len(expected_names) == 1:
        available_shapes = tuple(shapes_dict.values())
        first = available_shapes[0] if available_shapes else None
        return {expected_names[0]: first}

    # Multiple args: check that all names line up.
    method_name = target_fn.__name__
    error_preamble = (
        f"For a `{method_name}()` method with more than one argument, all "
        "arguments should have a `_shape` suffix and match an argument "
        f"from `call()`. E.g. `{method_name}(self, foo_shape, bar_shape)` "
    )
    kwargs = {}
    for name in expected_names:
        if not name.endswith("_shape"):
            raise ValueError(
                f"{error_preamble} For layer '{class_name}', "
                f"Received `{method_name}()` argument "
                f"`{name}`, which does not end in `_shape`."
            )
        expected_call_arg = utils.removesuffix(name, "_shape")
        if expected_call_arg not in call_spec.arguments_dict:
            raise ValueError(
                f"{error_preamble} For layer '{class_name}', "
                f"received `{method_name}()` argument "
                f"`{name}`, but `call()` does not have argument "
                f"`{expected_call_arg}`."
            )
        if name in shapes_dict:
            kwargs[name] = shapes_dict[name]

    return kwargs


class CallContext:
    """Per-call global context: tracks the entry layer and training flag."""

    def __init__(self, entry_layer):
        self.entry_layer = entry_layer
        self.training = None


def is_shape_tuple(s):
    """True if `s` is a list/tuple of ints and/or None (a shape)."""
    if not isinstance(s, (list, tuple)):
        return False
    return all(d is None or isinstance(d, int) for d in s)


def might_have_unbuilt_state(layer):
    """True if any direct sublayer of `layer` is not yet built."""
    for sublayer in layer._layers:
        if not sublayer.built:
            return True
    return False
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
    """Layer that applies an update to the cost function based on input
    activity.

    The layer passes its input through unchanged; its only effect is to
    attach an L1/L2 activity regularizer to the loss.

    Args:
        l1: L1 regularization factor (positive float).
        l2: L2 regularization factor (positive float).

    Input shape:
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    Output shape:
        Same shape as input.
    """

    def __init__(self, l1=0.0, l2=0.0, **kwargs):
        super().__init__(
            activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
        )
        self.supports_masking = True
        self.l1 = l1
        self.l2 = l2
        # Stateless layer: no weights to create, mark as built immediately.
        self.built = True

    def call(self, inputs):
        # Identity: regularization is applied via `activity_regularizer`.
        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        # Serialize `l1`/`l2` directly instead of the constructed
        # regularizer object.
        config = super().get_config()
        config.pop("activity_regularizer", None)
        config.update({"l1": self.l1, "l2": self.l2})
        return config
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(Layer):
    """Applies Alpha Dropout to the input.

    Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
    to their original values, in order to ensure the self-normalizing
    property even after this dropout. Alpha Dropout fits well to Scaled
    Exponential Linear Units (SELU) by randomly setting activations to
    the negative saturation value.

    Args:
        rate: Float between 0 and 1. The multiplicative noise will have
            standard deviation `sqrt(rate / (1 - rate))`.
        noise_shape: 1D integer tensor representing the shape of the
            binary alpha dropout mask that will be multiplied with the
            input. For instance, if your inputs have shape
            `(batch_size, timesteps, features)` and you want the alpha
            dropout mask to be the same for all timesteps, you can use
            `noise_shape=(batch_size, 1, features)`.
        seed: A Python integer to use as random seed.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should
            behave in training mode (adding alpha dropout) or in
            inference mode (doing nothing).
    """

    def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        if not 0 <= rate <= 1:
            # FIX: removed stray `f` prefix on a placeholder-less string
            # fragment (lint F541); message is unchanged.
            raise ValueError(
                "Invalid value received for argument "
                "`rate`. Expected a float value between 0 and 1. "
                f"Received: rate={rate}"
            )
        self.rate = rate
        self.seed = seed
        self.noise_shape = noise_shape
        if rate > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        # Stateless layer: no weights to create.
        self.built = True

    def call(self, inputs, training=False):
        if training and self.rate > 0:
            noise_shape = self._get_concrete_noise_shape(
                inputs, self.noise_shape
            )
            # SELU constants (Klambauer et al., 2017).
            alpha = 1.6732632423543772848170429916717
            scale = 1.0507009873554804934193349852946
            alpha_p = -alpha * scale

            kept_idx = ops.greater_equal(
                ops.random.uniform(noise_shape, seed=self.seed_generator),
                self.rate,
            )
            kept_idx = ops.cast(kept_idx, inputs.dtype)

            # Compute affine transformation parameters that restore the
            # original mean and variance after dropping.
            a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
            b = -a * alpha_p * self.rate

            # Apply mask: dropped units take the negative saturation value.
            x = inputs * kept_idx + alpha_p * (1 - kept_idx)
            return a * x + b

        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def _get_concrete_noise_shape(self, inputs, noise_shape):
        """Resolve `None` entries in `noise_shape` from the input shape."""
        if noise_shape is None:
            return ops.shape(inputs)

        concrete_inputs_shape = ops.shape(inputs)
        return [
            concrete_inputs_shape[i] if value is None else value
            for i, value in enumerate(noise_shape)
        ]

    def get_config(self):
        base_config = super().get_config()
        config = {
            "rate": self.rate,
            "seed": self.seed,
            "noise_shape": self.noise_shape,
        }
        return {**base_config, **config}
@keras_export("keras.layers.Dropout")
class Dropout(Layer):
    """Applies dropout to the input.

    The `Dropout` layer randomly sets input units to 0 with a frequency of
    `rate` at each step during training time, which helps prevent
    overfitting. Inputs not set to 0 are scaled up by `1 / (1 - rate)` such
    that the sum over all inputs is unchanged.

    Note that the `Dropout` layer only applies when `training` is set to
    `True` in `call()`, such that no values are dropped during inference.
    When using `model.fit`, `training` will be appropriately set to `True`
    automatically. In other contexts, you can set the argument explicitly
    to `True` when calling the layer.

    (This is in contrast to setting `trainable=False` for a `Dropout`
    layer. `trainable` does not affect the layer's behavior, as `Dropout`
    does not have any variables/weights that can be frozen during
    training.)

    Args:
        rate: Float between 0 and 1. Fraction of the input units to drop.
        noise_shape: 1D integer tensor representing the shape of the
            binary dropout mask that will be multiplied with the input.
            For instance, if your inputs have shape
            `(batch_size, timesteps, features)` and you want the dropout
            mask to be the same for all timesteps, you can use
            `noise_shape=(batch_size, 1, features)`.
        seed: A Python integer to use as random seed.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should
            behave in training mode (adding dropout) or in inference mode
            (doing nothing).
    """

    def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        if not 0 <= rate <= 1:
            # FIX: removed stray `f` prefix on a placeholder-less string
            # fragment (lint F541); message is unchanged.
            raise ValueError(
                "Invalid value received for argument "
                "`rate`. Expected a float value between 0 and 1. "
                f"Received: rate={rate}"
            )
        self.rate = rate
        self.seed = seed
        self.noise_shape = noise_shape
        if rate > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        # Stateless layer: no weights to create.
        self.built = True

    def call(self, inputs, training=False):
        if training and self.rate > 0:
            return backend.random.dropout(
                inputs,
                self.rate,
                noise_shape=self.noise_shape,
                seed=self.seed_generator,
            )
        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {
            "rate": self.rate,
            "seed": self.seed,
            "noise_shape": self.noise_shape,
        }
        return {**base_config, **config}
@keras_export("keras.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
    """Apply multiplicative 1-centered Gaussian noise.

    As it is a regularization layer, it is only active at training time.

    Args:
        rate: Float, drop probability (as with `Dropout`).
            The multiplicative noise will have
            standard deviation `sqrt(rate / (1 - rate))`.
        seed: Integer, optional random seed to enable deterministic
            behavior.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should
            behave in training mode (adding dropout) or in inference mode
            (doing nothing).
    """

    def __init__(self, rate, seed=None, **kwargs):
        super().__init__(**kwargs)
        if not 0 <= rate <= 1:
            # FIX: removed stray `f` prefix on a placeholder-less string
            # fragment (lint F541); message is unchanged.
            raise ValueError(
                "Invalid value received for argument "
                "`rate`. Expected a float value between 0 and 1. "
                f"Received: rate={rate}"
            )
        self.rate = rate
        self.seed = seed
        if rate > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        # Stateless layer: no weights to create.
        self.built = True

    def call(self, inputs, training=False):
        if training and self.rate > 0:
            # Multiplicative noise centered at 1 with stddev derived from
            # the drop rate.
            stddev = math.sqrt(self.rate / (1.0 - self.rate))
            return inputs * backend.random.normal(
                shape=ops.shape(inputs),
                mean=1.0,
                stddev=stddev,
                dtype=self.compute_dtype,
                seed=self.seed_generator,
            )
        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {
            "rate": self.rate,
            "seed": self.seed,
        }
        return {**base_config, **config}
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
    """Apply additive zero-centered Gaussian noise.

    This is useful to mitigate overfitting
    (you could see it as a form of random data augmentation).
    Gaussian Noise (GS) is a natural choice as corruption process
    for real valued inputs.

    As it is a regularization layer, it is only active at training time.

    Args:
        stddev: Float, standard deviation of the noise distribution.
        seed: Integer, optional random seed to enable deterministic
            behavior.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should
            behave in training mode (adding noise) or in inference mode
            (doing nothing).
    """

    def __init__(self, stddev, seed=None, **kwargs):
        super().__init__(**kwargs)
        if not 0 <= stddev <= 1:
            # FIX: removed stray `f` prefix on a placeholder-less string
            # fragment (lint F541); message is unchanged.
            raise ValueError(
                "Invalid value received for argument "
                "`stddev`. Expected a float value between 0 and 1. "
                f"Received: stddev={stddev}"
            )
        self.stddev = stddev
        self.seed = seed
        if stddev > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        # Stateless layer: no weights to create.
        self.built = True

    def call(self, inputs, training=False):
        if training and self.stddev > 0:
            return inputs + backend.random.normal(
                shape=ops.shape(inputs),
                mean=0.0,
                stddev=self.stddev,
                dtype=self.compute_dtype,
                seed=self.seed_generator,
            )
        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {
            "stddev": self.stddev,
            "seed": self.seed,
        }
        return {**base_config, **config}
" + f"Received: stddev={stddev}" + ) + self.stddev = stddev + self.seed = seed + if stddev > 0: + self.seed_generator = backend.random.SeedGenerator(seed) + self.supports_masking = True + self.built = True + + def call(self, inputs, training=False): + if training and self.stddev > 0: + return inputs + backend.random.normal( + shape=ops.shape(inputs), + mean=0.0, + stddev=self.stddev, + dtype=self.compute_dtype, + seed=self.seed_generator, + ) + return inputs + + def compute_output_shape(self, input_shape): + return input_shape + + def get_config(self): + base_config = super().get_config() + config = { + "stddev": self.stddev, + "seed": self.seed, + } + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/spatial_dropout.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/spatial_dropout.py new file mode 100644 index 0000000000000000000000000000000000000000..5f440164f40d4aee3af0d75250548f504c358446 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/regularization/spatial_dropout.py @@ -0,0 +1,192 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.regularization.dropout import Dropout + + +class BaseSpatialDropout(Dropout): + def __init__(self, rate, seed=None, name=None, dtype=None): + super().__init__(rate, seed=seed, name=name, dtype=dtype) + + def call(self, inputs, training=False): + if training and self.rate > 0: + return backend.random.dropout( + inputs, + self.rate, + noise_shape=self._get_noise_shape(inputs), + seed=self.seed_generator, + ) + return inputs + + def get_config(self): + return { + "rate": self.rate, + "seed": self.seed, + "name": self.name, + "dtype": self.dtype, + } + + +@keras_export("keras.layers.SpatialDropout1D") +class 
SpatialDropout1D(BaseSpatialDropout): + """Spatial 1D version of Dropout. + + This layer performs the same function as Dropout, however, it drops + entire 1D feature maps instead of individual elements. If adjacent frames + within feature maps are strongly correlated (as is normally the case in + early convolution layers) then regular dropout will not regularize the + activations and will otherwise just result in an effective learning rate + decrease. In this case, `SpatialDropout1D` will help promote independence + between feature maps and should be used instead. + + Args: + rate: Float between 0 and 1. Fraction of the input units to drop. + + Call arguments: + inputs: A 3D tensor. + training: Python boolean indicating whether the layer + should behave in training mode (applying dropout) + or in inference mode (pass-through). + + Input shape: + 3D tensor with shape: `(samples, timesteps, channels)` + + Output shape: Same as input. + + Reference: + + - [Tompson et al., 2014](https://arxiv.org/abs/1411.4280) + """ + + def __init__(self, rate, seed=None, name=None, dtype=None): + super().__init__(rate, seed=seed, name=name, dtype=dtype) + self.input_spec = InputSpec(ndim=3) + + def _get_noise_shape(self, inputs): + input_shape = ops.shape(inputs) + return (input_shape[0], 1, input_shape[2]) + + +@keras_export("keras.layers.SpatialDropout2D") +class SpatialDropout2D(BaseSpatialDropout): + """Spatial 2D version of Dropout. + + This version performs the same function as Dropout, however, it drops + entire 2D feature maps instead of individual elements. If adjacent pixels + within feature maps are strongly correlated (as is normally the case in + early convolution layers) then regular dropout will not regularize the + activations and will otherwise just result in an effective learning rate + decrease. In this case, `SpatialDropout2D` will help promote independence + between feature maps and should be used instead. + + Args: + rate: Float between 0 and 1. 
Fraction of the input units to drop. + data_format: `"channels_first"` or `"channels_last"`. + In `"channels_first"` mode, the channels dimension (the depth) + is at index 1, in `"channels_last"` mode is it at index 3. + It defaults to the `image_data_format` value found in your + Keras config file at `~/.keras/keras.json`. + If you never set it, then it will be `"channels_last"`. + + Call arguments: + inputs: A 4D tensor. + training: Python boolean indicating whether the layer + should behave in training mode (applying dropout) + or in inference mode (pass-through). + + Input shape: + 4D tensor with shape: `(samples, channels, rows, cols)` if + data_format='channels_first' + or 4D tensor with shape: `(samples, rows, cols, channels)` if + data_format='channels_last'. + + Output shape: Same as input. + + Reference: + + - [Tompson et al., 2014](https://arxiv.org/abs/1411.4280) + """ + + def __init__( + self, rate, data_format=None, seed=None, name=None, dtype=None + ): + super().__init__(rate, seed=seed, name=name, dtype=dtype) + self.data_format = backend.standardize_data_format(data_format) + self.input_spec = InputSpec(ndim=4) + + def _get_noise_shape(self, inputs): + input_shape = ops.shape(inputs) + if self.data_format == "channels_first": + return (input_shape[0], input_shape[1], 1, 1) + elif self.data_format == "channels_last": + return (input_shape[0], 1, 1, input_shape[3]) + + def get_config(self): + base_config = super().get_config() + config = { + "data_format": self.data_format, + } + return {**base_config, **config} + + +@keras_export("keras.layers.SpatialDropout3D") +class SpatialDropout3D(BaseSpatialDropout): + """Spatial 3D version of Dropout. + + This version performs the same function as Dropout, however, it drops + entire 3D feature maps instead of individual elements. 
If adjacent voxels + within feature maps are strongly correlated (as is normally the case in + early convolution layers) then regular dropout will not regularize the + activations and will otherwise just result in an effective learning rate + decrease. In this case, SpatialDropout3D will help promote independence + between feature maps and should be used instead. + + Args: + rate: Float between 0 and 1. Fraction of the input units to drop. + data_format: `"channels_first"` or `"channels_last"`. + In `"channels_first"` mode, the channels dimension (the depth) + is at index 1, in `"channels_last"` mode is it at index 4. + It defaults to the `image_data_format` value found in your + Keras config file at `~/.keras/keras.json`. + If you never set it, then it will be `"channels_last"`. + + Call arguments: + inputs: A 5D tensor. + training: Python boolean indicating whether the layer + should behave in training mode (applying dropout) + or in inference mode (pass-through). + + Input shape: + 5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if + data_format='channels_first' + or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if + data_format='channels_last'. + + Output shape: Same as input. 
+ + Reference: + + - [Tompson et al., 2014](https://arxiv.org/abs/1411.4280) + """ + + def __init__( + self, rate, data_format=None, seed=None, name=None, dtype=None + ): + super().__init__(rate, seed=seed, name=name, dtype=dtype) + self.data_format = backend.standardize_data_format(data_format) + self.input_spec = InputSpec(ndim=5) + + def _get_noise_shape(self, inputs): + input_shape = ops.shape(inputs) + if self.data_format == "channels_first": + return (input_shape[0], input_shape[1], 1, 1, 1) + elif self.data_format == "channels_last": + return (input_shape[0], 1, 1, 1, input_shape[4]) + + def get_config(self): + base_config = super().get_config() + config = { + "data_format": self.data_format, + } + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f17b80d096d1c1d4704be11e5b2ff65ab6aa95b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping1d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1d125288b5f0a2a34b0a80b4bb1292d4b589eba Binary files 
/dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping1d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping2d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd2a2a9df0424ba206367721a01f7f89eb46117e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping2d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping3d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1ed92b9cc0ba720dedde9cb9dcd6c1a1acff7a1 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/cropping3d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/flatten.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/flatten.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa8fc7481d0d727ca4ab8ad8961018bcf20f5107 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/flatten.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/permute.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/permute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3c1d88c7da117b63b1c0aac6381e6ce5f625430 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/permute.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/repeat_vector.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/repeat_vector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41fa2f9e2835c74562886841730c8122e62bf951 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/repeat_vector.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/reshape.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/reshape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f98f0f8262236be38e69ea88e73365c57049702 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/reshape.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling1d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70e45f416c34b9294d63fdbbc6500b796d451414 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling1d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling2d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e823e098bf895763a93d646b6dd27559f0afccb Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling2d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling3d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d07e72321040fb8a5cf681228a89677a5705032 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/up_sampling3d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding1d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e635236b575fa0092bca601eac56c83a356e0fd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding1d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding2d.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67e40d0dfdac8d4662e01def7bcb7aacbfafdb66 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding2d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding3d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11a0b582b966db09cb7156d060856ba2ac31c2c9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/__pycache__/zero_padding3d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping1d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping1d.py new file mode 100644 index 0000000000000000000000000000000000000000..abce618dff658b48a0218bfef3ce90682fe1b1c7 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping1d.py @@ -0,0 +1,82 @@ +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + + +@keras_export("keras.layers.Cropping1D") +class Cropping1D(Layer): + """Cropping layer for 1D input (e.g. temporal sequence). + + It crops along the time dimension (axis 1). 
+ + Example: + + >>> input_shape = (2, 3, 2) + >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) + >>> x + [[[ 0 1] + [ 2 3] + [ 4 5]] + [[ 6 7] + [ 8 9] + [10 11]]] + >>> y = keras.layers.Cropping1D(cropping=1)(x) + >>> y + [[[2 3]] + [[8 9]]] + + Args: + cropping: Int, or tuple of int (length 2), or dictionary. + - If int: how many units should be trimmed off at the beginning and + end of the cropping dimension (axis 1). + - If tuple of 2 ints: how many units should be trimmed off at the + beginning and end of the cropping dimension + (`(left_crop, right_crop)`). + + Input shape: + 3D tensor with shape `(batch_size, axis_to_crop, features)` + + Output shape: + 3D tensor with shape `(batch_size, cropped_axis, features)` + """ + + def __init__(self, cropping=(1, 1), **kwargs): + super().__init__(**kwargs) + self.cropping = argument_validation.standardize_tuple( + cropping, 2, "cropping", allow_zero=True + ) + self.input_spec = InputSpec(ndim=3) + + def compute_output_shape(self, input_shape): + if input_shape[1] is not None: + length = input_shape[1] - self.cropping[0] - self.cropping[1] + if length <= 0: + raise ValueError( + "`cropping` parameter of `Cropping1D` layer must be " + "smaller than the input length. Received: input_shape=" + f"{input_shape}, cropping={self.cropping}" + ) + else: + length = None + return (input_shape[0], length, input_shape[2]) + + def call(self, inputs): + if ( + inputs.shape[1] is not None + and sum(self.cropping) >= inputs.shape[1] + ): + raise ValueError( + "`cropping` parameter of `Cropping1D` layer must be " + "smaller than the input length. 
Received: inputs.shape=" + f"{inputs.shape}, cropping={self.cropping}" + ) + if self.cropping[1] == 0: + return inputs[:, self.cropping[0] :, :] + else: + return inputs[:, self.cropping[0] : -self.cropping[1], :] + + def get_config(self): + config = {"cropping": self.cropping} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping2d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping2d.py new file mode 100644 index 0000000000000000000000000000000000000000..aec6813a861fd294796d90a6bad86e658939c037 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping2d.py @@ -0,0 +1,224 @@ +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + + +@keras_export("keras.layers.Cropping2D") +class Cropping2D(Layer): + """Cropping layer for 2D input (e.g. picture). + + It crops along spatial dimensions, i.e. height and width. + + Example: + + >>> input_shape = (2, 28, 28, 3) + >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) + >>> y = keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x) + >>> y.shape + (2, 24, 20, 3) + + Args: + cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. + - If int: the same symmetric cropping is applied to height and + width. + - If tuple of 2 ints: interpreted as two different symmetric + cropping values for height and width: + `(symmetric_height_crop, symmetric_width_crop)`. + - If tuple of 2 tuples of 2 ints: interpreted as + `((top_crop, bottom_crop), (left_crop, right_crop))`. + data_format: A string, one of `"channels_last"` (default) or + `"channels_first"`. The ordering of the dimensions in the inputs. 
+ `"channels_last"` corresponds to inputs with shape + `(batch_size, height, width, channels)` while `"channels_first"` + corresponds to inputs with shape + `(batch_size, channels, height, width)`. + When unspecified, uses `image_data_format` value found in your Keras + config file at `~/.keras/keras.json` (if exists). Defaults to + `"channels_last"`. + + Input shape: + 4D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, height, width, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, height, width)` + + Output shape: + 4D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, cropped_height, cropped_width, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, cropped_height, cropped_width)` + """ + + def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + if isinstance(cropping, int): + if cropping < 0: + raise ValueError( + "`cropping` cannot be negative. " + f"Received: cropping={cropping}." + ) + self.cropping = ((cropping, cropping), (cropping, cropping)) + elif hasattr(cropping, "__len__"): + if len(cropping) != 2: + raise ValueError( + "`cropping` should have two elements. " + f"Received: cropping={cropping}." + ) + height_cropping = argument_validation.standardize_tuple( + cropping[0], 2, "1st entry of cropping", allow_zero=True + ) + width_cropping = argument_validation.standardize_tuple( + cropping[1], 2, "2nd entry of cropping", allow_zero=True + ) + self.cropping = (height_cropping, width_cropping) + else: + raise ValueError( + "`cropping` should be either an int, a tuple of 2 ints " + "(symmetric_height_crop, symmetric_width_crop), " + "or a tuple of 2 tuples of 2 ints " + "((top_crop, bottom_crop), (left_crop, right_crop)). " + f"Received: cropping={cropping}." 
+ ) + self.input_spec = InputSpec(ndim=4) + + def compute_output_shape(self, input_shape): + if self.data_format == "channels_first": + if ( + input_shape[2] is not None + and sum(self.cropping[0]) >= input_shape[2] + ) or ( + input_shape[3] is not None + and sum(self.cropping[1]) >= input_shape[3] + ): + raise ValueError( + "Values in `cropping` argument should be smaller than the " + "corresponding spatial dimension of the input. Received: " + f"input_shape={input_shape}, cropping={self.cropping}" + ) + return ( + input_shape[0], + input_shape[1], + ( + input_shape[2] - self.cropping[0][0] - self.cropping[0][1] + if input_shape[2] is not None + else None + ), + ( + input_shape[3] - self.cropping[1][0] - self.cropping[1][1] + if input_shape[3] is not None + else None + ), + ) + else: + if ( + input_shape[1] is not None + and sum(self.cropping[0]) >= input_shape[1] + ) or ( + input_shape[2] is not None + and sum(self.cropping[1]) >= input_shape[2] + ): + raise ValueError( + "Values in `cropping` argument should be smaller than the " + "corresponding spatial dimension of the input. Received: " + f"input_shape={input_shape}, cropping={self.cropping}" + ) + return ( + input_shape[0], + ( + input_shape[1] - self.cropping[0][0] - self.cropping[0][1] + if input_shape[1] is not None + else None + ), + ( + input_shape[2] - self.cropping[1][0] - self.cropping[1][1] + if input_shape[2] is not None + else None + ), + input_shape[3], + ) + + def call(self, inputs): + if self.data_format == "channels_first": + if ( + inputs.shape[2] is not None + and sum(self.cropping[0]) >= inputs.shape[2] + ) or ( + inputs.shape[3] is not None + and sum(self.cropping[1]) >= inputs.shape[3] + ): + raise ValueError( + "Values in `cropping` argument should be smaller than the " + "corresponding spatial dimension of the input. 
Received: " + f"inputs.shape={inputs.shape}, cropping={self.cropping}" + ) + if self.cropping[0][1] == self.cropping[1][1] == 0: + return inputs[ + :, :, self.cropping[0][0] :, self.cropping[1][0] : + ] + elif self.cropping[0][1] == 0: + return inputs[ + :, + :, + self.cropping[0][0] :, + self.cropping[1][0] : -self.cropping[1][1], + ] + elif self.cropping[1][1] == 0: + return inputs[ + :, + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] :, + ] + return inputs[ + :, + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] : -self.cropping[1][1], + ] + else: + if ( + inputs.shape[1] is not None + and sum(self.cropping[0]) >= inputs.shape[1] + ) or ( + inputs.shape[2] is not None + and sum(self.cropping[1]) >= inputs.shape[2] + ): + raise ValueError( + "Values in `cropping` argument should be smaller than the " + "corresponding spatial dimension of the input. Received: " + f"inputs.shape={inputs.shape}, cropping={self.cropping}" + ) + if self.cropping[0][1] == self.cropping[1][1] == 0: + return inputs[ + :, self.cropping[0][0] :, self.cropping[1][0] :, : + ] + elif self.cropping[0][1] == 0: + return inputs[ + :, + self.cropping[0][0] :, + self.cropping[1][0] : -self.cropping[1][1], + :, + ] + elif self.cropping[1][1] == 0: + return inputs[ + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] :, + :, + ] + return inputs[ + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] : -self.cropping[1][1], + :, + ] + + def get_config(self): + config = {"cropping": self.cropping, "data_format": self.data_format} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping3d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping3d.py new file mode 100644 index 
0000000000000000000000000000000000000000..724d0cf72635ee94f10d88761a6bee3179d3a963 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/cropping3d.py @@ -0,0 +1,284 @@ +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + + +@keras_export("keras.layers.Cropping3D") +class Cropping3D(Layer): + """Cropping layer for 3D data (e.g. spatial or spatio-temporal). + + Example: + + >>> input_shape = (2, 28, 28, 10, 3) + >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) + >>> y = keras.layers.Cropping3D(cropping=(2, 4, 2))(x) + >>> y.shape + (2, 24, 20, 6, 3) + + Args: + cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. + - If int: the same symmetric cropping is applied to depth, height, + and width. + - If tuple of 3 ints: interpreted as three different symmetric + cropping values for depth, height, and width: + `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`. + - If tuple of 3 tuples of 2 ints: interpreted as + `((left_dim1_crop, right_dim1_crop), (left_dim2_crop, + right_dim2_crop), (left_dim3_crop, right_dim3_crop))`. + data_format: A string, one of `"channels_last"` (default) or + `"channels_first"`. The ordering of the dimensions in the inputs. + `"channels_last"` corresponds to inputs with shape + `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. + When unspecified, uses `image_data_format` value found in your Keras + config file at `~/.keras/keras.json` (if exists). Defaults to + `"channels_last"`. 
+ + Input shape: + 5D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, first_axis_to_crop, second_axis_to_crop, + third_axis_to_crop, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, first_axis_to_crop, second_axis_to_crop, + third_axis_to_crop)` + + Output shape: + 5D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, first_cropped_axis, second_cropped_axis, + third_cropped_axis, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, first_cropped_axis, second_cropped_axis, + third_cropped_axis)` + """ + + def __init__( + self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs + ): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + if isinstance(cropping, int): + if cropping < 0: + raise ValueError( + "`cropping` cannot be negative. " + f"Received: cropping={cropping}." + ) + self.cropping = ( + (cropping, cropping), + (cropping, cropping), + (cropping, cropping), + ) + elif hasattr(cropping, "__len__"): + if len(cropping) != 3: + raise ValueError( + f"`cropping` should have 3 elements. Received: {cropping}." + ) + dim1_cropping = argument_validation.standardize_tuple( + cropping[0], 2, "1st entry of cropping", allow_zero=True + ) + dim2_cropping = argument_validation.standardize_tuple( + cropping[1], 2, "2nd entry of cropping", allow_zero=True + ) + dim3_cropping = argument_validation.standardize_tuple( + cropping[2], 2, "3rd entry of cropping", allow_zero=True + ) + self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping) + else: + raise ValueError( + "`cropping` should be either an int, a tuple of 3 ints " + "(symmetric_dim1_crop, symmetric_dim2_crop, " + "symmetric_dim3_crop), " + "or a tuple of 3 tuples of 2 ints " + "((left_dim1_crop, right_dim1_crop)," + " (left_dim2_crop, right_dim2_crop)," + " (left_dim3_crop, right_dim2_crop)). " + f"Received: {cropping}." 
+ ) + self.input_spec = InputSpec(ndim=5) + + def compute_output_shape(self, input_shape): + if self.data_format == "channels_first": + spatial_dims = list(input_shape[2:5]) + else: + spatial_dims = list(input_shape[1:4]) + + for index in range(0, 3): + if spatial_dims[index] is None: + continue + spatial_dims[index] -= sum(self.cropping[index]) + if spatial_dims[index] <= 0: + raise ValueError( + "Values in `cropping` argument should be smaller than the " + "corresponding spatial dimension of the input. Received: " + f"input_shape={input_shape}, cropping={self.cropping}" + ) + + if self.data_format == "channels_first": + return (input_shape[0], input_shape[1], *spatial_dims) + else: + return (input_shape[0], *spatial_dims, input_shape[4]) + + def call(self, inputs): + if self.data_format == "channels_first": + spatial_dims = list(inputs.shape[2:5]) + else: + spatial_dims = list(inputs.shape[1:4]) + + for index in range(0, 3): + if spatial_dims[index] is None: + continue + spatial_dims[index] -= sum(self.cropping[index]) + if spatial_dims[index] <= 0: + raise ValueError( + "Values in `cropping` argument should be smaller than the " + "corresponding spatial dimension of the input. 
Received: " + f"inputs.shape={inputs.shape}, cropping={self.cropping}" + ) + + if self.data_format == "channels_first": + if ( + self.cropping[0][1] + == self.cropping[1][1] + == self.cropping[2][1] + == 0 + ): + return inputs[ + :, + :, + self.cropping[0][0] :, + self.cropping[1][0] :, + self.cropping[2][0] :, + ] + elif self.cropping[0][1] == self.cropping[1][1] == 0: + return inputs[ + :, + :, + self.cropping[0][0] :, + self.cropping[1][0] :, + self.cropping[2][0] : -self.cropping[2][1], + ] + elif self.cropping[1][1] == self.cropping[2][1] == 0: + return inputs[ + :, + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] :, + self.cropping[2][0] :, + ] + elif self.cropping[0][1] == self.cropping[2][1] == 0: + return inputs[ + :, + :, + self.cropping[0][0] :, + self.cropping[1][0] : -self.cropping[1][1], + self.cropping[2][0] :, + ] + elif self.cropping[0][1] == 0: + return inputs[ + :, + :, + self.cropping[0][0] :, + self.cropping[1][0] : -self.cropping[1][1], + self.cropping[2][0] : -self.cropping[2][1], + ] + elif self.cropping[1][1] == 0: + return inputs[ + :, + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] :, + self.cropping[2][0] : -self.cropping[2][1], + ] + elif self.cropping[2][1] == 0: + return inputs[ + :, + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] : -self.cropping[1][1], + self.cropping[2][0] :, + ] + return inputs[ + :, + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] : -self.cropping[1][1], + self.cropping[2][0] : -self.cropping[2][1], + ] + else: + if ( + self.cropping[0][1] + == self.cropping[1][1] + == self.cropping[2][1] + == 0 + ): + return inputs[ + :, + self.cropping[0][0] :, + self.cropping[1][0] :, + self.cropping[2][0] :, + :, + ] + elif self.cropping[0][1] == self.cropping[1][1] == 0: + return inputs[ + :, + self.cropping[0][0] :, + self.cropping[1][0] :, + self.cropping[2][0] : -self.cropping[2][1], + :, + ] + elif self.cropping[1][1] == 
self.cropping[2][1] == 0: + return inputs[ + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] :, + self.cropping[2][0] :, + :, + ] + elif self.cropping[0][1] == self.cropping[2][1] == 0: + return inputs[ + :, + self.cropping[0][0] :, + self.cropping[1][0] : -self.cropping[1][1], + self.cropping[2][0] :, + :, + ] + elif self.cropping[0][1] == 0: + return inputs[ + :, + self.cropping[0][0] :, + self.cropping[1][0] : -self.cropping[1][1], + self.cropping[2][0] : -self.cropping[2][1], + :, + ] + elif self.cropping[1][1] == 0: + return inputs[ + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] :, + self.cropping[2][0] : -self.cropping[2][1], + :, + ] + elif self.cropping[2][1] == 0: + return inputs[ + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] : -self.cropping[1][1], + self.cropping[2][0] :, + :, + ] + return inputs[ + :, + self.cropping[0][0] : -self.cropping[0][1], + self.cropping[1][0] : -self.cropping[1][1], + self.cropping[2][0] : -self.cropping[2][1], + :, + ] + + def get_config(self): + config = {"cropping": self.cropping, "data_format": self.data_format} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/flatten.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/flatten.py new file mode 100644 index 0000000000000000000000000000000000000000..84aad840246ca63a211309065dc6002ed0299e5d --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/flatten.py @@ -0,0 +1,80 @@ +import math + +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.Flatten") +class 
Flatten(Layer): + """Flattens the input. Does not affect the batch size. + + Note: If inputs are shaped `(batch,)` without a feature axis, then + flattening adds an extra channel dimension and output shape is `(batch, 1)`. + + Args: + data_format: A string, one of `"channels_last"` (default) or + `"channels_first"`. The ordering of the dimensions in the inputs. + `"channels_last"` corresponds to inputs with shape + `(batch, ..., channels)` while `"channels_first"` corresponds to + inputs with shape `(batch, channels, ...)`. + When unspecified, uses `image_data_format` value found in your Keras + config file at `~/.keras/keras.json` (if exists). Defaults to + `"channels_last"`. + + Example: + + >>> x = keras.Input(shape=(10, 64)) + >>> y = keras.layers.Flatten()(x) + >>> y.shape + (None, 640) + """ + + def __init__(self, data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.input_spec = InputSpec(min_ndim=1) + self._channels_first = self.data_format == "channels_first" + + def call(self, inputs): + input_shape = inputs.shape + rank = len(input_shape) + + if self._channels_first and rank > 1: + # Switch to channels-last format. + inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1)) + + output_shape = tuple( + dim if dim is not None else -1 + for dim in self.compute_output_shape(input_shape) + ) + return ops.reshape(inputs, output_shape) + + def compute_output_shape(self, input_shape): + non_batch_dims = input_shape[1:] + if len(non_batch_dims) == 0: + flattened_dim = 1 + elif any(d is None for d in non_batch_dims): + # NB: we cannot use the shorter `None in non_batch_dims` here b/c + # torchdynamo errors when calling `__contains__` op with + # a constant (in this case `None`) operand since it assumes + # that the elements in the collection are also `ConstantVariable`s + # but tensor shapes can be `SymNodeVariable`s (e.g. 
`SymInt`) + flattened_dim = None + else: + flattened_dim = math.prod(non_batch_dims) + return (input_shape[0], flattened_dim) + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape(inputs.shape) + return KerasTensor( + shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse + ) + + def get_config(self): + config = {"data_format": self.data_format} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/permute.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/permute.py new file mode 100644 index 0000000000000000000000000000000000000000..86580dfa08204c9ab865a3b4800e35ddd0ee324e --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/permute.py @@ -0,0 +1,64 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.Permute") +class Permute(Layer): + """Permutes the dimensions of the input according to a given pattern. + + Useful e.g. connecting RNNs and convnets. + + Args: + dims: Tuple of integers. Permutation pattern does not include the + batch dimension. Indexing starts at 1. + For instance, `(1, 3, 2)` permutes the second and third dimensions + of the input. + + Input shape: + Arbitrary. + + Output shape: + Same as the input shape, but with the dimensions re-ordered according + to the specified pattern. 
+ + Example: + + >>> x = keras.Input(shape=(10, 64)) + >>> y = keras.layers.Permute((2, 1))(x) + >>> y.shape + (None, 64, 10) + """ + + def __init__(self, dims, **kwargs): + super().__init__(**kwargs) + self.dims = tuple(dims) + if sorted(dims) != list(range(1, len(dims) + 1)): + raise ValueError( + "Invalid permutation argument `dims` for Permute Layer. " + "The set of indices in `dims` must be consecutive and start " + f"from 1. Received dims={dims}" + ) + self.input_spec = InputSpec(ndim=len(self.dims) + 1) + + def compute_output_shape(self, input_shape): + output_shape = [input_shape[0]] + for dim in self.dims: + output_shape.append(input_shape[dim]) + return tuple(output_shape) + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape(inputs.shape) + return KerasTensor( + shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse + ) + + def call(self, inputs): + return ops.transpose(inputs, axes=(0,) + self.dims) + + def get_config(self): + config = {"dims": self.dims} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/repeat_vector.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/repeat_vector.py new file mode 100644 index 0000000000000000000000000000000000000000..d8914d10fce77bd8fc0ab879d63a70bf92b1f867 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/repeat_vector.py @@ -0,0 +1,48 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.RepeatVector") +class RepeatVector(Layer): + """Repeats the input n times. 
+ + Example: + + >>> x = keras.Input(shape=(32,)) + >>> y = keras.layers.RepeatVector(3)(x) + >>> y.shape + (None, 3, 32) + + Args: + n: Integer, repetition factor. + + Input shape: + 2D tensor with shape `(batch_size, features)`. + + Output shape: + 3D tensor with shape `(batch_size, n, features)`. + """ + + def __init__(self, n, **kwargs): + super().__init__(**kwargs) + self.n = n + if not isinstance(n, int): + raise TypeError( + f"Expected an integer value for `n`, got {type(n)}." + ) + self.input_spec = InputSpec(ndim=2) + + def compute_output_shape(self, input_shape): + return (input_shape[0], self.n, input_shape[1]) + + def call(self, inputs): + input_shape = ops.shape(inputs) + reshaped = ops.reshape(inputs, (input_shape[0], 1, input_shape[1])) + return ops.repeat(reshaped, self.n, axis=1) + + def get_config(self): + config = {"n": self.n} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/reshape.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/reshape.py new file mode 100644 index 0000000000000000000000000000000000000000..c87e4bd7381b0628bf2ec3ad20c7d30f9844042e --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/reshape.py @@ -0,0 +1,73 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.layer import Layer +from keras.src.ops import operation_utils + + +@keras_export("keras.layers.Reshape") +class Reshape(Layer): + """Layer that reshapes inputs into the given shape. + + Args: + target_shape: Target shape. Tuple of integers, does not include the + samples dimension (batch size). + + Input shape: + Arbitrary, although all dimensions in the input shape must be + known/fixed. 
Use the keyword argument `input_shape` (tuple of integers, + does not include the samples/batch size axis) when using this layer as + the first layer in a model. + + Output shape: + `(batch_size, *target_shape)` + + Example: + + >>> x = keras.Input(shape=(12,)) + >>> y = keras.layers.Reshape((3, 4))(x) + >>> y.shape + (None, 3, 4) + + >>> # also supports shape inference using `-1` as dimension + >>> y = keras.layers.Reshape((-1, 2, 2))(x) + >>> y.shape + (None, 3, 2, 2) + """ + + def __init__(self, target_shape, **kwargs): + super().__init__(**kwargs) + self.target_shape = tuple(target_shape) + + def compute_output_shape(self, input_shape): + return ( + input_shape[0], + *operation_utils.compute_reshape_output_shape( + input_shape[1:], self.target_shape, "target_shape" + ), + ) + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape(inputs.shape) + return KerasTensor( + shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse + ) + + def build(self, input_shape): + sample_output_shape = operation_utils.compute_reshape_output_shape( + input_shape[1:], self.target_shape, "target_shape" + ) + self._resolved_target_shape = tuple( + -1 if d is None else d for d in sample_output_shape + ) + self.built = True + + def call(self, inputs): + return ops.reshape( + inputs, (ops.shape(inputs)[0],) + self._resolved_target_shape + ) + + def get_config(self): + config = {"target_shape": self.target_shape} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling1d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling1d.py new file mode 100644 index 0000000000000000000000000000000000000000..47a16b9824f403bdd176d5d7f30e987811738d3a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling1d.py @@ -0,0 +1,60 @@ +from 
keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.UpSampling1D") +class UpSampling1D(Layer): + """Upsampling layer for 1D inputs. + + Repeats each temporal step `size` times along the time axis. + + Example: + + >>> input_shape = (2, 2, 3) + >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) + >>> x + [[[ 0 1 2] + [ 3 4 5]] + [[ 6 7 8] + [ 9 10 11]]] + >>> y = keras.layers.UpSampling1D(size=2)(x) + >>> y + [[[ 0. 1. 2.] + [ 0. 1. 2.] + [ 3. 4. 5.] + [ 3. 4. 5.]] + [[ 6. 7. 8.] + [ 6. 7. 8.] + [ 9. 10. 11.] + [ 9. 10. 11.]]] + + Args: + size: Integer. Upsampling factor. + + Input shape: + 3D tensor with shape: `(batch_size, steps, features)`. + + Output shape: + 3D tensor with shape: `(batch_size, upsampled_steps, features)`. + """ + + def __init__(self, size=2, **kwargs): + super().__init__(**kwargs) + self.size = int(size) + self.input_spec = InputSpec(ndim=3) + + def compute_output_shape(self, input_shape): + size = ( + self.size * input_shape[1] if input_shape[1] is not None else None + ) + return [input_shape[0], size, input_shape[2]] + + def call(self, inputs): + return ops.repeat(x=inputs, repeats=self.size, axis=1) + + def get_config(self): + config = {"size": self.size} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling2d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling2d.py new file mode 100644 index 0000000000000000000000000000000000000000..cb046f863583fd07df8f3865081d548e1eac0d06 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling2d.py @@ -0,0 +1,170 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import 
keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + + +@keras_export("keras.layers.UpSampling2D") +class UpSampling2D(Layer): + """Upsampling layer for 2D inputs. + + The implementation uses interpolative resizing, given the resize method + (specified by the `interpolation` argument). Use `interpolation=nearest` + to repeat the rows and columns of the data. + + Example: + + >>> input_shape = (2, 2, 1, 3) + >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) + >>> print(x) + [[[[ 0 1 2]] + [[ 3 4 5]]] + [[[ 6 7 8]] + [[ 9 10 11]]]] + >>> y = keras.layers.UpSampling2D(size=(1, 2))(x) + >>> print(y) + [[[[ 0 1 2] + [ 0 1 2]] + [[ 3 4 5] + [ 3 4 5]]] + [[[ 6 7 8] + [ 6 7 8]] + [[ 9 10 11] + [ 9 10 11]]]] + + Args: + size: Int, or tuple of 2 integers. + The upsampling factors for rows and columns. + data_format: A string, + one of `"channels_last"` (default) or `"channels_first"`. + The ordering of the dimensions in the inputs. + `"channels_last"` corresponds to inputs with shape + `(batch_size, height, width, channels)` while `"channels_first"` + corresponds to inputs with shape + `(batch_size, channels, height, width)`. + When unspecified, uses + `image_data_format` value found in your Keras config file at + `~/.keras/keras.json` (if exists) else `"channels_last"`. + Defaults to `"channels_last"`. + interpolation: A string, one of `"bicubic"`, `"bilinear"`, `"lanczos3"`, + `"lanczos5"`, `"nearest"`. 
+ + Input shape: + 4D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, rows, cols, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, rows, cols)` + + Output shape: + 4D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, upsampled_rows, upsampled_cols, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, upsampled_rows, upsampled_cols)` + """ + + def __init__( + self, size=(2, 2), data_format=None, interpolation="nearest", **kwargs + ): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.size = argument_validation.standardize_tuple(size, 2, "size") + self.interpolation = interpolation.lower() + self.input_spec = InputSpec(ndim=4) + + def compute_output_shape(self, input_shape): + if self.data_format == "channels_first": + height = ( + self.size[0] * input_shape[2] + if input_shape[2] is not None + else None + ) + width = ( + self.size[1] * input_shape[3] + if input_shape[3] is not None + else None + ) + return (input_shape[0], input_shape[1], height, width) + else: + height = ( + self.size[0] * input_shape[1] + if input_shape[1] is not None + else None + ) + width = ( + self.size[1] * input_shape[2] + if input_shape[2] is not None + else None + ) + return (input_shape[0], height, width, input_shape[3]) + + def call(self, inputs): + return self._resize_images( + inputs, + self.size[0], + self.size[1], + self.data_format, + interpolation=self.interpolation, + ) + + def get_config(self): + config = { + "size": self.size, + "data_format": self.data_format, + "interpolation": self.interpolation, + } + base_config = super().get_config() + return {**base_config, **config} + + def _resize_images( + self, + x, + height_factor, + width_factor, + data_format, + interpolation="nearest", + ): + """Resizes the images contained in a 4D tensor. + + Args: + x: Tensor or variable to resize. 
+ height_factor: Positive integer. + width_factor: Positive integer. + data_format: One of `"channels_first"`, `"channels_last"`. + interpolation: A string, one of `"bicubic"`, `"bilinear"`, + `"lanczos3"`, `"lanczos5"`, or `"nearest"`. + + Returns: + A tensor. + """ + if data_format not in {"channels_last", "channels_first"}: + raise ValueError(f"Invalid `data_format` argument: {data_format}") + + if data_format == "channels_first": + x = ops.transpose(x, [0, 2, 3, 1]) + # https://github.com/keras-team/keras/issues/294 + # Use `ops.repeat` for `nearest` interpolation to enable XLA + if interpolation == "nearest": + x = ops.repeat(x, height_factor, axis=1) + x = ops.repeat(x, width_factor, axis=2) + else: + # multiply the height and width factor on each dim + # by hand (versus using element-wise multiplication + # by np.array([height_factor, width_factor]) then + # list-ifying the tensor by calling `.tolist()`) + # since when running under torchdynamo, `new_shape` + # will be traced as a symbolic variable (specifically + # a `FakeTensor`) which does not have a `tolist()` method. 
+ shape = ops.shape(x) + new_shape = ( + shape[1] * height_factor, + shape[2] * width_factor, + ) + x = ops.image.resize(x, new_shape, interpolation=interpolation) + if data_format == "channels_first": + x = ops.transpose(x, [0, 3, 1, 2]) + + return x diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling3d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling3d.py new file mode 100644 index 0000000000000000000000000000000000000000..3b642e48ef6a46bc3b666ccec66167541fb3b88b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/up_sampling3d.py @@ -0,0 +1,134 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + + +@keras_export("keras.layers.UpSampling3D") +class UpSampling3D(Layer): + """Upsampling layer for 3D inputs. + + Repeats the 1st, 2nd and 3rd dimensions + of the data by `size[0]`, `size[1]` and `size[2]` respectively. + + Example: + + >>> input_shape = (2, 1, 2, 1, 3) + >>> x = np.ones(input_shape) + >>> y = keras.layers.UpSampling3D(size=(2, 2, 2))(x) + >>> y.shape + (2, 2, 4, 2, 3) + + Args: + size: Int, or tuple of 3 integers. + The upsampling factors for dim1, dim2 and dim3. + data_format: A string, + one of `"channels_last"` (default) or `"channels_first"`. + The ordering of the dimensions in the inputs. + `"channels_last"` corresponds to inputs with shape + `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. + When unspecified, uses + `image_data_format` value found in your Keras config file at + `~/.keras/keras.json` (if exists) else `"channels_last"`. 
+ Defaults to `"channels_last"`. + + Input shape: + 5D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, dim1, dim2, dim3, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, dim1, dim2, dim3)` + + Output shape: + 5D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, + channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, upsampled_dim1, upsampled_dim2, + upsampled_dim3)` + """ + + def __init__(self, size=(2, 2, 2), data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.size = argument_validation.standardize_tuple(size, 3, "size") + self.input_spec = InputSpec(ndim=5) + + def compute_output_shape(self, input_shape): + if self.data_format == "channels_first": + dim1 = ( + self.size[0] * input_shape[2] + if input_shape[2] is not None + else None + ) + dim2 = ( + self.size[1] * input_shape[3] + if input_shape[3] is not None + else None + ) + dim3 = ( + self.size[2] * input_shape[4] + if input_shape[4] is not None + else None + ) + return (input_shape[0], input_shape[1], dim1, dim2, dim3) + else: + dim1 = ( + self.size[0] * input_shape[1] + if input_shape[1] is not None + else None + ) + dim2 = ( + self.size[1] * input_shape[2] + if input_shape[2] is not None + else None + ) + dim3 = ( + self.size[2] * input_shape[3] + if input_shape[3] is not None + else None + ) + return (input_shape[0], dim1, dim2, dim3, input_shape[4]) + + def call(self, inputs): + return self._resize_volumes( + inputs, self.size[0], self.size[1], self.size[2], self.data_format + ) + + def get_config(self): + config = {"size": self.size, "data_format": self.data_format} + base_config = super().get_config() + return {**base_config, **config} + + def _resize_volumes( + self, x, depth_factor, height_factor, width_factor, data_format + ): + """Resizes the volume contained 
in a 5D tensor. + + Args: + x: Tensor or variable to resize. + depth_factor: Positive integer. + height_factor: Positive integer. + width_factor: Positive integer. + data_format: One of `"channels_first"`, `"channels_last"`. + + Returns: + Resized tensor. + """ + if data_format == "channels_first": + output = ops.repeat(x, depth_factor, axis=2) + output = ops.repeat(output, height_factor, axis=3) + output = ops.repeat(output, width_factor, axis=4) + return output + elif data_format == "channels_last": + output = ops.repeat(x, depth_factor, axis=1) + output = ops.repeat(output, height_factor, axis=2) + output = ops.repeat(output, width_factor, axis=3) + return output + else: + raise ValueError(f"Invalid data_format: {data_format}") diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding1d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding1d.py new file mode 100644 index 0000000000000000000000000000000000000000..c9e50d8897b3d684086c83a365573b09a6ee30eb --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding1d.py @@ -0,0 +1,93 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + + +@keras_export("keras.layers.ZeroPadding1D") +class ZeroPadding1D(Layer): + """Zero-padding layer for 1D input (e.g. temporal sequence). 
+ + Example: + + >>> input_shape = (2, 2, 3) + >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) + >>> x + [[[ 0 1 2] + [ 3 4 5]] + [[ 6 7 8] + [ 9 10 11]]] + >>> y = keras.layers.ZeroPadding1D(padding=2)(x) + >>> y + [[[ 0 0 0] + [ 0 0 0] + [ 0 1 2] + [ 3 4 5] + [ 0 0 0] + [ 0 0 0]] + [[ 0 0 0] + [ 0 0 0] + [ 6 7 8] + [ 9 10 11] + [ 0 0 0] + [ 0 0 0]]] + + Args: + padding: Int, or tuple of int (length 2), or dictionary. + - If int: how many zeros to add at the beginning and end of + the padding dimension (axis 1). + - If tuple of 2 ints: how many zeros to add at the beginning and the + end of the padding dimension (`(left_pad, right_pad)`). + data_format: A string, one of `"channels_last"` (default) or + `"channels_first"`. The ordering of the dimensions in the inputs. + `"channels_last"` corresponds to inputs with shape + `(batch_size, axis_to_pad, channels)` while `"channels_first"` + corresponds to inputs with shape + `(batch_size, channels, axis_to_pad)`. + When unspecified, uses `image_data_format` value found in your Keras + config file at `~/.keras/keras.json` (if exists). Defaults to + `"channels_last"`. 
+ + Input shape: + 3D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, axis_to_pad, features)` + - If `data_format` is `"channels_first"`: + `(batch_size, features, axis_to_pad)` + + Output shape: + 3D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, padded_axis, features)` + - If `data_format` is `"channels_first"`: + `(batch_size, features, padded_axis)` + """ + + def __init__(self, padding=1, data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + self.padding = argument_validation.standardize_tuple( + padding, 2, "padding", allow_zero=True + ) + self.input_spec = InputSpec(ndim=3) + + def compute_output_shape(self, input_shape): + output_shape = list(input_shape) + padding_dim = 2 if self.data_format == "channels_first" else 1 + if output_shape[padding_dim] is not None: + output_shape[padding_dim] += self.padding[0] + self.padding[1] + return tuple(output_shape) + + def call(self, inputs): + if self.data_format == "channels_first": + all_dims_padding = ((0, 0), (0, 0), self.padding) + else: + all_dims_padding = ((0, 0), self.padding, (0, 0)) + return ops.pad(inputs, all_dims_padding) + + def get_config(self): + config = {"padding": self.padding, "data_format": self.data_format} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding2d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding2d.py new file mode 100644 index 0000000000000000000000000000000000000000..e5d88d16d76d882b988756d5175a814d695413c8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding2d.py @@ -0,0 +1,119 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from 
keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + + +@keras_export("keras.layers.ZeroPadding2D") +class ZeroPadding2D(Layer): + """Zero-padding layer for 2D input (e.g. picture). + + This layer can add rows and columns of zeros at the top, bottom, left and + right side of an image tensor. + + Example: + + >>> input_shape = (1, 1, 2, 2) + >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) + >>> x + [[[[0 1] + [2 3]]]] + >>> y = keras.layers.ZeroPadding2D(padding=1)(x) + >>> y + [[[[0 0] + [0 0] + [0 0] + [0 0]] + [[0 0] + [0 1] + [2 3] + [0 0]] + [[0 0] + [0 0] + [0 0] + [0 0]]]] + + Args: + padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. + - If int: the same symmetric padding is applied to height and width. + - If tuple of 2 ints: interpreted as two different symmetric padding + values for height and width: + `(symmetric_height_pad, symmetric_width_pad)`. + - If tuple of 2 tuples of 2 ints: interpreted as + `((top_pad, bottom_pad), (left_pad, right_pad))`. + data_format: A string, one of `"channels_last"` (default) or + `"channels_first"`. The ordering of the dimensions in the inputs. + `"channels_last"` corresponds to inputs with shape + `(batch_size, height, width, channels)` while `"channels_first"` + corresponds to inputs with shape + `(batch_size, channels, height, width)`. + When unspecified, uses `image_data_format` value found in your Keras + config file at `~/.keras/keras.json` (if exists). Defaults to + `"channels_last"`. 
+ + Input shape: + 4D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, height, width, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, height, width)` + + Output shape: + 4D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, padded_height, padded_width, channels)` + - If `data_format` is `"channels_first"`: + `(batch_size, channels, padded_height, padded_width)` + """ + + def __init__(self, padding=(1, 1), data_format=None, **kwargs): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + if isinstance(padding, int): + self.padding = ((padding, padding), (padding, padding)) + elif hasattr(padding, "__len__"): + if len(padding) != 2: + raise ValueError( + "`padding` should have two elements. " + f"Received: padding={padding}." + ) + height_padding = argument_validation.standardize_tuple( + padding[0], 2, "1st entry of padding", allow_zero=True + ) + width_padding = argument_validation.standardize_tuple( + padding[1], 2, "2nd entry of padding", allow_zero=True + ) + self.padding = (height_padding, width_padding) + else: + raise ValueError( + "`padding` should be either an int, a tuple of 2 ints " + "(symmetric_height_crop, symmetric_width_crop), " + "or a tuple of 2 tuples of 2 ints " + "((top_crop, bottom_crop), (left_crop, right_crop)). " + f"Received: padding={padding}." 
+ ) + self.input_spec = InputSpec(ndim=4) + + def compute_output_shape(self, input_shape): + output_shape = list(input_shape) + spatial_dims_offset = 2 if self.data_format == "channels_first" else 1 + for index in range(0, 2): + if output_shape[index + spatial_dims_offset] is not None: + output_shape[index + spatial_dims_offset] += ( + self.padding[index][0] + self.padding[index][1] + ) + return tuple(output_shape) + + def call(self, inputs): + if self.data_format == "channels_first": + all_dims_padding = ((0, 0), (0, 0), *self.padding) + else: + all_dims_padding = ((0, 0), *self.padding, (0, 0)) + return ops.pad(inputs, all_dims_padding) + + def get_config(self): + config = {"padding": self.padding, "data_format": self.data_format} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding3d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding3d.py new file mode 100644 index 0000000000000000000000000000000000000000..87e39bf0006047343327094d56962dd3386835c1 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/reshaping/zero_padding3d.py @@ -0,0 +1,118 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.utils import argument_validation + + +@keras_export("keras.layers.ZeroPadding3D") +class ZeroPadding3D(Layer): + """Zero-padding layer for 3D data (spatial or spatio-temporal). + + Example: + + >>> input_shape = (1, 1, 2, 2, 3) + >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) + >>> y = keras.layers.ZeroPadding3D(padding=2)(x) + >>> y.shape + (1, 5, 6, 6, 3) + + Args: + padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. 
+ - If int: the same symmetric padding is applied to depth, height, + and width. + - If tuple of 3 ints: interpreted as three different symmetric + padding values for depth, height, and width: + `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`. + - If tuple of 3 tuples of 2 ints: interpreted as + `((left_dim1_pad, right_dim1_pad), (left_dim2_pad, + right_dim2_pad), (left_dim3_pad, right_dim3_pad))`. + data_format: A string, one of `"channels_last"` (default) or + `"channels_first"`. The ordering of the dimensions in the inputs. + `"channels_last"` corresponds to inputs with shape + `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. + When unspecified, uses `image_data_format` value found in your Keras + config file at `~/.keras/keras.json` (if exists). Defaults to + `"channels_last"`. + + Input shape: + 5D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, first_axis_to_pad, second_axis_to_pad, + third_axis_to_pad, depth)` + - If `data_format` is `"channels_first"`: + `(batch_size, depth, first_axis_to_pad, second_axis_to_pad, + third_axis_to_pad)` + + Output shape: + 5D tensor with shape: + - If `data_format` is `"channels_last"`: + `(batch_size, first_padded_axis, second_padded_axis, + third_axis_to_pad, depth)` + - If `data_format` is `"channels_first"`: + `(batch_size, depth, first_padded_axis, second_padded_axis, + third_axis_to_pad)` + """ + + def __init__( + self, padding=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs + ): + super().__init__(**kwargs) + self.data_format = backend.standardize_data_format(data_format) + if isinstance(padding, int): + self.padding = ( + (padding, padding), + (padding, padding), + (padding, padding), + ) + elif hasattr(padding, "__len__"): + if len(padding) != 3: + raise ValueError( + f"`padding` should have 3 elements. 
Received: {padding}." + ) + dim1_padding = argument_validation.standardize_tuple( + padding[0], 2, "1st entry of padding", allow_zero=True + ) + dim2_padding = argument_validation.standardize_tuple( + padding[1], 2, "2nd entry of padding", allow_zero=True + ) + dim3_padding = argument_validation.standardize_tuple( + padding[2], 2, "3rd entry of padding", allow_zero=True + ) + self.padding = (dim1_padding, dim2_padding, dim3_padding) + else: + raise ValueError( + "`padding` should be either an int, a tuple of 3 ints " + "(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), " + "or a tuple of 3 tuples of 2 ints " + "((left_dim1_pad, right_dim1_pad)," + " (left_dim2_pad, right_dim2_pad)," + " (left_dim3_pad, right_dim2_pad)). " + f"Received: padding={padding}." + ) + self.input_spec = InputSpec(ndim=5) + + def compute_output_shape(self, input_shape): + output_shape = list(input_shape) + spatial_dims_offset = 2 if self.data_format == "channels_first" else 1 + for index in range(0, 3): + if output_shape[index + spatial_dims_offset] is not None: + output_shape[index + spatial_dims_offset] += ( + self.padding[index][0] + self.padding[index][1] + ) + return tuple(output_shape) + + def call(self, inputs): + if self.data_format == "channels_first": + all_dims_padding = ((0, 0), (0, 0), *self.padding) + else: + all_dims_padding = ((0, 0), *self.padding, (0, 0)) + return ops.pad(inputs, all_dims_padding) + + def get_config(self): + config = {"padding": self.padding, "data_format": self.data_format} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cbd748afd0836928b7f14ac6621ac08790ccb53 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/bidirectional.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/bidirectional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..552c8621917c20118870c87e59a30ad43bb829ec Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/bidirectional.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b52c4a2eaaaeeb4210a859d0908526a7ccc58be Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm1d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac35d1b7e1743f33e025a7e654b8700dcc8ea323 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm1d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm2d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3eda1d09b43c306909eae26973f5a52dc9559f9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm2d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm3d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc033823b57b8c36d2c5238a1265486937c93b48 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/conv_lstm3d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/dropout_rnn_cell.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/dropout_rnn_cell.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c6c112b34b4831e79bce8fb87dd9251a5559bd9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/dropout_rnn_cell.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/gru.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/gru.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c853ecc38c9b6b1bf41f6dcecc11c3b7d498b118 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/gru.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/lstm.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/lstm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2db0346621c2b85c9ce6849175c2ca32e935da9d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/lstm.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/rnn.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a048579ccbc1e569a66cf3f1347f916e4613db83 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/rnn.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/simple_rnn.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/simple_rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b88572381115ed30c60e90749362a24f80d37b51 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/simple_rnn.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/stacked_rnn_cells.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/stacked_rnn_cells.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efeff556203e2ec92680af217c40de5eab9740e8 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/stacked_rnn_cells.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/time_distributed.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/time_distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9338db7ed505bc894388a47e7006b9e61c1a44d9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/__pycache__/time_distributed.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/bidirectional.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/bidirectional.py new file mode 100644 index 0000000000000000000000000000000000000000..a89c30f9a4eebe9a4fec3b1a8838bdec3411f50b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/bidirectional.py @@ -0,0 +1,326 @@ +import copy + +from keras.src import ops +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib + + +@keras_export("keras.layers.Bidirectional") +class Bidirectional(Layer): + """Bidirectional wrapper for RNNs. + + Args: + layer: `keras.layers.RNN` instance, such as + `keras.layers.LSTM` or `keras.layers.GRU`. + It could also be a `keras.layers.Layer` instance + that meets the following criteria: + 1. Be a sequence-processing layer (accepts 3D+ inputs). + 2. 
Have a `go_backwards`, `return_sequences` and `return_state` + attribute (with the same semantics as for the `RNN` class). + 3. Have an `input_spec` attribute. + 4. Implement serialization via `get_config()` and `from_config()`. + Note that the recommended way to create new RNN layers is to write a + custom RNN cell and use it with `keras.layers.RNN`, instead of + subclassing `keras.layers.Layer` directly. + When `return_sequences` is `True`, the output of the masked + timestep will be zero regardless of the layer's original + `zero_output_for_mask` value. + merge_mode: Mode by which outputs of the forward and backward RNNs + will be combined. One of `{"sum", "mul", "concat", "ave", None}`. + If `None`, the outputs will not be combined, + they will be returned as a list. Defaults to `"concat"`. + backward_layer: Optional `keras.layers.RNN`, + or `keras.layers.Layer` instance to be used to handle + backwards input processing. + If `backward_layer` is not provided, the layer instance passed + as the `layer` argument will be used to generate the backward layer + automatically. + Note that the provided `backward_layer` layer should have properties + matching those of the `layer` argument, in particular + it should have the same values for `stateful`, `return_states`, + `return_sequences`, etc. In addition, `backward_layer` + and `layer` should have different `go_backwards` argument values. + A `ValueError` will be raised if these requirements are not met. + + Call arguments: + The call arguments for this layer are the same as those of the + wrapped RNN layer. Beware that when passing the `initial_state` + argument during the call of this layer, the first half in the + list of elements in the `initial_state` list will be passed to + the forward RNN call and the last half in the list of elements + will be passed to the backward RNN call. 
+ + Note: instantiating a `Bidirectional` layer from an existing RNN layer + instance will not reuse the weights state of the RNN layer instance -- the + `Bidirectional` layer will have freshly initialized weights. + + Examples: + + ```python + model = Sequential([ + Input(shape=(5, 10)), + Bidirectional(LSTM(10, return_sequences=True), + Bidirectional(LSTM(10)), + Dense(5, activation="softmax"), + ]) + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + + # With custom backward layer + forward_layer = LSTM(10, return_sequences=True) + backward_layer = LSTM(10, activation='relu', return_sequences=True, + go_backwards=True) + model = Sequential([ + Input(shape=(5, 10)), + Bidirectional(forward_layer, backward_layer=backward_layer), + Dense(5, activation="softmax"), + ]) + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + ``` + """ + + def __init__( + self, + layer, + merge_mode="concat", + weights=None, + backward_layer=None, + **kwargs, + ): + if not isinstance(layer, Layer): + raise ValueError( + "Please initialize `Bidirectional` layer with a " + f"`keras.layers.Layer` instance. Received: {layer}" + ) + if backward_layer is not None and not isinstance(backward_layer, Layer): + raise ValueError( + "`backward_layer` need to be a `keras.layers.Layer` " + f"instance. Received: {backward_layer}" + ) + if merge_mode not in ["sum", "mul", "ave", "concat", None]: + raise ValueError( + f"Invalid merge mode. Received: {merge_mode}. " + "Merge mode should be one of " + '{"sum", "mul", "ave", "concat", None}' + ) + super().__init__(**kwargs) + + # Recreate the forward layer from the original layer config, so that it + # will not carry over any state from the layer. 
+ config = serialization_lib.serialize_keras_object(layer) + config["config"]["name"] = "forward_" + utils.removeprefix( + layer.name, "forward_" + ) + self.forward_layer = serialization_lib.deserialize_keras_object(config) + + if backward_layer is None: + config = serialization_lib.serialize_keras_object(layer) + config["config"]["go_backwards"] = True + config["config"]["name"] = "backward_" + utils.removeprefix( + layer.name, "backward_" + ) + self.backward_layer = serialization_lib.deserialize_keras_object( + config + ) + else: + self.backward_layer = backward_layer + self._verify_layer_config() + + def force_zero_output_for_mask(layer): + # Force the zero_output_for_mask to be True if returning sequences. + if getattr(layer, "zero_output_for_mask", None) is not None: + layer.zero_output_for_mask = layer.return_sequences + + force_zero_output_for_mask(self.forward_layer) + force_zero_output_for_mask(self.backward_layer) + + self.merge_mode = merge_mode + if weights: + nw = len(weights) + self.forward_layer.initial_weights = weights[: nw // 2] + self.backward_layer.initial_weights = weights[nw // 2 :] + self.stateful = layer.stateful + self.return_sequences = layer.return_sequences + self.return_state = layer.return_state + self.supports_masking = True + self.input_spec = layer.input_spec + + def _verify_layer_config(self): + """Ensure the forward and backward layers have valid common property.""" + if self.forward_layer.go_backwards == self.backward_layer.go_backwards: + raise ValueError( + "Forward layer and backward layer should have different " + "`go_backwards` value. 
Received: " + "forward_layer.go_backwards " + f"{self.forward_layer.go_backwards}, " + "backward_layer.go_backwards=" + f"{self.backward_layer.go_backwards}" + ) + + common_attributes = ("stateful", "return_sequences", "return_state") + for a in common_attributes: + forward_value = getattr(self.forward_layer, a) + backward_value = getattr(self.backward_layer, a) + if forward_value != backward_value: + raise ValueError( + "Forward layer and backward layer are expected to have " + f'the same value for attribute "{a}", got ' + f'"{forward_value}" for forward layer and ' + f'"{backward_value}" for backward layer' + ) + + def compute_output_shape(self, sequences_shape, initial_state_shape=None): + output_shape = self.forward_layer.compute_output_shape(sequences_shape) + + if self.return_state: + output_shape, state_shape = output_shape[0], output_shape[1:] + + if self.merge_mode == "concat": + output_shape = list(output_shape) + output_shape[-1] *= 2 + output_shape = tuple(output_shape) + elif self.merge_mode is None: + output_shape = [output_shape, output_shape] + + if self.return_state: + if self.merge_mode is None: + return tuple(output_shape) + state_shape + state_shape + return tuple([output_shape]) + (state_shape) + (state_shape) + return tuple(output_shape) + + def call( + self, + sequences, + initial_state=None, + mask=None, + training=None, + ): + kwargs = {} + if self.forward_layer._call_has_training_arg: + kwargs["training"] = training + if self.forward_layer._call_has_mask_arg: + kwargs["mask"] = mask + + if initial_state is not None: + # initial_states are not keras tensors, eg eager tensor from np + # array. They are only passed in from kwarg initial_state, and + # should be passed to forward/backward layer via kwarg + # initial_state as well. 
+ forward_inputs, backward_inputs = sequences, sequences + half = len(initial_state) // 2 + forward_state = initial_state[:half] + backward_state = initial_state[half:] + else: + forward_inputs, backward_inputs = sequences, sequences + forward_state, backward_state = None, None + + y = self.forward_layer( + forward_inputs, initial_state=forward_state, **kwargs + ) + y_rev = self.backward_layer( + backward_inputs, initial_state=backward_state, **kwargs + ) + + if self.return_state: + states = tuple(y[1:] + y_rev[1:]) + y = y[0] + y_rev = y_rev[0] + + y = ops.cast(y, self.compute_dtype) + y_rev = ops.cast(y_rev, self.compute_dtype) + + if self.return_sequences: + y_rev = ops.flip(y_rev, axis=1) + if self.merge_mode == "concat": + output = ops.concatenate([y, y_rev], axis=-1) + elif self.merge_mode == "sum": + output = y + y_rev + elif self.merge_mode == "ave": + output = (y + y_rev) / 2 + elif self.merge_mode == "mul": + output = y * y_rev + elif self.merge_mode is None: + output = (y, y_rev) + else: + raise ValueError( + "Unrecognized value for `merge_mode`. " + f"Received: {self.merge_mode}" + 'Expected one of {"concat", "sum", "ave", "mul"}.' + ) + if self.return_state: + if self.merge_mode is None: + return output + states + return (output,) + states + return output + + def reset_states(self): + # Compatibility alias. 
+ self.reset_state() + + def reset_state(self): + if not self.stateful: + raise AttributeError("Layer must be stateful.") + self.forward_layer.reset_state() + self.backward_layer.reset_state() + + @property + def states(self): + if self.forward_layer.states and self.backward_layer.states: + return tuple(self.forward_layer.states + self.backward_layer.states) + return None + + def build(self, sequences_shape, initial_state_shape=None): + if not self.forward_layer.built: + self.forward_layer.build(sequences_shape) + if not self.backward_layer.built: + self.backward_layer.build(sequences_shape) + self.built = True + + def compute_mask(self, _, mask): + if isinstance(mask, list): + mask = mask[0] + if self.return_sequences: + if not self.merge_mode: + output_mask = (mask, mask) + else: + output_mask = mask + else: + output_mask = (None, None) if not self.merge_mode else None + + if self.return_state and self.states is not None: + state_mask = (None for _ in self.states) + if isinstance(output_mask, list): + return output_mask + state_mask * 2 + return (output_mask,) + state_mask * 2 + return output_mask + + def get_config(self): + config = {"merge_mode": self.merge_mode} + config["layer"] = serialization_lib.serialize_keras_object( + self.forward_layer + ) + config["backward_layer"] = serialization_lib.serialize_keras_object( + self.backward_layer + ) + base_config = super().get_config() + return {**base_config, **config} + + @classmethod + def from_config(cls, config, custom_objects=None): + # Instead of updating the input, create a copy and use that. + config = copy.deepcopy(config) + + config["layer"] = serialization_lib.deserialize_keras_object( + config["layer"], custom_objects=custom_objects + ) + # Handle (optional) backward layer instantiation. 
+ backward_layer_config = config.pop("backward_layer", None) + if backward_layer_config is not None: + backward_layer = serialization_lib.deserialize_keras_object( + backward_layer_config, custom_objects=custom_objects + ) + config["backward_layer"] = backward_layer + # Instantiate the wrapper, adjust it and return it. + layer = cls(**config) + return layer diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/conv_lstm.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/conv_lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..cd5c6a0a25b3412b1962e8cd4d073072826885fe --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/conv_lstm.py @@ -0,0 +1,696 @@ +from keras.src import activations +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src import tree +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.src.layers.rnn.rnn import RNN +from keras.src.ops import operation_utils +from keras.src.utils import argument_validation + + +class ConvLSTMCell(Layer, DropoutRNNCell): + """Cell class for the ConvLSTM layer. + + Args: + rank: Integer, rank of the convolution, e.g. "2" for 2D convolutions. + filters: Integer, the dimensionality of the output space + (i.e. the number of output filters in the convolution). + kernel_size: An integer or tuple/list of n integers, specifying the + dimensions of the convolution window. + strides: An integer or tuple/list of n integers, specifying the strides + of the convolution. Specifying any stride value != 1 + is incompatible with specifying any `dilation_rate` value != 1. + padding: One of `"valid"` or `"same"` (case-insensitive). 
+ `"valid"` means no padding. `"same"` results in padding evenly + to the left/right or up/down of the input such that output + has the same height/width dimension as the input. + data_format: A string, one of `channels_last` (default) or + `channels_first`. When unspecified, uses + `image_data_format` value found in your Keras config file at + `~/.keras/keras.json` (if exists) else 'channels_last'. + Defaults to `'channels_last'`. + dilation_rate: An integer or tuple/list of n integers, specifying the + dilation rate to use for dilated convolution. + Currently, specifying any `dilation_rate` value != 1 is + incompatible with specifying any `strides` value != 1. + activation: Activation function. If `None`, no activation is applied. + recurrent_activation: Activation function to use for the recurrent step. + use_bias: Boolean, (default `True`), whether the layer + should use a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix, + used for the linear transformation of the inputs. Default: + `"glorot_uniform"`. + recurrent_initializer: Initializer for the `recurrent_kernel` + weights matrix, used for the linear transformation of the recurrent + state. Default: `"orthogonal"`. + bias_initializer: Initializer for the bias vector. Default: `"zeros"`. + unit_forget_bias: Boolean (default `True`). If `True`, + add 1 to the bias of the forget gate at initialization. + Setting it to `True` will also force `bias_initializer="zeros"`. + This is recommended in [Jozefowicz et al.]( + https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf) + kernel_regularizer: Regularizer function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_regularizer: Regularizer function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_regularizer: Regularizer function applied to the bias vector. + Default: `None`. + activity_regularizer: Regularizer function applied to the output of the + layer (its "activation"). 
Default: `None`. + kernel_constraint: Constraint function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_constraint: Constraint function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_constraint: Constraint function applied to the bias vector. + Default: `None`. + dropout: Float between 0 and 1. Fraction of the units to drop for the + linear transformation of the inputs. Default: 0. + recurrent_dropout: Float between 0 and 1. Fraction of the units to drop + for the linear transformation of the recurrent state. Default: 0. + seed: Random seed for dropout. + + Call arguments: + inputs: A (2+ `rank`)D tensor. + states: List of state tensors corresponding to the previous timestep. + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. Only relevant when `dropout` or + `recurrent_dropout` is used. + """ + + def __init__( + self, + rank, + filters, + kernel_size, + strides=1, + padding="valid", + data_format=None, + dilation_rate=1, + activation="tanh", + recurrent_activation="sigmoid", + use_bias=True, + kernel_initializer="glorot_uniform", + recurrent_initializer="orthogonal", + bias_initializer="zeros", + unit_forget_bias=True, + kernel_regularizer=None, + recurrent_regularizer=None, + bias_regularizer=None, + kernel_constraint=None, + recurrent_constraint=None, + bias_constraint=None, + dropout=0.0, + recurrent_dropout=0.0, + seed=None, + **kwargs, + ): + super().__init__(**kwargs) + self.seed = seed + self.seed_generator = backend.random.SeedGenerator(seed=seed) + self.rank = rank + if self.rank > 3: + raise ValueError( + f"Rank {rank} convolutions are not currently " + f"implemented. 
Received: rank={rank}" + ) + self.filters = filters + self.kernel_size = argument_validation.standardize_tuple( + kernel_size, self.rank, "kernel_size" + ) + self.strides = argument_validation.standardize_tuple( + strides, self.rank, "strides", allow_zero=True + ) + self.padding = argument_validation.standardize_padding(padding) + self.data_format = backend.standardize_data_format(data_format) + self.dilation_rate = argument_validation.standardize_tuple( + dilation_rate, self.rank, "dilation_rate" + ) + self.activation = activations.get(activation) + self.recurrent_activation = activations.get(recurrent_activation) + self.use_bias = use_bias + + self.kernel_initializer = initializers.get(kernel_initializer) + self.recurrent_initializer = initializers.get(recurrent_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.unit_forget_bias = unit_forget_bias + + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.recurrent_regularizer = regularizers.get(recurrent_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + + self.kernel_constraint = constraints.get(kernel_constraint) + self.recurrent_constraint = constraints.get(recurrent_constraint) + self.bias_constraint = constraints.get(bias_constraint) + + self.dropout = min(1.0, max(0.0, dropout)) + self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout)) + self.dropout_mask_count = 4 + self.input_spec = InputSpec(ndim=rank + 2) + self.state_size = -1 # Custom, defined in methods + + def build(self, inputs_shape, states_shape=None): + if self.data_format == "channels_first": + channel_axis = 1 + self.spatial_dims = inputs_shape[2:] + else: + channel_axis = -1 + self.spatial_dims = inputs_shape[1:-1] + if None in self.spatial_dims: + raise ValueError( + "ConvLSTM layers only support static " + "input shapes for the spatial dimension. 
" + f"Received invalid input shape: input_shape={inputs_shape}" + ) + if inputs_shape[channel_axis] is None: + raise ValueError( + "The channel dimension of the inputs (last axis) should be " + "defined. Found None. Full input shape received: " + f"input_shape={inputs_shape}" + ) + self.input_spec = InputSpec( + ndim=self.rank + 3, shape=(None,) + inputs_shape[1:] + ) + + input_dim = inputs_shape[channel_axis] + self.input_dim = input_dim + self.kernel_shape = self.kernel_size + (input_dim, self.filters * 4) + recurrent_kernel_shape = self.kernel_size + ( + self.filters, + self.filters * 4, + ) + + self.kernel = self.add_weight( + shape=self.kernel_shape, + initializer=self.kernel_initializer, + name="kernel", + regularizer=self.kernel_regularizer, + constraint=self.kernel_constraint, + ) + self.recurrent_kernel = self.add_weight( + shape=recurrent_kernel_shape, + initializer=self.recurrent_initializer, + name="recurrent_kernel", + regularizer=self.recurrent_regularizer, + constraint=self.recurrent_constraint, + ) + + if self.use_bias: + if self.unit_forget_bias: + + def bias_initializer(_, *args, **kwargs): + return ops.concatenate( + [ + self.bias_initializer( + (self.filters,), *args, **kwargs + ), + initializers.get("ones")( + (self.filters,), *args, **kwargs + ), + self.bias_initializer( + (self.filters * 2,), *args, **kwargs + ), + ] + ) + + else: + bias_initializer = self.bias_initializer + self.bias = self.add_weight( + shape=(self.filters * 4,), + name="bias", + initializer=bias_initializer, + regularizer=self.bias_regularizer, + constraint=self.bias_constraint, + ) + else: + self.bias = None + self.built = True + + def call(self, inputs, states, training=False): + h_tm1 = states[0] # previous memory state + c_tm1 = states[1] # previous carry state + + if training and 0.0 < self.dropout < 1.0: + dp_mask = self.get_dropout_mask(inputs) + inputs_i = inputs * dp_mask[0] + inputs_f = inputs * dp_mask[1] + inputs_c = inputs * dp_mask[2] + inputs_o = inputs * 
dp_mask[3] + else: + inputs_i = inputs + inputs_f = inputs + inputs_c = inputs + inputs_o = inputs + + if training and 0.0 < self.recurrent_dropout < 1.0: + rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1) + h_tm1_i = h_tm1 * rec_dp_mask[0] + h_tm1_f = h_tm1 * rec_dp_mask[1] + h_tm1_c = h_tm1 * rec_dp_mask[2] + h_tm1_o = h_tm1 * rec_dp_mask[3] + else: + h_tm1_i = h_tm1 + h_tm1_f = h_tm1 + h_tm1_c = h_tm1 + h_tm1_o = h_tm1 + + (kernel_i, kernel_f, kernel_c, kernel_o) = ops.split( + self.kernel, 4, axis=self.rank + 1 + ) + ( + recurrent_kernel_i, + recurrent_kernel_f, + recurrent_kernel_c, + recurrent_kernel_o, + ) = ops.split(self.recurrent_kernel, 4, axis=self.rank + 1) + + if self.use_bias: + bias_i, bias_f, bias_c, bias_o = ops.split(self.bias, 4) + else: + bias_i, bias_f, bias_c, bias_o = None, None, None, None + + x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding) + x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding) + x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding) + x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding) + + h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i) + h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f) + h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c) + h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o) + + i = self.recurrent_activation(x_i + h_i) + f = self.recurrent_activation(x_f + h_f) + c = f * c_tm1 + i * self.activation(x_c + h_c) + o = self.recurrent_activation(x_o + h_o) + h = o * self.activation(c) + return h, [h, c] + + def compute_output_shape(self, inputs_shape, states_shape=None): + conv_output_shape = operation_utils.compute_conv_output_shape( + inputs_shape, + self.filters, + self.kernel_size, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + dilation_rate=self.dilation_rate, + ) + return conv_output_shape, [conv_output_shape, conv_output_shape] + + def get_initial_state(self, 
batch_size=None): + if self.data_format == "channels_last": + input_shape = (batch_size,) + self.spatial_dims + (self.input_dim,) + else: + input_shape = (batch_size, self.input_dim) + self.spatial_dims + state_shape = self.compute_output_shape(input_shape)[0] + return [ + ops.zeros(state_shape, dtype=self.compute_dtype), + ops.zeros(state_shape, dtype=self.compute_dtype), + ] + + def input_conv(self, x, w, b=None, padding="valid"): + conv_out = ops.conv( + x, + w, + strides=self.strides, + padding=padding, + data_format=self.data_format, + dilation_rate=self.dilation_rate, + ) + if b is not None: + if self.data_format == "channels_last": + bias_shape = (1,) * (self.rank + 1) + (self.filters,) + else: + bias_shape = (1, self.filters) + (1,) * self.rank + bias = ops.reshape(b, bias_shape) + conv_out += bias + return conv_out + + def recurrent_conv(self, x, w): + strides = argument_validation.standardize_tuple( + 1, self.rank, "strides", allow_zero=True + ) + conv_out = ops.conv( + x, w, strides=strides, padding="same", data_format=self.data_format + ) + return conv_out + + def get_config(self): + config = { + "filters": self.filters, + "kernel_size": self.kernel_size, + "strides": self.strides, + "padding": self.padding, + "data_format": self.data_format, + "dilation_rate": self.dilation_rate, + "activation": activations.serialize(self.activation), + "recurrent_activation": activations.serialize( + self.recurrent_activation + ), + "use_bias": self.use_bias, + "kernel_initializer": initializers.serialize( + self.kernel_initializer + ), + "recurrent_initializer": initializers.serialize( + self.recurrent_initializer + ), + "bias_initializer": initializers.serialize(self.bias_initializer), + "unit_forget_bias": self.unit_forget_bias, + "kernel_regularizer": regularizers.serialize( + self.kernel_regularizer + ), + "recurrent_regularizer": regularizers.serialize( + self.recurrent_regularizer + ), + "bias_regularizer": regularizers.serialize(self.bias_regularizer), + 
"kernel_constraint": constraints.serialize(self.kernel_constraint), + "recurrent_constraint": constraints.serialize( + self.recurrent_constraint + ), + "bias_constraint": constraints.serialize(self.bias_constraint), + "dropout": self.dropout, + "recurrent_dropout": self.recurrent_dropout, + "seed": self.seed, + } + base_config = super().get_config() + return {**base_config, **config} + + +class ConvLSTM(RNN): + """Abstract N-D Convolutional LSTM layer (used as implementation base). + + Similar to an LSTM layer, but the input transformations + and recurrent transformations are both convolutional. + + Args: + rank: Integer, rank of the convolution, e.g. "2" for 2D convolutions. + filters: Integer, the dimensionality of the output space + (i.e. the number of output filters in the convolution). + kernel_size: An integer or tuple/list of n integers, specifying the + dimensions of the convolution window. + strides: An integer or tuple/list of n integers, + specifying the strides of the convolution. + Specifying any stride value != 1 is incompatible with specifying + any `dilation_rate` value != 1. + padding: One of `"valid"` or `"same"` (case-insensitive). + `"valid"` means no padding. `"same"` results in padding evenly to + the left/right or up/down of the input such that output has the same + height/width dimension as the input. + data_format: A string, + one of `channels_last` (default) or `channels_first`. + The ordering of the dimensions in the inputs. + `channels_last` corresponds to inputs with shape + `(batch, time, ..., channels)` + while `channels_first` corresponds to + inputs with shape `(batch, time, channels, ...)`. + When unspecified, uses + `image_data_format` value found in your Keras config file at + `~/.keras/keras.json` (if exists) else 'channels_last'. + Defaults to `'channels_last'`. + dilation_rate: An integer or tuple/list of n integers, specifying + the dilation rate to use for dilated convolution. 
+ Currently, specifying any `dilation_rate` value != 1 is + incompatible with specifying any `strides` value != 1. + activation: Activation function to use. + By default hyperbolic tangent activation function is applied + (`tanh(x)`). + recurrent_activation: Activation function to use + for the recurrent step. + use_bias: Boolean, whether the layer uses a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix, + used for the linear transformation of the inputs. + recurrent_initializer: Initializer for the `recurrent_kernel` + weights matrix, + used for the linear transformation of the recurrent state. + bias_initializer: Initializer for the bias vector. + unit_forget_bias: Boolean. + If True, add 1 to the bias of the forget gate at initialization. + Use in combination with `bias_initializer="zeros"`. + This is recommended in [Jozefowicz et al., 2015]( + http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) + kernel_regularizer: Regularizer function applied to + the `kernel` weights matrix. + recurrent_regularizer: Regularizer function applied to + the `recurrent_kernel` weights matrix. + bias_regularizer: Regularizer function applied to the bias vector. + activity_regularizer: Regularizer function applied to. + kernel_constraint: Constraint function applied to + the `kernel` weights matrix. + recurrent_constraint: Constraint function applied to + the `recurrent_kernel` weights matrix. + bias_constraint: Constraint function applied to the bias vector. + dropout: Float between 0 and 1. + Fraction of the units to drop for + the linear transformation of the inputs. + recurrent_dropout: Float between 0 and 1. + Fraction of the units to drop for + the linear transformation of the recurrent state. + seed: Random seed for dropout. + return_sequences: Boolean. Whether to return the last output + in the output sequence, or the full sequence. (default False) + return_state: Boolean Whether to return the last state + in addition to the output. 
    def __init__(
        self,
        rank,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        **kwargs,
    ):
        """Build the wrapped `ConvLSTMCell` and initialize the RNN base.

        All convolution/recurrence hyperparameters are forwarded to the
        cell; the sequence-level flags (`return_sequences`, `return_state`,
        `go_backwards`, `stateful`) go to the RNN base class.
        """
        cell = ConvLSTMCell(
            rank=rank,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            seed=seed,
            name="conv_lstm_cell",
            # Share the layer's dtype (if given) with the cell; the other
            # kwargs (name, trainable, ...) are consumed by super() below.
            dtype=kwargs.get("dtype"),
        )
        super().__init__(
            cell,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            **kwargs,
        )
        # Expected input rank: (batch, time) + `rank` spatial dims + channels.
        self.input_spec = InputSpec(ndim=rank + 3)

    def call(self, sequences, initial_state=None, mask=None, training=False):
        """Run the cell over `sequences`; thin pass-through to the base class."""
        return super().call(
            sequences, initial_state=initial_state, mask=mask, training=training
        )

    def compute_output_shape(self, sequences_shape, initial_state_shape=None):
        """Infer output (and state) shapes from the input sequence shape.

        A single step has shape `(batch,) + per-step dims`; the cell's own
        `compute_output_shape` yields the per-step state shape (sans batch).
        """
        batch_size = sequences_shape[0]
        steps = sequences_shape[1]
        step_shape = (batch_size,) + sequences_shape[2:]
        state_shape = self.cell.compute_output_shape(step_shape)[0][1:]

        if self.return_sequences:
            output_shape = (
                batch_size,
                steps,
            ) + state_shape
        else:
            output_shape = (batch_size,) + state_shape

        if self.return_state:
            # The LSTM carries two states (h, c); both share the per-step
            # output shape.
            batched_state_shape = (batch_size,) + state_shape
            return output_shape, batched_state_shape, batched_state_shape
        return output_shape

    def compute_mask(self, _, mask):
        """Propagate the time mask only when full sequences are returned."""
        mask = tree.flatten(mask)[0]
        output_mask = mask if self.return_sequences else None
        if self.return_state:
            # The two LSTM states are never masked.
            state_mask = [None, None]
            return [output_mask] + state_mask
        else:
            return output_mask

    # The read-only properties below expose the wrapped cell's
    # hyperparameters directly on the layer (and are what `get_config`
    # reads for serialization).

    @property
    def filters(self):
        return self.cell.filters

    @property
    def kernel_size(self):
        return self.cell.kernel_size

    @property
    def strides(self):
        return self.cell.strides

    @property
    def padding(self):
        return self.cell.padding

    @property
    def data_format(self):
        return self.cell.data_format

    @property
    def dilation_rate(self):
        return self.cell.dilation_rate

    @property
    def activation(self):
        return self.cell.activation

    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def unit_forget_bias(self):
        return self.cell.unit_forget_bias

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    def get_config(self):
        """Return the serializable layer config (the cell is excluded)."""
        config = {
            "filters": self.filters,
            "kernel_size": self.kernel_size,
            "strides": self.strides,
            "padding": self.padding,
            "data_format": self.data_format,
            "dilation_rate": self.dilation_rate,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(
                self.recurrent_activation
            ),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "recurrent_initializer": initializers.serialize(
                self.recurrent_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "unit_forget_bias": self.unit_forget_bias,
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "recurrent_regularizer": regularizers.serialize(
                self.recurrent_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(
                self.activity_regularizer
            ),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(
                self.recurrent_constraint
            ),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "seed": self.cell.seed,
        }
        base_config = super().get_config()
        # The cell is rebuilt in `__init__` from the flat config above, so
        # it must not also be serialized by the RNN base class.
        del base_config["cell"]
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config):
        """Recreate the layer from the output of `get_config`."""
        return cls(**config)
from keras.src.api_export import keras_export
from keras.src.layers.rnn.conv_lstm import ConvLSTM


@keras_export("keras.layers.ConvLSTM1D")
class ConvLSTM1D(ConvLSTM):
    """1D Convolutional LSTM.

    Similar to an LSTM layer, but the input transformations and recurrent
    transformations are both convolutional (1D convolutions here).

    Args:
        filters: int, dimension of the output space (number of filters).
        kernel_size: int or tuple/list of 1 integer, convolution window size.
        strides: int or tuple/list of 1 integer, convolution stride.
            `strides > 1` is incompatible with `dilation_rate > 1`.
        padding: `"valid"` (no padding) or `"same"` (case-insensitive,
            output keeps the input's spatial size).
        data_format: `"channels_last"` or `"channels_first"`; ordering of
            the dimensions in the inputs. Defaults to the
            `image_data_format` value in your Keras config at
            `~/.keras/keras.json` (`"channels_last"` if never set).
        dilation_rate: int or tuple/list of 1 integer, dilation rate for
            dilated convolution.
        activation: activation for the cell output. Defaults to `tanh`.
        recurrent_activation: activation for the recurrent step. Defaults
            to `sigmoid`.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: initializer for the input `kernel`.
        recurrent_initializer: initializer for the `recurrent_kernel`.
        bias_initializer: initializer for the bias vector.
        unit_forget_bias: Boolean. If `True`, add 1 to the forget-gate bias
            at initialization (use with `bias_initializer="zeros"`), as
            recommended in [Jozefowicz et al., 2015](
            http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
        kernel_regularizer: regularizer for the `kernel` weights.
        recurrent_regularizer: regularizer for the `recurrent_kernel`.
        bias_regularizer: regularizer for the bias vector.
        activity_regularizer: regularizer for the layer output.
        kernel_constraint: constraint for the `kernel` weights.
        recurrent_constraint: constraint for the `recurrent_kernel`.
        bias_constraint: constraint for the bias vector.
        dropout: float in [0, 1], fraction of units dropped for the linear
            transformation of the inputs.
        recurrent_dropout: float in [0, 1], fraction of units dropped for
            the linear transformation of the recurrent state.
        seed: random seed for dropout.
        return_sequences: Boolean, return the full sequence rather than
            only the last output. Default: `False`.
        return_state: Boolean, also return the last states. Default:
            `False`.
        go_backwards: Boolean (default `False`), process the input sequence
            backwards and return the reversed sequence.
        stateful: Boolean (default `False`). If `True`, the last state for
            each sample at index i in a batch is used as the initial state
            for the sample of index i in the following batch.

    Call arguments:
        inputs: A 4D tensor.
        initial_state: list of initial state tensors to be passed to the
            first call of the cell.
        mask: binary tensor of shape `(samples, timesteps)` indicating
            whether a given timestep should be masked.
        training: Python boolean for training vs. inference mode; only
            relevant if `dropout` or `recurrent_dropout` are set.

    Input shape:
        - `data_format="channels_first"`:
          4D tensor with shape `(samples, time, channels, rows)`
        - `data_format="channels_last"`:
          4D tensor with shape `(samples, time, rows, channels)`

    Output shape:
        - If `return_state`: a list of tensors — the output followed by the
          last states, each a 3D tensor shaped `(samples, filters,
          new_rows)` (channels-first) or `(samples, new_rows, filters)`
          (channels-last). `rows` may change due to padding.
        - If `return_sequences`: a 4D tensor shaped `(samples, timesteps,
          filters, new_rows)` or `(samples, timesteps, new_rows, filters)`.
        - Else: a 3D tensor shaped `(samples, filters, new_rows)` or
          `(samples, new_rows, filters)`.

    References:
        - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
          (the current implementation does not include the feedback loop on
          the cells output).
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        **kwargs,
    ):
        # Everything is delegated to the rank-generic ConvLSTM base; the
        # only 1D-specific piece of information is `rank=1`.
        conv_args = dict(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        cell_args = dict(
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            seed=seed,
        )
        rnn_args = dict(
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
        )
        super().__init__(
            rank=1, **conv_args, **cell_args, **rnn_args, **kwargs
        )
from keras.src.api_export import keras_export
from keras.src.layers.rnn.conv_lstm import ConvLSTM


@keras_export("keras.layers.ConvLSTM2D")
class ConvLSTM2D(ConvLSTM):
    """2D Convolutional LSTM.

    Similar to an LSTM layer, but the input transformations and recurrent
    transformations are both convolutional (2D convolutions here).

    Args:
        filters: int, dimension of the output space (number of filters).
        kernel_size: int or tuple/list of 2 integers, convolution window
            size.
        strides: int or tuple/list of 2 integers, convolution stride.
            `strides > 1` is incompatible with `dilation_rate > 1`.
        padding: `"valid"` (no padding) or `"same"` (case-insensitive,
            output keeps the input's spatial size).
        data_format: `"channels_last"` or `"channels_first"`; ordering of
            the dimensions in the inputs. Defaults to the
            `image_data_format` value in your Keras config at
            `~/.keras/keras.json` (`"channels_last"` if never set).
        dilation_rate: int or tuple/list of 2 integers, dilation rate for
            dilated convolution.
        activation: activation for the cell output. Defaults to `tanh`.
        recurrent_activation: activation for the recurrent step. Defaults
            to `sigmoid`.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: initializer for the input `kernel`.
        recurrent_initializer: initializer for the `recurrent_kernel`.
        bias_initializer: initializer for the bias vector.
        unit_forget_bias: Boolean. If `True`, add 1 to the forget-gate bias
            at initialization (use with `bias_initializer="zeros"`), as
            recommended in [Jozefowicz et al., 2015](
            http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
        kernel_regularizer: regularizer for the `kernel` weights.
        recurrent_regularizer: regularizer for the `recurrent_kernel`.
        bias_regularizer: regularizer for the bias vector.
        activity_regularizer: regularizer for the layer output.
        kernel_constraint: constraint for the `kernel` weights.
        recurrent_constraint: constraint for the `recurrent_kernel`.
        bias_constraint: constraint for the bias vector.
        dropout: float in [0, 1], fraction of units dropped for the linear
            transformation of the inputs.
        recurrent_dropout: float in [0, 1], fraction of units dropped for
            the linear transformation of the recurrent state.
        seed: random seed for dropout.
        return_sequences: Boolean, return the full sequence rather than
            only the last output. Default: `False`.
        return_state: Boolean, also return the last states. Default:
            `False`.
        go_backwards: Boolean (default `False`), process the input sequence
            backwards and return the reversed sequence.
        stateful: Boolean (default `False`). If `True`, the last state for
            each sample at index i in a batch is used as the initial state
            for the sample of index i in the following batch.

    Call arguments:
        inputs: A 5D tensor.
        mask: binary tensor of shape `(samples, timesteps)` indicating
            whether a given timestep should be masked.
        training: Python boolean for training vs. inference mode; only
            relevant if `dropout` or `recurrent_dropout` are set.
        initial_state: list of initial state tensors to be passed to the
            first call of the cell.

    Input shape:
        - `data_format='channels_first'`:
          5D tensor with shape `(samples, time, channels, rows, cols)`
        - `data_format='channels_last'`:
          5D tensor with shape `(samples, time, rows, cols, channels)`

    Output shape:
        - If `return_state`: a list of tensors — the output followed by the
          last states, each a 4D tensor shaped `(samples, filters,
          new_rows, new_cols)` (channels-first) or `(samples, new_rows,
          new_cols, filters)` (channels-last). `rows` and `cols` may change
          due to padding.
        - If `return_sequences`: a 5D tensor shaped `(samples, timesteps,
          filters, new_rows, new_cols)` or `(samples, timesteps, new_rows,
          new_cols, filters)`.
        - Else: a 4D tensor shaped `(samples, filters, new_rows, new_cols)`
          or `(samples, new_rows, new_cols, filters)`.

    References:
        - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
          (the current implementation does not include the feedback loop on
          the cells output).
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        **kwargs,
    ):
        # Everything is delegated to the rank-generic ConvLSTM base; the
        # only 2D-specific piece of information is `rank=2`.
        conv_args = dict(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        cell_args = dict(
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            seed=seed,
        )
        rnn_args = dict(
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
        )
        super().__init__(
            rank=2, **conv_args, **cell_args, **rnn_args, **kwargs
        )
from keras.src.api_export import keras_export
from keras.src.layers.rnn.conv_lstm import ConvLSTM


@keras_export("keras.layers.ConvLSTM3D")
class ConvLSTM3D(ConvLSTM):
    """3D Convolutional LSTM.

    Similar to an LSTM layer, but the input transformations and recurrent
    transformations are both convolutional (3D convolutions here).

    Args:
        filters: int, dimension of the output space (number of filters).
        kernel_size: int or tuple/list of 3 integers, convolution window
            size.
        strides: int or tuple/list of 3 integers, convolution stride.
            `strides > 1` is incompatible with `dilation_rate > 1`.
        padding: `"valid"` (no padding) or `"same"` (case-insensitive,
            output keeps the input's spatial size).
        data_format: `"channels_last"` or `"channels_first"`; ordering of
            the dimensions in the inputs. Defaults to the
            `image_data_format` value in your Keras config at
            `~/.keras/keras.json` (`"channels_last"` if never set).
        dilation_rate: int or tuple/list of 3 integers, dilation rate for
            dilated convolution.
        activation: activation for the cell output. Defaults to `tanh`.
        recurrent_activation: activation for the recurrent step. Defaults
            to `sigmoid`.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: initializer for the input `kernel`.
        recurrent_initializer: initializer for the `recurrent_kernel`.
        bias_initializer: initializer for the bias vector.
        unit_forget_bias: Boolean. If `True`, add 1 to the forget-gate bias
            at initialization (use with `bias_initializer="zeros"`), as
            recommended in [Jozefowicz et al., 2015](
            http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
        kernel_regularizer: regularizer for the `kernel` weights.
        recurrent_regularizer: regularizer for the `recurrent_kernel`.
        bias_regularizer: regularizer for the bias vector.
        activity_regularizer: regularizer for the layer output.
        kernel_constraint: constraint for the `kernel` weights.
        recurrent_constraint: constraint for the `recurrent_kernel`.
        bias_constraint: constraint for the bias vector.
        dropout: float in [0, 1], fraction of units dropped for the linear
            transformation of the inputs.
        recurrent_dropout: float in [0, 1], fraction of units dropped for
            the linear transformation of the recurrent state.
        seed: random seed for dropout.
        return_sequences: Boolean, return the full sequence rather than
            only the last output. Default: `False`.
        return_state: Boolean, also return the last states. Default:
            `False`.
        go_backwards: Boolean (default `False`), process the input sequence
            backwards and return the reversed sequence.
        stateful: Boolean (default `False`). If `True`, the last state for
            each sample at index i in a batch is used as the initial state
            for the sample of index i in the following batch.

    Call arguments:
        inputs: A 6D tensor.
        mask: binary tensor of shape `(samples, timesteps)` indicating
            whether a given timestep should be masked.
        training: Python boolean for training vs. inference mode; only
            relevant if `dropout` or `recurrent_dropout` are set.
        initial_state: list of initial state tensors to be passed to the
            first call of the cell.

    Input shape:
        - `data_format='channels_first'`:
          6D tensor with shape `(samples, time, channels, *spatial_dims)`
        - `data_format='channels_last'`:
          6D tensor with shape `(samples, time, *spatial_dims, channels)`

    Output shape:
        - If `return_state`: a list of tensors — the output followed by the
          last states, each a 5D tensor shaped `(samples, filters,
          *spatial_dims)` (channels-first) or `(samples, *spatial_dims,
          filters)` (channels-last).
        - If `return_sequences`: a 6D tensor shaped `(samples, timesteps,
          filters, *spatial_dims)` or `(samples, timesteps, *spatial_dims,
          filters)`.
        - Else: a 5D tensor shaped `(samples, filters, *spatial_dims)` or
          `(samples, *spatial_dims, filters)`.

    References:
        - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
          (the current implementation does not include the feedback loop on
          the cells output).
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        **kwargs,
    ):
        # Everything is delegated to the rank-generic ConvLSTM base; the
        # only 3D-specific piece of information is `rank=3`.
        conv_args = dict(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        cell_args = dict(
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            seed=seed,
        )
        rnn_args = dict(
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
        )
        super().__init__(
            rank=3, **conv_args, **cell_args, **rnn_args, **kwargs
        )
from keras.src import backend
from keras.src import ops


class DropoutRNNCell:
    """Object that holds dropout-related functionality for RNN cells.

    This class is not a standalone RNN cell. It is meant to be mixed into
    an RNN cell through multiple inheritance. Any cell that mixes in this
    class should have the following fields:

    - `dropout`: a float number in the range `[0, 1]`.
      Dropout rate for the input tensor.
    - `recurrent_dropout`: a float number in the range `[0, 1]`.
      Dropout rate for the recurrent connections.
    - `seed_generator`, an instance of `backend.random.SeedGenerator`.

    This object will create and cache dropout masks, and reuse them for
    all incoming steps, so that the same mask is used for every step.
    """

    def _create_dropout_mask(self, step_input, dropout_rate):
        """Create a dropout mask (or list of masks) shaped like `step_input`.

        When the cell defines `dropout_mask_count`, a list of that many
        independently sampled masks is returned (e.g. one per gate);
        otherwise a single mask is returned.
        """
        count = getattr(self, "dropout_mask_count", None)
        ones = ops.ones_like(step_input)
        if count is None:
            return backend.random.dropout(
                ones, rate=dropout_rate, seed=self.seed_generator
            )
        else:
            return [
                backend.random.dropout(
                    ones, rate=dropout_rate, seed=self.seed_generator
                )
                for _ in range(count)
            ]

    def get_dropout_mask(self, step_input):
        """Return the cached input-dropout mask, creating it on first use.

        Returns `None` when `self.dropout` is 0 (no mask is created).
        """
        if not hasattr(self, "_dropout_mask"):
            self._dropout_mask = None
        if self._dropout_mask is None and self.dropout > 0:
            self._dropout_mask = self._create_dropout_mask(
                step_input, self.dropout
            )
        return self._dropout_mask

    def get_recurrent_dropout_mask(self, step_input):
        """Return the cached recurrent-dropout mask, creating it on first use.

        Returns `None` when `self.recurrent_dropout` is 0.
        """
        if not hasattr(self, "_recurrent_dropout_mask"):
            self._recurrent_dropout_mask = None
        if self._recurrent_dropout_mask is None and self.recurrent_dropout > 0:
            self._recurrent_dropout_mask = self._create_dropout_mask(
                step_input, self.recurrent_dropout
            )
        return self._recurrent_dropout_mask

    def reset_dropout_mask(self):
        """Reset the cached dropout mask if any.

        The RNN layer invokes this in the `call()` method
        so that the cached mask is cleared after calling `cell.call()`. The
        mask should be cached across all timesteps within the same batch,
        but shouldn't be cached between batches.
        """
        self._dropout_mask = None

    def reset_recurrent_dropout_mask(self):
        """Reset the cached recurrent-dropout mask if any."""
        self._recurrent_dropout_mask = None
    def __init__(
        self,
        units,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        reset_after=True,
        seed=None,
        **kwargs,
    ):
        """Configure the cell; weights are created later in `build()`.

        Raises:
            ValueError: if `units` is not a positive integer.
        """
        if units <= 0:
            raise ValueError(
                "Received an invalid value for argument `units`, "
                f"expected a positive integer, got {units}."
            )
        # `implementation` is a legacy kwarg (1: per-gate matmuls,
        # 2: fused matmuls); it is accepted but not part of the signature.
        implementation = kwargs.pop("implementation", 2)
        super().__init__(**kwargs)
        self.implementation = implementation
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        # Clamp dropout rates into [0, 1].
        self.dropout = min(1.0, max(0.0, dropout))
        self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
        # Recurrent dropout requires the per-gate (implementation 1) path.
        if self.recurrent_dropout != 0.0:
            self.implementation = 1
        if self.implementation == 1:
            # One independent dropout mask per gate (z, r, h); consumed by
            # the DropoutRNNCell mixin.
            self.dropout_mask_count = 3
        self.seed = seed
        self.seed_generator = backend.random.SeedGenerator(seed=seed)

        self.reset_after = reset_after
        self.state_size = self.units
        self.output_size = self.units

    def build(self, input_shape):
        """Create the kernel, recurrent kernel and (optionally) bias weights.

        Gate weights for z, r and h are stored concatenated along the last
        axis, hence the `units * 3` column counts.
        """
        super().build(input_shape)
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(
            shape=(input_dim, self.units * 3),
            name="kernel",
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 3),
            name="recurrent_kernel",
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint,
        )

        if self.use_bias:
            if not self.reset_after:
                bias_shape = (3 * self.units,)
            else:
                # separate biases for input and recurrent kernels
                # Note: the shape is intentionally different from CuDNNGRU
                # biases `(2 * 3 * self.units,)`, so that we can distinguish
                # the classes when loading and converting saved weights.
                bias_shape = (2, 3 * self.units)
            self.bias = self.add_weight(
                shape=bias_shape,
                name="bias",
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        else:
            self.bias = None
        self.built = True

    def call(self, inputs, states, training=False):
        """Run one GRU step; returns `(output, new_state)`."""
        h_tm1 = (
            states[0] if tree.is_nested(states) else states
        )  # previous state

        if self.use_bias:
            if not self.reset_after:
                input_bias, recurrent_bias = self.bias, None
            else:
                # Split the (2, 3*units) bias into input and recurrent rows.
                input_bias, recurrent_bias = (
                    ops.squeeze(e, axis=0)
                    for e in ops.split(self.bias, self.bias.shape[0], axis=0)
                )

        if self.implementation == 1:
            # Per-gate path: separate matmuls for z, r and h, each with its
            # own dropout mask.
            if training and 0.0 < self.dropout < 1.0:
                dp_mask = self.get_dropout_mask(inputs)
                inputs_z = inputs * dp_mask[0]
                inputs_r = inputs * dp_mask[1]
                inputs_h = inputs * dp_mask[2]
            else:
                inputs_z = inputs
                inputs_r = inputs
                inputs_h = inputs

            x_z = ops.matmul(inputs_z, self.kernel[:, : self.units])
            x_r = ops.matmul(
                inputs_r, self.kernel[:, self.units : self.units * 2]
            )
            x_h = ops.matmul(inputs_h, self.kernel[:, self.units * 2 :])

            if self.use_bias:
                x_z += input_bias[: self.units]
                x_r += input_bias[self.units : self.units * 2]
                x_h += input_bias[self.units * 2 :]

            if training and 0.0 < self.recurrent_dropout < 1.0:
                rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1)
                h_tm1_z = h_tm1 * rec_dp_mask[0]
                h_tm1_r = h_tm1 * rec_dp_mask[1]
                h_tm1_h = h_tm1 * rec_dp_mask[2]
            else:
                h_tm1_z = h_tm1
                h_tm1_r = h_tm1
                h_tm1_h = h_tm1

            recurrent_z = ops.matmul(
                h_tm1_z, self.recurrent_kernel[:, : self.units]
            )
            recurrent_r = ops.matmul(
                h_tm1_r, self.recurrent_kernel[:, self.units : self.units * 2]
            )
            if self.reset_after and self.use_bias:
                recurrent_z += recurrent_bias[: self.units]
                recurrent_r += recurrent_bias[self.units : self.units * 2]

            z = self.recurrent_activation(x_z + recurrent_z)
            r = self.recurrent_activation(x_r + recurrent_r)

            # reset gate applied after/before matrix multiplication
            if self.reset_after:
                recurrent_h = ops.matmul(
                    h_tm1_h, self.recurrent_kernel[:, self.units * 2 :]
                )
                if self.use_bias:
                    recurrent_h += recurrent_bias[self.units * 2 :]
                recurrent_h = r * recurrent_h
            else:
                recurrent_h = ops.matmul(
                    r * h_tm1_h, self.recurrent_kernel[:, self.units * 2 :]
                )

            hh = self.activation(x_h + recurrent_h)
        else:
            # Fused path (implementation 2): one matmul for all gates and a
            # single shared dropout mask.
            if training and 0.0 < self.dropout < 1.0:
                dp_mask = self.get_dropout_mask(inputs)
                inputs = inputs * dp_mask

            # inputs projected by all gate matrices at once
            matrix_x = ops.matmul(inputs, self.kernel)
            if self.use_bias:
                # biases: bias_z_i, bias_r_i, bias_h_i
                matrix_x += input_bias

            x_z, x_r, x_h = ops.split(matrix_x, 3, axis=-1)

            if self.reset_after:
                # hidden state projected by all gate matrices at once
                matrix_inner = ops.matmul(h_tm1, self.recurrent_kernel)
                if self.use_bias:
                    matrix_inner += recurrent_bias
            else:
                # hidden state projected separately for update/reset and new
                matrix_inner = ops.matmul(
                    h_tm1, self.recurrent_kernel[:, : 2 * self.units]
                )

            recurrent_z = matrix_inner[:, : self.units]
            recurrent_r = matrix_inner[:, self.units : self.units * 2]
            # When `reset_after` is False, `matrix_inner` only has 2*units
            # columns, so this slice is empty; it is recomputed below.
            recurrent_h = matrix_inner[:, self.units * 2 :]

            z = self.recurrent_activation(x_z + recurrent_z)
            r = self.recurrent_activation(x_r + recurrent_r)

            if self.reset_after:
                recurrent_h = r * recurrent_h
            else:
                recurrent_h = ops.matmul(
                    r * h_tm1, self.recurrent_kernel[:, 2 * self.units :]
                )

            hh = self.activation(x_h + recurrent_h)

        # previous and candidate state mixed by update gate
        h = z * h_tm1 + (1 - z) * hh
        new_state = [h] if tree.is_nested(states) else h
        return h, new_state
activations.serialize(self.activation), + "recurrent_activation": activations.serialize( + self.recurrent_activation + ), + "use_bias": self.use_bias, + "kernel_initializer": initializers.serialize( + self.kernel_initializer + ), + "recurrent_initializer": initializers.serialize( + self.recurrent_initializer + ), + "bias_initializer": initializers.serialize(self.bias_initializer), + "kernel_regularizer": regularizers.serialize( + self.kernel_regularizer + ), + "recurrent_regularizer": regularizers.serialize( + self.recurrent_regularizer + ), + "bias_regularizer": regularizers.serialize(self.bias_regularizer), + "kernel_constraint": constraints.serialize(self.kernel_constraint), + "recurrent_constraint": constraints.serialize( + self.recurrent_constraint + ), + "bias_constraint": constraints.serialize(self.bias_constraint), + "dropout": self.dropout, + "recurrent_dropout": self.recurrent_dropout, + "reset_after": self.reset_after, + "seed": self.seed, + } + base_config = super().get_config() + return {**base_config, **config} + + def get_initial_state(self, batch_size=None): + return [ + ops.zeros((batch_size, self.state_size), dtype=self.compute_dtype) + ] + + +@keras_export("keras.layers.GRU") +class GRU(RNN): + """Gated Recurrent Unit - Cho et al. 2014. + + Based on available runtime hardware and constraints, this layer + will choose different implementations (cuDNN-based or backend-native) + to maximize the performance. If a GPU is available and all + the arguments to the layer meet the requirement of the cuDNN kernel + (see below for details), the layer will use a fast cuDNN implementation + when using the TensorFlow backend. + + The requirements to use the cuDNN implementation are: + + 1. `activation` == `tanh` + 2. `recurrent_activation` == `sigmoid` + 3. `dropout` == 0 and `recurrent_dropout` == 0 + 4. `unroll` is `False` + 5. `use_bias` is `True` + 6. `reset_after` is `True` + 7. Inputs, if use masking, are strictly right-padded. + 8. 
Eager execution is enabled in the outermost context. + + There are two variants of the GRU implementation. The default one is based + on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to + hidden state before matrix multiplication. The other one is based on + [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed. + + The second variant is compatible with CuDNNGRU (GPU-only) and allows + inference on CPU. Thus it has separate biases for `kernel` and + `recurrent_kernel`. To use this variant, set `reset_after=True` and + `recurrent_activation='sigmoid'`. + + For example: + + >>> inputs = np.random.random((32, 10, 8)) + >>> gru = keras.layers.GRU(4) + >>> output = gru(inputs) + >>> output.shape + (32, 4) + >>> gru = keras.layers.GRU(4, return_sequences=True, return_state=True) + >>> whole_sequence_output, final_state = gru(inputs) + >>> whole_sequence_output.shape + (32, 10, 4) + >>> final_state.shape + (32, 4) + + Args: + units: Positive integer, dimensionality of the output space. + activation: Activation function to use. + Default: hyperbolic tangent (`tanh`). + If you pass `None`, no activation is applied + (ie. "linear" activation: `a(x) = x`). + recurrent_activation: Activation function to use + for the recurrent step. + Default: sigmoid (`sigmoid`). + If you pass `None`, no activation is applied + (ie. "linear" activation: `a(x) = x`). + use_bias: Boolean, (default `True`), whether the layer + should use a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix, + used for the linear transformation of the inputs. Default: + `"glorot_uniform"`. + recurrent_initializer: Initializer for the `recurrent_kernel` + weights matrix, used for the linear transformation of the recurrent + state. Default: `"orthogonal"`. + bias_initializer: Initializer for the bias vector. Default: `"zeros"`. + kernel_regularizer: Regularizer function applied to the `kernel` weights + matrix. Default: `None`. 
+ recurrent_regularizer: Regularizer function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_regularizer: Regularizer function applied to the bias vector. + Default: `None`. + activity_regularizer: Regularizer function applied to the output of the + layer (its "activation"). Default: `None`. + kernel_constraint: Constraint function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_constraint: Constraint function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_constraint: Constraint function applied to the bias vector. + Default: `None`. + dropout: Float between 0 and 1. Fraction of the units to drop for the + linear transformation of the inputs. Default: 0. + recurrent_dropout: Float between 0 and 1. Fraction of the units to drop + for the linear transformation of the recurrent state. Default: 0. + seed: Random seed for dropout. + return_sequences: Boolean. Whether to return the last output + in the output sequence, or the full sequence. Default: `False`. + return_state: Boolean. Whether to return the last state in addition + to the output. Default: `False`. + go_backwards: Boolean (default `False`). + If `True`, process the input sequence backwards and return the + reversed sequence. + stateful: Boolean (default: `False`). If `True`, the last state + for each sample at index i in a batch will be used as initial + state for the sample of index i in the following batch. + unroll: Boolean (default: `False`). + If `True`, the network will be unrolled, + else a symbolic loop will be used. + Unrolling can speed-up a RNN, + although it tends to be more memory-intensive. + Unrolling is only suitable for short sequences. + reset_after: GRU convention (whether to apply reset gate after or + before matrix multiplication). `False` is `"before"`, + `True` is `"after"` (default and cuDNN compatible). + use_cudnn: Whether to use a cuDNN-backed implementation. 
`"auto"` will + attempt to use cuDNN when feasible, and will fallback to the + default implementation if not. + + Call arguments: + inputs: A 3D tensor, with shape `(batch, timesteps, feature)`. + mask: Binary tensor of shape `(samples, timesteps)` indicating whether + a given timestep should be masked (optional). + An individual `True` entry indicates that the corresponding timestep + should be utilized, while a `False` entry indicates that the + corresponding timestep should be ignored. Defaults to `None`. + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. This argument is passed to the + cell when calling it. This is only relevant if `dropout` or + `recurrent_dropout` is used (optional). Defaults to `None`. + initial_state: List of initial state tensors to be passed to the first + call of the cell (optional, `None` causes creation + of zero-filled initial state tensors). Defaults to `None`. + """ + + def __init__( + self, + units, + activation="tanh", + recurrent_activation="sigmoid", + use_bias=True, + kernel_initializer="glorot_uniform", + recurrent_initializer="orthogonal", + bias_initializer="zeros", + kernel_regularizer=None, + recurrent_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + recurrent_constraint=None, + bias_constraint=None, + dropout=0.0, + recurrent_dropout=0.0, + seed=None, + return_sequences=False, + return_state=False, + go_backwards=False, + stateful=False, + unroll=False, + reset_after=True, + use_cudnn="auto", + **kwargs, + ): + cell = GRUCell( + units, + activation=activation, + recurrent_activation=recurrent_activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + recurrent_initializer=recurrent_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + recurrent_regularizer=recurrent_regularizer, + bias_regularizer=bias_regularizer, + kernel_constraint=kernel_constraint, + 
recurrent_constraint=recurrent_constraint, + bias_constraint=bias_constraint, + dropout=dropout, + recurrent_dropout=recurrent_dropout, + reset_after=reset_after, + dtype=kwargs.get("dtype", None), + trainable=kwargs.get("trainable", True), + name="gru_cell", + seed=seed, + implementation=kwargs.pop("implementation", 2), + ) + super().__init__( + cell, + return_sequences=return_sequences, + return_state=return_state, + go_backwards=go_backwards, + stateful=stateful, + unroll=unroll, + activity_regularizer=activity_regularizer, + **kwargs, + ) + self.input_spec = InputSpec(ndim=3) + if use_cudnn not in ("auto", True, False): + raise ValueError( + "Invalid valid received for argument `use_cudnn`. " + "Expected one of {'auto', True, False}. " + f"Received: use_cudnn={use_cudnn}" + ) + self.use_cudnn = use_cudnn + if ( + backend.backend() == "tensorflow" + and backend.cudnn_ok( + cell.activation, + cell.recurrent_activation, + self.unroll, + cell.use_bias, + reset_after=reset_after, + ) + and use_cudnn in (True, "auto") + ): + self.supports_jit = False + + def inner_loop(self, sequences, initial_state, mask, training=False): + if tree.is_nested(initial_state): + initial_state = initial_state[0] + if tree.is_nested(mask): + mask = mask[0] + if self.use_cudnn in ("auto", True): + if not self.recurrent_dropout: + try: + if self.dropout: + dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :]) + dp_mask = ops.expand_dims(dp_mask, axis=1) + dp_mask = ops.broadcast_to( + dp_mask, ops.shape(sequences) + ) + dp_sequences = sequences * dp_mask + else: + dp_sequences = sequences + # Backends are allowed to specify (optionally) optimized + # implementation of the inner GRU loop. In the case of + # TF for instance, it will leverage cuDNN when feasible, and + # it will raise NotImplementedError otherwise. 
+ out = backend.gru( + dp_sequences, + initial_state, + mask, + kernel=self.cell.kernel, + recurrent_kernel=self.cell.recurrent_kernel, + bias=self.cell.bias, + activation=self.cell.activation, + recurrent_activation=self.cell.recurrent_activation, + return_sequences=self.return_sequences, + go_backwards=self.go_backwards, + unroll=self.unroll, + reset_after=self.cell.reset_after, + ) + # We disable jit_compile for the model in this case, + # since cuDNN ops aren't XLA compatible. + if backend.backend() == "tensorflow": + self.supports_jit = False + return out + except NotImplementedError: + pass + if self.use_cudnn is True: + raise ValueError( + "use_cudnn=True was specified, " + "but cuDNN is not supported for this layer configuration " + "with this backend. Pass use_cudnn='auto' to fallback " + "to a non-cuDNN implementation." + ) + return super().inner_loop( + sequences, initial_state, mask=mask, training=training + ) + + def call(self, sequences, initial_state=None, mask=None, training=False): + return super().call( + sequences, mask=mask, training=training, initial_state=initial_state + ) + + @property + def units(self): + return self.cell.units + + @property + def activation(self): + return self.cell.activation + + @property + def recurrent_activation(self): + return self.cell.recurrent_activation + + @property + def use_bias(self): + return self.cell.use_bias + + @property + def kernel_initializer(self): + return self.cell.kernel_initializer + + @property + def recurrent_initializer(self): + return self.cell.recurrent_initializer + + @property + def bias_initializer(self): + return self.cell.bias_initializer + + @property + def kernel_regularizer(self): + return self.cell.kernel_regularizer + + @property + def recurrent_regularizer(self): + return self.cell.recurrent_regularizer + + @property + def bias_regularizer(self): + return self.cell.bias_regularizer + + @property + def kernel_constraint(self): + return self.cell.kernel_constraint + + @property + def 
recurrent_constraint(self): + return self.cell.recurrent_constraint + + @property + def bias_constraint(self): + return self.cell.bias_constraint + + @property + def dropout(self): + return self.cell.dropout + + @property + def recurrent_dropout(self): + return self.cell.recurrent_dropout + + @property + def reset_after(self): + return self.cell.reset_after + + def get_config(self): + config = { + "units": self.units, + "activation": activations.serialize(self.activation), + "recurrent_activation": activations.serialize( + self.recurrent_activation + ), + "use_bias": self.use_bias, + "kernel_initializer": initializers.serialize( + self.kernel_initializer + ), + "recurrent_initializer": initializers.serialize( + self.recurrent_initializer + ), + "bias_initializer": initializers.serialize(self.bias_initializer), + "kernel_regularizer": regularizers.serialize( + self.kernel_regularizer + ), + "recurrent_regularizer": regularizers.serialize( + self.recurrent_regularizer + ), + "bias_regularizer": regularizers.serialize(self.bias_regularizer), + "activity_regularizer": regularizers.serialize( + self.activity_regularizer + ), + "kernel_constraint": constraints.serialize(self.kernel_constraint), + "recurrent_constraint": constraints.serialize( + self.recurrent_constraint + ), + "bias_constraint": constraints.serialize(self.bias_constraint), + "dropout": self.dropout, + "recurrent_dropout": self.recurrent_dropout, + "reset_after": self.reset_after, + "seed": self.cell.seed, + } + base_config = super().get_config() + del base_config["cell"] + return {**base_config, **config} + + @classmethod + def from_config(cls, config): + return cls(**config) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/lstm.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..735fcb48f61fd251d71b059f392d0508edb78c60 --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/lstm.py @@ -0,0 +1,693 @@ +from keras.src import activations +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.src.layers.rnn.rnn import RNN + + +@keras_export("keras.layers.LSTMCell") +class LSTMCell(Layer, DropoutRNNCell): + """Cell class for the LSTM layer. + + This class processes one step within the whole time sequence input, whereas + `keras.layer.LSTM` processes the whole sequence. + + Args: + units: Positive integer, dimensionality of the output space. + activation: Activation function to use. Default: hyperbolic tangent + (`tanh`). If you pass None, no activation is applied + (ie. "linear" activation: `a(x) = x`). + recurrent_activation: Activation function to use for the recurrent step. + Default: sigmoid (`sigmoid`). If you pass `None`, no activation is + applied (ie. "linear" activation: `a(x) = x`). + use_bias: Boolean, (default `True`), whether the layer + should use a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix, + used for the linear transformation of the inputs. Default: + `"glorot_uniform"`. + recurrent_initializer: Initializer for the `recurrent_kernel` + weights matrix, used for the linear transformation + of the recurrent state. Default: `"orthogonal"`. + bias_initializer: Initializer for the bias vector. Default: `"zeros"`. + unit_forget_bias: Boolean (default `True`). If `True`, + add 1 to the bias of the forget gate at initialization. + Setting it to `True` will also force `bias_initializer="zeros"`. 
+ This is recommended in [Jozefowicz et al.]( + https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf) + kernel_regularizer: Regularizer function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_regularizer: Regularizer function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_regularizer: Regularizer function applied to the bias vector. + Default: `None`. + kernel_constraint: Constraint function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_constraint: Constraint function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_constraint: Constraint function applied to the bias vector. + Default: `None`. + dropout: Float between 0 and 1. Fraction of the units to drop for the + linear transformation of the inputs. Default: 0. + recurrent_dropout: Float between 0 and 1. Fraction of the units to drop + for the linear transformation of the recurrent state. Default: 0. + seed: Random seed for dropout. + + Call arguments: + inputs: A 2D tensor, with shape `(batch, features)`. + states: A 2D tensor with shape `(batch, units)`, which is the state + from the previous time step. + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. Only relevant when `dropout` or + `recurrent_dropout` is used. + + Example: + + >>> inputs = np.random.random((32, 10, 8)) + >>> rnn = keras.layers.RNN(keras.layers.LSTMCell(4)) + >>> output = rnn(inputs) + >>> output.shape + (32, 4) + >>> rnn = keras.layers.RNN( + ... keras.layers.LSTMCell(4), + ... return_sequences=True, + ... 
return_state=True) + >>> whole_sequence_output, final_state = rnn(inputs) + >>> whole_sequence_output.shape + (32, 10, 4) + >>> final_state.shape + (32, 4) + """ + + def __init__( + self, + units, + activation="tanh", + recurrent_activation="sigmoid", + use_bias=True, + kernel_initializer="glorot_uniform", + recurrent_initializer="orthogonal", + bias_initializer="zeros", + unit_forget_bias=True, + kernel_regularizer=None, + recurrent_regularizer=None, + bias_regularizer=None, + kernel_constraint=None, + recurrent_constraint=None, + bias_constraint=None, + dropout=0.0, + recurrent_dropout=0.0, + seed=None, + **kwargs, + ): + if units <= 0: + raise ValueError( + "Received an invalid value for argument `units`, " + f"expected a positive integer, got {units}." + ) + implementation = kwargs.pop("implementation", 2) + super().__init__(**kwargs) + self.implementation = implementation + self.units = units + self.activation = activations.get(activation) + self.recurrent_activation = activations.get(recurrent_activation) + self.use_bias = use_bias + + self.kernel_initializer = initializers.get(kernel_initializer) + self.recurrent_initializer = initializers.get(recurrent_initializer) + self.bias_initializer = initializers.get(bias_initializer) + + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.recurrent_regularizer = regularizers.get(recurrent_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + + self.kernel_constraint = constraints.get(kernel_constraint) + self.recurrent_constraint = constraints.get(recurrent_constraint) + self.bias_constraint = constraints.get(bias_constraint) + + self.dropout = min(1.0, max(0.0, dropout)) + self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout)) + if self.recurrent_dropout != 0.0: + self.implementation = 1 + if self.implementation == 1: + self.dropout_mask_count = 4 + self.seed = seed + self.seed_generator = backend.random.SeedGenerator(seed=seed) + + self.unit_forget_bias = 
unit_forget_bias + self.state_size = [self.units, self.units] + self.output_size = self.units + + def build(self, input_shape): + super().build(input_shape) + input_dim = input_shape[-1] + self.kernel = self.add_weight( + shape=(input_dim, self.units * 4), + name="kernel", + initializer=self.kernel_initializer, + regularizer=self.kernel_regularizer, + constraint=self.kernel_constraint, + ) + self.recurrent_kernel = self.add_weight( + shape=(self.units, self.units * 4), + name="recurrent_kernel", + initializer=self.recurrent_initializer, + regularizer=self.recurrent_regularizer, + constraint=self.recurrent_constraint, + ) + + if self.use_bias: + if self.unit_forget_bias: + + def bias_initializer(_, *args, **kwargs): + return ops.concatenate( + [ + self.bias_initializer( + (self.units,), *args, **kwargs + ), + initializers.get("ones")( + (self.units,), *args, **kwargs + ), + self.bias_initializer( + (self.units * 2,), *args, **kwargs + ), + ] + ) + + else: + bias_initializer = self.bias_initializer + self.bias = self.add_weight( + shape=(self.units * 4,), + name="bias", + initializer=bias_initializer, + regularizer=self.bias_regularizer, + constraint=self.bias_constraint, + ) + else: + self.bias = None + self.built = True + + def _compute_carry_and_output(self, x, h_tm1, c_tm1): + """Computes carry and output using split kernels.""" + x_i, x_f, x_c, x_o = x + h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1 + i = self.recurrent_activation( + x_i + ops.matmul(h_tm1_i, self.recurrent_kernel[:, : self.units]) + ) + f = self.recurrent_activation( + x_f + + ops.matmul( + h_tm1_f, self.recurrent_kernel[:, self.units : self.units * 2] + ) + ) + c = f * c_tm1 + i * self.activation( + x_c + + ops.matmul( + h_tm1_c, + self.recurrent_kernel[:, self.units * 2 : self.units * 3], + ) + ) + o = self.recurrent_activation( + x_o + + ops.matmul(h_tm1_o, self.recurrent_kernel[:, self.units * 3 :]) + ) + return c, o + + def _compute_carry_and_output_fused(self, z, c_tm1): + """Computes carry 
and output using fused kernels.""" + z0, z1, z2, z3 = z + i = self.recurrent_activation(z0) + f = self.recurrent_activation(z1) + c = f * c_tm1 + i * self.activation(z2) + o = self.recurrent_activation(z3) + return c, o + + def call(self, inputs, states, training=False): + h_tm1 = states[0] # previous memory state + c_tm1 = states[1] # previous carry state + + if self.implementation == 1: + if training and 0.0 < self.dropout < 1.0: + dp_mask = self.get_dropout_mask(inputs) + inputs_i = inputs * dp_mask[0] + inputs_f = inputs * dp_mask[1] + inputs_c = inputs * dp_mask[2] + inputs_o = inputs * dp_mask[3] + else: + inputs_i = inputs + inputs_f = inputs + inputs_c = inputs + inputs_o = inputs + k_i, k_f, k_c, k_o = ops.split(self.kernel, 4, axis=1) + x_i = ops.matmul(inputs_i, k_i) + x_f = ops.matmul(inputs_f, k_f) + x_c = ops.matmul(inputs_c, k_c) + x_o = ops.matmul(inputs_o, k_o) + if self.use_bias: + b_i, b_f, b_c, b_o = ops.split(self.bias, 4, axis=0) + x_i += b_i + x_f += b_f + x_c += b_c + x_o += b_o + + if training and 0.0 < self.recurrent_dropout < 1.0: + rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1) + h_tm1_i = h_tm1 * rec_dp_mask[0] + h_tm1_f = h_tm1 * rec_dp_mask[1] + h_tm1_c = h_tm1 * rec_dp_mask[2] + h_tm1_o = h_tm1 * rec_dp_mask[3] + else: + h_tm1_i = h_tm1 + h_tm1_f = h_tm1 + h_tm1_c = h_tm1 + h_tm1_o = h_tm1 + x = (x_i, x_f, x_c, x_o) + h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o) + c, o = self._compute_carry_and_output(x, h_tm1, c_tm1) + else: + if training and 0.0 < self.dropout < 1.0: + dp_mask = self.get_dropout_mask(inputs) + inputs = inputs * dp_mask + + z = ops.matmul(inputs, self.kernel) + + z += ops.matmul(h_tm1, self.recurrent_kernel) + if self.use_bias: + z += self.bias + + z = ops.split(z, 4, axis=1) + c, o = self._compute_carry_and_output_fused(z, c_tm1) + + h = o * self.activation(c) + return h, [h, c] + + def get_config(self): + config = { + "units": self.units, + "activation": activations.serialize(self.activation), + 
"recurrent_activation": activations.serialize( + self.recurrent_activation + ), + "use_bias": self.use_bias, + "unit_forget_bias": self.unit_forget_bias, + "kernel_initializer": initializers.serialize( + self.kernel_initializer + ), + "recurrent_initializer": initializers.serialize( + self.recurrent_initializer + ), + "bias_initializer": initializers.serialize(self.bias_initializer), + "kernel_regularizer": regularizers.serialize( + self.kernel_regularizer + ), + "recurrent_regularizer": regularizers.serialize( + self.recurrent_regularizer + ), + "bias_regularizer": regularizers.serialize(self.bias_regularizer), + "kernel_constraint": constraints.serialize(self.kernel_constraint), + "recurrent_constraint": constraints.serialize( + self.recurrent_constraint + ), + "bias_constraint": constraints.serialize(self.bias_constraint), + "dropout": self.dropout, + "recurrent_dropout": self.recurrent_dropout, + "seed": self.seed, + } + base_config = super().get_config() + return {**base_config, **config} + + def get_initial_state(self, batch_size=None): + return [ + ops.zeros((batch_size, d), dtype=self.compute_dtype) + for d in self.state_size + ] + + +@keras_export("keras.layers.LSTM") +class LSTM(RNN): + """Long Short-Term Memory layer - Hochreiter 1997. + + Based on available runtime hardware and constraints, this layer + will choose different implementations (cuDNN-based or backend-native) + to maximize the performance. If a GPU is available and all + the arguments to the layer meet the requirement of the cuDNN kernel + (see below for details), the layer will use a fast cuDNN implementation + when using the TensorFlow backend. + The requirements to use the cuDNN implementation are: + + 1. `activation` == `tanh` + 2. `recurrent_activation` == `sigmoid` + 3. `dropout` == 0 and `recurrent_dropout` == 0 + 4. `unroll` is `False` + 5. `use_bias` is `True` + 6. Inputs, if use masking, are strictly right-padded. + 7. Eager execution is enabled in the outermost context. 
+ + For example: + + >>> inputs = np.random.random((32, 10, 8)) + >>> lstm = keras.layers.LSTM(4) + >>> output = lstm(inputs) + >>> output.shape + (32, 4) + >>> lstm = keras.layers.LSTM( + ... 4, return_sequences=True, return_state=True) + >>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs) + >>> whole_seq_output.shape + (32, 10, 4) + >>> final_memory_state.shape + (32, 4) + >>> final_carry_state.shape + (32, 4) + + Args: + units: Positive integer, dimensionality of the output space. + activation: Activation function to use. + Default: hyperbolic tangent (`tanh`). + If you pass `None`, no activation is applied + (ie. "linear" activation: `a(x) = x`). + recurrent_activation: Activation function to use + for the recurrent step. + Default: sigmoid (`sigmoid`). + If you pass `None`, no activation is applied + (ie. "linear" activation: `a(x) = x`). + use_bias: Boolean, (default `True`), whether the layer + should use a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix, + used for the linear transformation of the inputs. Default: + `"glorot_uniform"`. + recurrent_initializer: Initializer for the `recurrent_kernel` + weights matrix, used for the linear transformation of the recurrent + state. Default: `"orthogonal"`. + bias_initializer: Initializer for the bias vector. Default: `"zeros"`. + unit_forget_bias: Boolean (default `True`). If `True`, + add 1 to the bias of the forget gate at initialization. + Setting it to `True` will also force `bias_initializer="zeros"`. + This is recommended in [Jozefowicz et al.]( + https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf) + kernel_regularizer: Regularizer function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_regularizer: Regularizer function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_regularizer: Regularizer function applied to the bias vector. + Default: `None`. 
+ activity_regularizer: Regularizer function applied to the output of the + layer (its "activation"). Default: `None`. + kernel_constraint: Constraint function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_constraint: Constraint function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_constraint: Constraint function applied to the bias vector. + Default: `None`. + dropout: Float between 0 and 1. Fraction of the units to drop for the + linear transformation of the inputs. Default: 0. + recurrent_dropout: Float between 0 and 1. Fraction of the units to drop + for the linear transformation of the recurrent state. Default: 0. + seed: Random seed for dropout. + return_sequences: Boolean. Whether to return the last output + in the output sequence, or the full sequence. Default: `False`. + return_state: Boolean. Whether to return the last state in addition + to the output. Default: `False`. + go_backwards: Boolean (default: `False`). + If `True`, process the input sequence backwards and return the + reversed sequence. + stateful: Boolean (default: `False`). If `True`, the last state + for each sample at index i in a batch will be used as initial + state for the sample of index i in the following batch. + unroll: Boolean (default False). + If `True`, the network will be unrolled, + else a symbolic loop will be used. + Unrolling can speed-up a RNN, + although it tends to be more memory-intensive. + Unrolling is only suitable for short sequences. + use_cudnn: Whether to use a cuDNN-backed implementation. `"auto"` will + attempt to use cuDNN when feasible, and will fallback to the + default implementation if not. + + Call arguments: + inputs: A 3D tensor, with shape `(batch, timesteps, feature)`. + mask: Binary tensor of shape `(samples, timesteps)` indicating whether + a given timestep should be masked (optional). 
+ An individual `True` entry indicates that the corresponding timestep + should be utilized, while a `False` entry indicates that the + corresponding timestep should be ignored. Defaults to `None`. + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. This argument is passed to the + cell when calling it. This is only relevant if `dropout` or + `recurrent_dropout` is used (optional). Defaults to `None`. + initial_state: List of initial state tensors to be passed to the first + call of the cell (optional, `None` causes creation + of zero-filled initial state tensors). Defaults to `None`. + """ + + def __init__( + self, + units, + activation="tanh", + recurrent_activation="sigmoid", + use_bias=True, + kernel_initializer="glorot_uniform", + recurrent_initializer="orthogonal", + bias_initializer="zeros", + unit_forget_bias=True, + kernel_regularizer=None, + recurrent_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + recurrent_constraint=None, + bias_constraint=None, + dropout=0.0, + recurrent_dropout=0.0, + seed=None, + return_sequences=False, + return_state=False, + go_backwards=False, + stateful=False, + unroll=False, + use_cudnn="auto", + **kwargs, + ): + cell = LSTMCell( + units, + activation=activation, + recurrent_activation=recurrent_activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + unit_forget_bias=unit_forget_bias, + recurrent_initializer=recurrent_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + recurrent_regularizer=recurrent_regularizer, + bias_regularizer=bias_regularizer, + kernel_constraint=kernel_constraint, + recurrent_constraint=recurrent_constraint, + bias_constraint=bias_constraint, + dropout=dropout, + recurrent_dropout=recurrent_dropout, + dtype=kwargs.get("dtype", None), + trainable=kwargs.get("trainable", True), + name="lstm_cell", + seed=seed, + 
implementation=kwargs.pop("implementation", 2), + ) + super().__init__( + cell, + return_sequences=return_sequences, + return_state=return_state, + go_backwards=go_backwards, + stateful=stateful, + unroll=unroll, + activity_regularizer=activity_regularizer, + **kwargs, + ) + self.input_spec = InputSpec(ndim=3) + if use_cudnn not in ("auto", True, False): + raise ValueError( + "Invalid valid received for argument `use_cudnn`. " + "Expected one of {'auto', True, False}. " + f"Received: use_cudnn={use_cudnn}" + ) + self.use_cudnn = use_cudnn + if ( + backend.backend() == "tensorflow" + and backend.cudnn_ok( + cell.activation, + cell.recurrent_activation, + self.unroll, + cell.use_bias, + ) + and use_cudnn in (True, "auto") + ): + self.supports_jit = False + + def inner_loop(self, sequences, initial_state, mask, training=False): + if tree.is_nested(mask): + mask = mask[0] + + if self.use_cudnn in ("auto", True): + if not self.recurrent_dropout: + try: + if self.dropout: + dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :]) + dp_mask = ops.expand_dims(dp_mask, axis=1) + dp_mask = ops.broadcast_to( + dp_mask, ops.shape(sequences) + ) + dp_sequences = sequences * dp_mask + else: + dp_sequences = sequences + + # Backends are allowed to specify (optionally) optimized + # implementation of the inner LSTM loop. In the case of + # TF for instance, it will leverage cuDNN when feasible, and + # it will raise NotImplementedError otherwise. + out = backend.lstm( + dp_sequences, + initial_state[0], + initial_state[1], + mask, + kernel=self.cell.kernel, + recurrent_kernel=self.cell.recurrent_kernel, + bias=self.cell.bias, + activation=self.cell.activation, + recurrent_activation=self.cell.recurrent_activation, + return_sequences=self.return_sequences, + go_backwards=self.go_backwards, + unroll=self.unroll, + ) + # We disable jit_compile for the model in this case, + # since cuDNN ops aren't XLA compatible. 
+ if backend.backend() == "tensorflow": + self.supports_jit = False + return out + except NotImplementedError: + pass + if self.use_cudnn is True: + raise ValueError( + "use_cudnn=True was specified, " + "but cuDNN is not supported for this layer configuration " + "with this backend. Pass use_cudnn='auto' to fallback " + "to a non-cuDNN implementation." + ) + return super().inner_loop( + sequences, initial_state, mask=mask, training=training + ) + + def call(self, sequences, initial_state=None, mask=None, training=False): + return super().call( + sequences, mask=mask, training=training, initial_state=initial_state + ) + + @property + def units(self): + return self.cell.units + + @property + def activation(self): + return self.cell.activation + + @property + def recurrent_activation(self): + return self.cell.recurrent_activation + + @property + def use_bias(self): + return self.cell.use_bias + + @property + def unit_forget_bias(self): + return self.cell.unit_forget_bias + + @property + def kernel_initializer(self): + return self.cell.kernel_initializer + + @property + def recurrent_initializer(self): + return self.cell.recurrent_initializer + + @property + def bias_initializer(self): + return self.cell.bias_initializer + + @property + def kernel_regularizer(self): + return self.cell.kernel_regularizer + + @property + def recurrent_regularizer(self): + return self.cell.recurrent_regularizer + + @property + def bias_regularizer(self): + return self.cell.bias_regularizer + + @property + def kernel_constraint(self): + return self.cell.kernel_constraint + + @property + def recurrent_constraint(self): + return self.cell.recurrent_constraint + + @property + def bias_constraint(self): + return self.cell.bias_constraint + + @property + def dropout(self): + return self.cell.dropout + + @property + def recurrent_dropout(self): + return self.cell.recurrent_dropout + + def get_config(self): + config = { + "units": self.units, + "activation": 
activations.serialize(self.activation), + "recurrent_activation": activations.serialize( + self.recurrent_activation + ), + "use_bias": self.use_bias, + "kernel_initializer": initializers.serialize( + self.kernel_initializer + ), + "recurrent_initializer": initializers.serialize( + self.recurrent_initializer + ), + "bias_initializer": initializers.serialize(self.bias_initializer), + "unit_forget_bias": self.unit_forget_bias, + "kernel_regularizer": regularizers.serialize( + self.kernel_regularizer + ), + "recurrent_regularizer": regularizers.serialize( + self.recurrent_regularizer + ), + "bias_regularizer": regularizers.serialize(self.bias_regularizer), + "activity_regularizer": regularizers.serialize( + self.activity_regularizer + ), + "kernel_constraint": constraints.serialize(self.kernel_constraint), + "recurrent_constraint": constraints.serialize( + self.recurrent_constraint + ), + "bias_constraint": constraints.serialize(self.bias_constraint), + "dropout": self.dropout, + "recurrent_dropout": self.recurrent_dropout, + "seed": self.cell.seed, + } + base_config = super().get_config() + del base_config["cell"] + return {**base_config, **config} + + @classmethod + def from_config(cls, config): + return cls(**config) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/rnn.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..b0cbc795aeb5e96c1baf721773ef61197434078a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/rnn.py @@ -0,0 +1,475 @@ +from keras.src import backend +from keras.src import ops +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells +from keras.src.saving 
import serialization_lib +from keras.src.utils import tracking + + +@keras_export("keras.layers.RNN") +class RNN(Layer): + """Base class for recurrent layers. + + Args: + cell: A RNN cell instance or a list of RNN cell instances. + A RNN cell is a class that has: + - A `call(input_at_t, states_at_t)` method, returning + `(output_at_t, states_at_t_plus_1)`. The call method of the + cell can also take the optional argument `constants`, see + section "Note on passing external constants" below. + - A `state_size` attribute. This can be a single integer + (single state) in which case it is the size of the recurrent + state. This can also be a list/tuple of integers + (one size per state). + - A `output_size` attribute, a single integer. + - A `get_initial_state(batch_size=None)` + method that creates a tensor meant to be fed to `call()` as the + initial state, if the user didn't specify any initial state + via other means. The returned initial state should have + shape `(batch_size, cell.state_size)`. + The cell might choose to create a tensor full of zeros, + or other values based on the cell's implementation. + `inputs` is the input tensor to the RNN layer, with shape + `(batch_size, timesteps, features)`. + If this method is not implemented + by the cell, the RNN layer will create a zero filled tensor + with shape `(batch_size, cell.state_size)`. + In the case that `cell` is a list of RNN cell instances, the cells + will be stacked on top of each other in the RNN, resulting in an + efficient stacked RNN. + return_sequences: Boolean (default `False`). Whether to return the last + output in the output sequence, or the full sequence. + return_state: Boolean (default `False`). + Whether to return the last state in addition to the output. + go_backwards: Boolean (default `False`). + If `True`, process the input sequence backwards and return the + reversed sequence. + stateful: Boolean (default `False`). 
If True, the last state + for each sample at index `i` in a batch will be used as initial + state for the sample of index `i` in the following batch. + unroll: Boolean (default `False`). + If True, the network will be unrolled, else a symbolic loop will be + used. Unrolling can speed-up a RNN, although it tends to be more + memory-intensive. Unrolling is only suitable for short sequences. + zero_output_for_mask: Boolean (default `False`). + Whether the output should use zeros for the masked timesteps. + Note that this field is only used when `return_sequences` + is `True` and `mask` is provided. + It can useful if you want to reuse the raw output sequence of + the RNN without interference from the masked timesteps, e.g., + merging bidirectional RNNs. + + Call arguments: + sequences: A 3-D tensor with shape `(batch_size, timesteps, features)`. + initial_state: List of initial state tensors to be passed to the first + call of the cell. + mask: Binary tensor of shape `[batch_size, timesteps]` + indicating whether a given timestep should be masked. + An individual `True` entry indicates that the corresponding + timestep should be utilized, while a `False` entry indicates + that the corresponding timestep should be ignored. + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. This argument is passed + to the cell when calling it. + This is for use with cells that use dropout. + + Output shape: + + - If `return_state`: a list of tensors. The first tensor is + the output. The remaining tensors are the last states, + each with shape `(batch_size, state_size)`, where `state_size` could + be a high dimension tensor shape. + - If `return_sequences`: 3D tensor with shape + `(batch_size, timesteps, output_size)`. + + Masking: + + This layer supports masking for input data with a variable number + of timesteps. 
To introduce masks to your data, + use a `keras.layers.Embedding` layer with the `mask_zero` parameter + set to `True`. + + Note on using statefulness in RNNs: + + You can set RNN layers to be 'stateful', which means that the states + computed for the samples in one batch will be reused as initial states + for the samples in the next batch. This assumes a one-to-one mapping + between samples in different successive batches. + + To enable statefulness: + + - Specify `stateful=True` in the layer constructor. + - Specify a fixed batch size for your model, by passing + `batch_size=...` to the `Input` layer(s) of your model. + Remember to also specify the same `batch_size=...` when + calling `fit()`, or otherwise use a generator-like + data source like a `keras.utils.PyDataset` or a + `tf.data.Dataset`. + - Specify `shuffle=False` when calling `fit()`, since your + batches are expected to be temporally ordered. + + To reset the states of your model, call `.reset_state()` on either + a specific layer, or on your entire model. + + Note on specifying the initial state of RNNs: + + You can specify the initial state of RNN layers symbolically by + calling them with the keyword argument `initial_state`. The value of + `initial_state` should be a tensor or list of tensors representing + the initial state of the RNN layer. + + You can specify the initial state of RNN layers numerically by + calling `reset_state()` with the keyword argument `states`. The value of + `states` should be a numpy array or list of numpy arrays representing + the initial state of the RNN layer. + + Examples: + + ```python + from keras.layers import RNN + from keras import ops + + # First, let's define a RNN Cell, as a layer subclass. 
+ class MinimalRNNCell(keras.Layer): + + def __init__(self, units, **kwargs): + super().__init__(**kwargs) + self.units = units + self.state_size = units + + def build(self, input_shape): + self.kernel = self.add_weight(shape=(input_shape[-1], self.units), + initializer='uniform', + name='kernel') + self.recurrent_kernel = self.add_weight( + shape=(self.units, self.units), + initializer='uniform', + name='recurrent_kernel') + self.built = True + + def call(self, inputs, states): + prev_output = states[0] + h = ops.matmul(inputs, self.kernel) + output = h + ops.matmul(prev_output, self.recurrent_kernel) + return output, [output] + + # Let's use this cell in a RNN layer: + + cell = MinimalRNNCell(32) + x = keras.Input((None, 5)) + layer = RNN(cell) + y = layer(x) + + # Here's how to use the cell to build a stacked RNN: + + cells = [MinimalRNNCell(32), MinimalRNNCell(64)] + x = keras.Input((None, 5)) + layer = RNN(cells) + y = layer(x) + ``` + """ + + def __init__( + self, + cell, + return_sequences=False, + return_state=False, + go_backwards=False, + stateful=False, + unroll=False, + zero_output_for_mask=False, + **kwargs, + ): + if isinstance(cell, (list, tuple)): + cell = StackedRNNCells(cell) + if "call" not in dir(cell): + raise ValueError( + "Argument `cell` should have a `call` method. " + f"Received: cell={cell}" + ) + if "state_size" not in dir(cell): + raise ValueError( + "The RNN cell should have a `state_size` attribute " + "(single integer or list of integers, " + "one integer per RNN state). " + f"Received: cell={cell}" + ) + super().__init__(**kwargs) + + # If True, the output for masked timestep will be zeros, whereas in the + # False case, output from previous timestep is returned for masked + # timestep. 
+ self.zero_output_for_mask = zero_output_for_mask + self.cell = cell + self.return_sequences = return_sequences + self.return_state = return_state + self.go_backwards = go_backwards + self.stateful = stateful + self.unroll = unroll + + self.supports_masking = True + self.input_spec = None + self.states = None + + state_size = getattr(self.cell, "state_size", None) + if state_size is None: + raise ValueError( + "state_size must be specified as property on the RNN cell." + ) + if not isinstance(state_size, (list, tuple, int)): + raise ValueError( + "state_size must be an integer, or a list/tuple of integers " + "(one for each state tensor)." + ) + if isinstance(state_size, int): + self.state_size = [state_size] + self.single_state = True + else: + self.state_size = list(state_size) + self.single_state = False + + def compute_output_shape(self, sequences_shape, initial_state_shape=None): + batch_size = sequences_shape[0] + length = sequences_shape[1] + states_shape = [] + for state_size in self.state_size: + if isinstance(state_size, int): + states_shape.append((batch_size, state_size)) + elif isinstance(state_size, (list, tuple)): + states_shape.append([(batch_size, s) for s in state_size]) + + output_size = getattr(self.cell, "output_size", None) + if output_size is None: + output_size = self.state_size[0] + if not isinstance(output_size, int): + raise ValueError("output_size must be an integer.") + if self.return_sequences: + output_shape = (batch_size, length, output_size) + else: + output_shape = (batch_size, output_size) + if self.return_state: + return output_shape, *states_shape + return output_shape + + def compute_mask(self, _, mask): + # Time step masks must be the same for each input. + # This is because the mask for an RNN is of size [batch, time_steps, 1], + # and specifies which time steps should be skipped, and a time step + # must be skipped for all inputs. 
+ mask = tree.flatten(mask)[0] + output_mask = mask if self.return_sequences else None + if self.return_state: + state_mask = [None for _ in self.state_size] + return [output_mask] + state_mask + else: + return output_mask + + def build(self, sequences_shape, initial_state_shape=None): + # Build cell (if layer). + step_input_shape = (sequences_shape[0],) + tuple(sequences_shape[2:]) + if isinstance(self.cell, Layer) and not self.cell.built: + self.cell.build(step_input_shape) + self.cell.built = True + if self.stateful: + if self.states is not None: + self.reset_state() + else: + if sequences_shape[0] is None: + raise ValueError( + "When using `stateful=True` in a RNN, the " + "batch size must be static. Found dynamic " + f"batch size: sequence.shape={sequences_shape}" + ) + self._create_state_variables(sequences_shape[0]) + self.built = True + + @tracking.no_automatic_dependency_tracking + def _create_state_variables(self, batch_size): + with backend.name_scope(self.name, caller=self): + self.states = tree.map_structure( + lambda value: backend.Variable( + value, + trainable=False, + dtype=self.variable_dtype, + name="rnn_state", + ), + self.get_initial_state(batch_size), + ) + + def get_initial_state(self, batch_size): + get_initial_state_fn = getattr(self.cell, "get_initial_state", None) + if get_initial_state_fn: + init_state = get_initial_state_fn(batch_size=batch_size) + else: + return [ + ops.zeros((batch_size, d), dtype=self.cell.compute_dtype) + for d in self.state_size + ] + + # RNN expect the states in a list, even if single state. + if not tree.is_nested(init_state): + init_state = [init_state] + # Force the state to be a list in case it is a namedtuple eg + # LSTMStateTuple. + return list(init_state) + + def reset_states(self): + # Compatibility alias. 
+ self.reset_state() + + def reset_state(self): + if self.states is not None: + for v in self.states: + v.assign(ops.zeros_like(v)) + + def inner_loop(self, sequences, initial_state, mask, training=False): + cell_kwargs = {} + if isinstance(self.cell, Layer) and self.cell._call_has_training_arg: + cell_kwargs["training"] = training + + def step(inputs, states): + output, new_states = self.cell(inputs, states, **cell_kwargs) + if not tree.is_nested(new_states): + new_states = [new_states] + return output, new_states + + if not tree.is_nested(initial_state): + initial_state = [initial_state] + + return backend.rnn( + step, + sequences, + initial_state, + go_backwards=self.go_backwards, + mask=mask, + unroll=self.unroll, + input_length=sequences.shape[1], + zero_output_for_mask=self.zero_output_for_mask, + return_all_outputs=self.return_sequences, + ) + + def call( + self, + sequences, + initial_state=None, + mask=None, + training=False, + ): + timesteps = sequences.shape[1] + if self.unroll and timesteps is None: + raise ValueError( + "Cannot unroll a RNN if the " + "time dimension is undefined. \n" + "- If using a Sequential model, " + "specify the time dimension by passing " + "an `Input()` as your first layer.\n" + "- If using the functional API, specify " + "the time dimension by passing a `shape` " + "or `batch_shape` argument to your `Input()`." + ) + + if initial_state is None: + if self.stateful: + initial_state = self.states + else: + initial_state = self.get_initial_state( + batch_size=ops.shape(sequences)[0] + ) + # RNN expect the states in a list, even if single state. + if not tree.is_nested(initial_state): + initial_state = [initial_state] + initial_state = list(initial_state) + + # Cast states to compute dtype. + # Note that states may be deeply nested + # (e.g. in the stacked cells case). 
+ initial_state = tree.map_structure( + lambda x: backend.convert_to_tensor( + x, dtype=self.cell.compute_dtype + ), + initial_state, + ) + + # Prepopulate the dropout state so that the inner_loop is stateless + # this is particularly important for JAX backend. + self._maybe_config_dropout_masks( + self.cell, sequences[:, 0, :], initial_state + ) + + last_output, outputs, states = self.inner_loop( + sequences=sequences, + initial_state=initial_state, + mask=mask, + training=training, + ) + last_output = ops.cast(last_output, self.compute_dtype) + outputs = ops.cast(outputs, self.compute_dtype) + states = tree.map_structure( + lambda x: ops.cast(x, dtype=self.compute_dtype), states + ) + self._maybe_reset_dropout_masks(self.cell) + + if self.stateful: + for self_state, state in zip( + tree.flatten(self.states), tree.flatten(states) + ): + self_state.assign(state) + + if self.return_sequences: + output = outputs + else: + output = last_output + + if self.return_state: + return output, *states + return output + + def _maybe_config_dropout_masks(self, cell, input_sequence, input_state): + state = ( + input_state[0] + if isinstance(input_state, (list, tuple)) + else input_state + ) + if isinstance(cell, DropoutRNNCell): + cell.get_dropout_mask(input_sequence) + cell.get_recurrent_dropout_mask(state) + if isinstance(cell, StackedRNNCells): + for c, s in zip(cell.cells, input_state): + self._maybe_config_dropout_masks(c, input_sequence, s) + # Replicate the behavior of `StackedRNNCells.call` to compute + # the inputs for the next cell. 
+ s = list(s) if tree.is_nested(s) else [s] + cell_call_fn = c.__call__ if callable(c) else c.call + input_sequence, _ = cell_call_fn(input_sequence, s) + + def _maybe_reset_dropout_masks(self, cell): + if isinstance(cell, DropoutRNNCell): + cell.reset_dropout_mask() + cell.reset_recurrent_dropout_mask() + if isinstance(cell, StackedRNNCells): + for c in cell.cells: + self._maybe_reset_dropout_masks(c) + + def get_config(self): + config = { + "return_sequences": self.return_sequences, + "return_state": self.return_state, + "go_backwards": self.go_backwards, + "stateful": self.stateful, + "unroll": self.unroll, + "zero_output_for_mask": self.zero_output_for_mask, + } + config["cell"] = serialization_lib.serialize_keras_object(self.cell) + base_config = super().get_config() + return {**base_config, **config} + + @classmethod + def from_config(cls, config, custom_objects=None): + cell = serialization_lib.deserialize_keras_object( + config.pop("cell"), custom_objects=custom_objects + ) + layer = cls(cell, **config) + return layer diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/simple_rnn.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/simple_rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..e2811e9621660c6989df1815c7738c404fc05b24 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/simple_rnn.py @@ -0,0 +1,450 @@ +from keras.src import activations +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell +from keras.src.layers.rnn.rnn import RNN + + +@keras_export("keras.layers.SimpleRNNCell") +class 
SimpleRNNCell(Layer, DropoutRNNCell): + """Cell class for SimpleRNN. + + This class processes one step within the whole time sequence input, whereas + `keras.layer.SimpleRNN` processes the whole sequence. + + Args: + units: Positive integer, dimensionality of the output space. + activation: Activation function to use. + Default: hyperbolic tangent (`tanh`). + If you pass `None`, no activation is applied + (ie. "linear" activation: `a(x) = x`). + use_bias: Boolean, (default `True`), whether the layer + should use a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix, + used for the linear transformation of the inputs. Default: + `"glorot_uniform"`. + recurrent_initializer: Initializer for the `recurrent_kernel` + weights matrix, used for the linear transformation + of the recurrent state. Default: `"orthogonal"`. + bias_initializer: Initializer for the bias vector. Default: `"zeros"`. + kernel_regularizer: Regularizer function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_regularizer: Regularizer function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_regularizer: Regularizer function applied to the bias vector. + Default: `None`. + kernel_constraint: Constraint function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_constraint: Constraint function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_constraint: Constraint function applied to the bias vector. + Default: `None`. + dropout: Float between 0 and 1. Fraction of the units to drop for the + linear transformation of the inputs. Default: 0. + recurrent_dropout: Float between 0 and 1. Fraction of the units to drop + for the linear transformation of the recurrent state. Default: 0. + seed: Random seed for dropout. + + Call arguments: + sequence: A 2D tensor, with shape `(batch, features)`. 
+ states: A 2D tensor with shape `(batch, units)`, which is the state + from the previous time step. + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. Only relevant when `dropout` or + `recurrent_dropout` is used. + + Example: + + ```python + inputs = np.random.random([32, 10, 8]).astype(np.float32) + rnn = keras.layers.RNN(keras.layers.SimpleRNNCell(4)) + output = rnn(inputs) # The output has shape `(32, 4)`. + rnn = keras.layers.RNN( + keras.layers.SimpleRNNCell(4), + return_sequences=True, + return_state=True + ) + # whole_sequence_output has shape `(32, 10, 4)`. + # final_state has shape `(32, 4)`. + whole_sequence_output, final_state = rnn(inputs) + ``` + """ + + def __init__( + self, + units, + activation="tanh", + use_bias=True, + kernel_initializer="glorot_uniform", + recurrent_initializer="orthogonal", + bias_initializer="zeros", + kernel_regularizer=None, + recurrent_regularizer=None, + bias_regularizer=None, + kernel_constraint=None, + recurrent_constraint=None, + bias_constraint=None, + dropout=0.0, + recurrent_dropout=0.0, + seed=None, + **kwargs, + ): + if units <= 0: + raise ValueError( + "Received an invalid value for argument `units`, " + f"expected a positive integer, got {units}." 
+ ) + super().__init__(**kwargs) + self.seed = seed + self.seed_generator = backend.random.SeedGenerator(seed) + + self.units = units + self.activation = activations.get(activation) + self.use_bias = use_bias + + self.kernel_initializer = initializers.get(kernel_initializer) + self.recurrent_initializer = initializers.get(recurrent_initializer) + self.bias_initializer = initializers.get(bias_initializer) + + self.kernel_regularizer = regularizers.get(kernel_regularizer) + self.recurrent_regularizer = regularizers.get(recurrent_regularizer) + self.bias_regularizer = regularizers.get(bias_regularizer) + + self.kernel_constraint = constraints.get(kernel_constraint) + self.recurrent_constraint = constraints.get(recurrent_constraint) + self.bias_constraint = constraints.get(bias_constraint) + + self.dropout = min(1.0, max(0.0, dropout)) + self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout)) + self.state_size = self.units + self.output_size = self.units + + def build(self, input_shape): + self.kernel = self.add_weight( + shape=(input_shape[-1], self.units), + name="kernel", + initializer=self.kernel_initializer, + regularizer=self.kernel_regularizer, + constraint=self.kernel_constraint, + ) + self.recurrent_kernel = self.add_weight( + shape=(self.units, self.units), + name="recurrent_kernel", + initializer=self.recurrent_initializer, + regularizer=self.recurrent_regularizer, + constraint=self.recurrent_constraint, + ) + if self.use_bias: + self.bias = self.add_weight( + shape=(self.units,), + name="bias", + initializer=self.bias_initializer, + regularizer=self.bias_regularizer, + constraint=self.bias_constraint, + ) + else: + self.bias = None + self.built = True + + def call(self, sequence, states, training=False): + prev_output = states[0] if isinstance(states, (list, tuple)) else states + dp_mask = self.get_dropout_mask(sequence) + rec_dp_mask = self.get_recurrent_dropout_mask(prev_output) + + if training and dp_mask is not None: + sequence = sequence * 
dp_mask + h = ops.matmul(sequence, self.kernel) + if self.bias is not None: + h += self.bias + + if training and rec_dp_mask is not None: + prev_output = prev_output * rec_dp_mask + output = h + ops.matmul(prev_output, self.recurrent_kernel) + if self.activation is not None: + output = self.activation(output) + + new_state = [output] if isinstance(states, (list, tuple)) else output + return output, new_state + + def get_initial_state(self, batch_size=None): + return [ + ops.zeros((batch_size, self.state_size), dtype=self.compute_dtype) + ] + + def get_config(self): + config = { + "units": self.units, + "activation": activations.serialize(self.activation), + "use_bias": self.use_bias, + "kernel_initializer": initializers.serialize( + self.kernel_initializer + ), + "recurrent_initializer": initializers.serialize( + self.recurrent_initializer + ), + "bias_initializer": initializers.serialize(self.bias_initializer), + "kernel_regularizer": regularizers.serialize( + self.kernel_regularizer + ), + "recurrent_regularizer": regularizers.serialize( + self.recurrent_regularizer + ), + "bias_regularizer": regularizers.serialize(self.bias_regularizer), + "kernel_constraint": constraints.serialize(self.kernel_constraint), + "recurrent_constraint": constraints.serialize( + self.recurrent_constraint + ), + "bias_constraint": constraints.serialize(self.bias_constraint), + "dropout": self.dropout, + "recurrent_dropout": self.recurrent_dropout, + "seed": self.seed, + } + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras.layers.SimpleRNN") +class SimpleRNN(RNN): + """Fully-connected RNN where the output is to be fed back as the new input. + + Args: + units: Positive integer, dimensionality of the output space. + activation: Activation function to use. + Default: hyperbolic tangent (`tanh`). + If you pass None, no activation is applied + (ie. "linear" activation: `a(x) = x`). 
+ use_bias: Boolean, (default `True`), whether the layer uses + a bias vector. + kernel_initializer: Initializer for the `kernel` weights matrix, + used for the linear transformation of the inputs. Default: + `"glorot_uniform"`. + recurrent_initializer: Initializer for the `recurrent_kernel` + weights matrix, used for the linear transformation of the recurrent + state. Default: `"orthogonal"`. + bias_initializer: Initializer for the bias vector. Default: `"zeros"`. + kernel_regularizer: Regularizer function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_regularizer: Regularizer function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_regularizer: Regularizer function applied to the bias vector. + Default: `None`. + activity_regularizer: Regularizer function applied to the output of the + layer (its "activation"). Default: `None`. + kernel_constraint: Constraint function applied to the `kernel` weights + matrix. Default: `None`. + recurrent_constraint: Constraint function applied to the + `recurrent_kernel` weights matrix. Default: `None`. + bias_constraint: Constraint function applied to the bias vector. + Default: `None`. + dropout: Float between 0 and 1. + Fraction of the units to drop for the linear transformation + of the inputs. Default: 0. + recurrent_dropout: Float between 0 and 1. + Fraction of the units to drop for the linear transformation of the + recurrent state. Default: 0. + return_sequences: Boolean. Whether to return the last output + in the output sequence, or the full sequence. Default: `False`. + return_state: Boolean. Whether to return the last state + in addition to the output. Default: `False`. + go_backwards: Boolean (default: `False`). + If `True`, process the input sequence backwards and return the + reversed sequence. + stateful: Boolean (default: `False`). 
If `True`, the last state + for each sample at index i in a batch will be used as the + initial state for the sample of index i in the following batch. + unroll: Boolean (default: `False`). + If `True`, the network will be unrolled, + else a symbolic loop will be used. + Unrolling can speed-up an RNN, + although it tends to be more memory-intensive. + Unrolling is only suitable for short sequences. + + Call arguments: + sequence: A 3D tensor, with shape `[batch, timesteps, feature]`. + mask: Binary tensor of shape `[batch, timesteps]` indicating whether + a given timestep should be masked. An individual `True` entry + indicates that the corresponding timestep should be utilized, + while a `False` entry indicates that the corresponding timestep + should be ignored. + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. + This argument is passed to the cell when calling it. + This is only relevant if `dropout` or `recurrent_dropout` is used. + initial_state: List of initial state tensors to be passed to the first + call of the cell. + + Example: + + ```python + inputs = np.random.random((32, 10, 8)) + simple_rnn = keras.layers.SimpleRNN(4) + output = simple_rnn(inputs) # The output has shape `(32, 4)`. + simple_rnn = keras.layers.SimpleRNN( + 4, return_sequences=True, return_state=True + ) + # whole_sequence_output has shape `(32, 10, 4)`. + # final_state has shape `(32, 4)`. 
+ whole_sequence_output, final_state = simple_rnn(inputs) + ``` + """ + + def __init__( + self, + units, + activation="tanh", + use_bias=True, + kernel_initializer="glorot_uniform", + recurrent_initializer="orthogonal", + bias_initializer="zeros", + kernel_regularizer=None, + recurrent_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + kernel_constraint=None, + recurrent_constraint=None, + bias_constraint=None, + dropout=0.0, + recurrent_dropout=0.0, + return_sequences=False, + return_state=False, + go_backwards=False, + stateful=False, + unroll=False, + seed=None, + **kwargs, + ): + cell = SimpleRNNCell( + units, + activation=activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + recurrent_initializer=recurrent_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + recurrent_regularizer=recurrent_regularizer, + bias_regularizer=bias_regularizer, + kernel_constraint=kernel_constraint, + recurrent_constraint=recurrent_constraint, + bias_constraint=bias_constraint, + dropout=dropout, + recurrent_dropout=recurrent_dropout, + seed=seed, + dtype=kwargs.get("dtype", None), + trainable=kwargs.get("trainable", True), + name="simple_rnn_cell", + ) + super().__init__( + cell, + return_sequences=return_sequences, + return_state=return_state, + go_backwards=go_backwards, + stateful=stateful, + unroll=unroll, + **kwargs, + ) + self.input_spec = [InputSpec(ndim=3)] + + def call(self, sequences, initial_state=None, mask=None, training=False): + return super().call( + sequences, mask=mask, training=training, initial_state=initial_state + ) + + @property + def units(self): + return self.cell.units + + @property + def activation(self): + return self.cell.activation + + @property + def use_bias(self): + return self.cell.use_bias + + @property + def kernel_initializer(self): + return self.cell.kernel_initializer + + @property + def recurrent_initializer(self): + return self.cell.recurrent_initializer + + 
@property + def bias_initializer(self): + return self.cell.bias_initializer + + @property + def kernel_regularizer(self): + return self.cell.kernel_regularizer + + @property + def recurrent_regularizer(self): + return self.cell.recurrent_regularizer + + @property + def bias_regularizer(self): + return self.cell.bias_regularizer + + @property + def kernel_constraint(self): + return self.cell.kernel_constraint + + @property + def recurrent_constraint(self): + return self.cell.recurrent_constraint + + @property + def bias_constraint(self): + return self.cell.bias_constraint + + @property + def dropout(self): + return self.cell.dropout + + @property + def recurrent_dropout(self): + return self.cell.recurrent_dropout + + def get_config(self): + config = { + "units": self.units, + "activation": activations.serialize(self.activation), + "use_bias": self.use_bias, + "kernel_initializer": initializers.serialize( + self.kernel_initializer + ), + "recurrent_initializer": initializers.serialize( + self.recurrent_initializer + ), + "bias_initializer": initializers.serialize(self.bias_initializer), + "kernel_regularizer": regularizers.serialize( + self.kernel_regularizer + ), + "recurrent_regularizer": regularizers.serialize( + self.recurrent_regularizer + ), + "bias_regularizer": regularizers.serialize(self.bias_regularizer), + "activity_regularizer": regularizers.serialize( + self.activity_regularizer + ), + "kernel_constraint": constraints.serialize(self.kernel_constraint), + "recurrent_constraint": constraints.serialize( + self.recurrent_constraint + ), + "bias_constraint": constraints.serialize(self.bias_constraint), + "dropout": self.dropout, + "recurrent_dropout": self.recurrent_dropout, + } + base_config = super().get_config() + del base_config["cell"] + return {**base_config, **config} + + @classmethod + def from_config(cls, config): + return cls(**config) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/stacked_rnn_cells.py 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/stacked_rnn_cells.py new file mode 100644 index 0000000000000000000000000000000000000000..a3e1b601d4c71dbd4d4606bdf736a42f58a324d1 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/stacked_rnn_cells.py @@ -0,0 +1,139 @@ +from keras.src import ops +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib + + +@keras_export("keras.layers.StackedRNNCells") +class StackedRNNCells(Layer): + """Wrapper allowing a stack of RNN cells to behave as a single cell. + + Used to implement efficient stacked RNNs. + + Args: + cells: List of RNN cell instances. + + Example: + + ```python + batch_size = 3 + sentence_length = 5 + num_features = 2 + new_shape = (batch_size, sentence_length, num_features) + x = np.reshape(np.arange(30), new_shape) + + rnn_cells = [keras.layers.LSTMCell(128) for _ in range(2)] + stacked_lstm = keras.layers.StackedRNNCells(rnn_cells) + lstm_layer = keras.layers.RNN(stacked_lstm) + + result = lstm_layer(x) + ``` + """ + + def __init__(self, cells, **kwargs): + super().__init__(**kwargs) + for cell in cells: + if "call" not in dir(cell): + raise ValueError( + "All cells must have a `call` method. " + f"Received cell without a `call` method: {cell}" + ) + if "state_size" not in dir(cell): + raise ValueError( + "All cells must have a `state_size` attribute. 
" + f"Received cell without a `state_size`: {cell}" + ) + self.cells = cells + + @property + def state_size(self): + return [c.state_size for c in self.cells] + + @property + def output_size(self): + if getattr(self.cells[-1], "output_size", None) is not None: + return self.cells[-1].output_size + elif isinstance(self.cells[-1].state_size, (list, tuple)): + return self.cells[-1].state_size[0] + else: + return self.cells[-1].state_size + + def get_initial_state(self, batch_size=None): + initial_states = [] + for cell in self.cells: + get_initial_state_fn = getattr(cell, "get_initial_state", None) + if get_initial_state_fn: + initial_states.append( + get_initial_state_fn(batch_size=batch_size) + ) + else: + if isinstance(cell.state_size, int): + initial_states.append( + ops.zeros( + (batch_size, cell.state_size), + dtype=self.compute_dtype, + ) + ) + else: + initial_states.append( + [ + ops.zeros((batch_size, d), dtype=self.compute_dtype) + for d in cell.state_size + ] + ) + return initial_states + + def call(self, inputs, states, training=False, **kwargs): + # Call the cells in order and store the returned states. 
+ new_states = [] + for cell, states in zip(self.cells, states): + state_is_list = tree.is_nested(states) + states = list(states) if tree.is_nested(states) else [states] + if isinstance(cell, Layer) and cell._call_has_training_arg: + kwargs["training"] = training + else: + kwargs.pop("training", None) + cell_call_fn = cell.__call__ if callable(cell) else cell.call + inputs, states = cell_call_fn(inputs, states, **kwargs) + if len(states) == 1 and not state_is_list: + states = states[0] + new_states.append(states) + + if len(new_states) == 1: + new_states = new_states[0] + return inputs, new_states + + def build(self, input_shape): + for cell in self.cells: + if isinstance(cell, Layer) and not cell.built: + cell.build(input_shape) + cell.built = True + if getattr(cell, "output_size", None) is not None: + output_dim = cell.output_size + elif isinstance(cell.state_size, (list, tuple)): + output_dim = cell.state_size[0] + else: + output_dim = cell.state_size + batch_size = tree.flatten(input_shape)[0] + input_shape = (batch_size, output_dim) + self.built = True + + def get_config(self): + cells = [] + for cell in self.cells: + cells.append(serialization_lib.serialize_keras_object(cell)) + config = {"cells": cells} + base_config = super().get_config() + return {**base_config, **config} + + @classmethod + def from_config(cls, config, custom_objects=None): + cells = [] + for cell_config in config.pop("cells"): + cells.append( + serialization_lib.deserialize_keras_object( + cell_config, custom_objects=custom_objects + ) + ) + return cls(cells, **config) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/time_distributed.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/time_distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..e61274d96c084fbde0cd148785be003696b3d7e6 --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/rnn/time_distributed.py @@ -0,0 +1,115 @@ +"""Wrapper layer to apply every temporal slice of an input.""" + +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.core.wrapper import Wrapper +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.TimeDistributed") +class TimeDistributed(Wrapper): + """This wrapper allows to apply a layer to every temporal slice of an input. + + Every input should be at least 3D, and the dimension of index one of the + first input will be considered to be the temporal dimension. + + Consider a batch of 32 video samples, where each sample is a 128x128 RGB + image with `channels_last` data format, across 10 timesteps. + The batch input shape is `(32, 10, 128, 128, 3)`. + + You can then use `TimeDistributed` to apply the same `Conv2D` layer to each + of the 10 timesteps, independently: + + >>> inputs = layers.Input(shape=(10, 128, 128, 3), batch_size=32) + >>> conv_2d_layer = layers.Conv2D(64, (3, 3)) + >>> outputs = layers.TimeDistributed(conv_2d_layer)(inputs) + >>> outputs.shape + (32, 10, 126, 126, 64) + + Because `TimeDistributed` applies the same instance of `Conv2D` to each of + the timestamps, the same set of weights are used at each timestamp. + + Args: + layer: a `keras.layers.Layer` instance. + + Call arguments: + inputs: Input tensor of shape (batch, time, ...) or nested tensors, + and each of which has shape (batch, time, ...). + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. This argument is passed to the + wrapped layer (only if the layer supports this argument). + mask: Binary tensor of shape `(samples, timesteps)` indicating whether + a given timestep should be masked. This argument is passed to the + wrapped layer (only if the layer supports this argument). 
+ """ + + def __init__(self, layer, **kwargs): + if not isinstance(layer, Layer): + raise ValueError( + "Please initialize `TimeDistributed` layer with a " + f"`keras.layers.Layer` instance. Received: {layer}" + ) + super().__init__(layer, **kwargs) + self.supports_masking = True + + def _get_child_input_shape(self, input_shape): + if not isinstance(input_shape, (tuple, list)) or len(input_shape) < 3: + raise ValueError( + "`TimeDistributed` Layer should be passed an `input_shape` " + f"with at least 3 dimensions, received: {input_shape}" + ) + return (input_shape[0], *input_shape[2:]) + + def compute_output_shape(self, input_shape): + child_input_shape = self._get_child_input_shape(input_shape) + child_output_shape = self.layer.compute_output_shape(child_input_shape) + return (child_output_shape[0], input_shape[1], *child_output_shape[1:]) + + def build(self, input_shape): + child_input_shape = self._get_child_input_shape(input_shape) + super().build(child_input_shape) + self.built = True + + def call(self, inputs, training=None, mask=None): + input_shape = ops.shape(inputs) + mask_shape = None if mask is None else ops.shape(mask) + batch_size = input_shape[0] + timesteps = input_shape[1] + + if mask_shape is not None and mask_shape[:2] != (batch_size, timesteps): + raise ValueError( + "`TimeDistributed` Layer should be passed a `mask` of shape " + f"({batch_size}, {timesteps}, ...), " + f"received: mask.shape={mask_shape}" + ) + + def time_distributed_transpose(data): + """Swaps the timestep and batch dimensions of a tensor.""" + axes = [1, 0, *range(2, len(data.shape))] + return ops.transpose(data, axes=axes) + + inputs = time_distributed_transpose(inputs) + if mask is not None: + mask = time_distributed_transpose(mask) + + def step_function(i): + kwargs = {} + if self.layer._call_has_mask_arg and mask is not None: + kwargs["mask"] = mask[i] + if self.layer._call_has_training_arg: + kwargs["training"] = training + return self.layer.call(inputs[i], **kwargs) + + 
# Implementation #1: is the time axis is static, use a Python for loop. + + if inputs.shape[0] is not None: + outputs = ops.stack( + [step_function(i) for i in range(inputs.shape[0])] + ) + return time_distributed_transpose(outputs) + + # Implementation #2: use backend.vectorized_map. + + outputs = backend.vectorized_map(step_function, ops.arange(timesteps)) + return time_distributed_transpose(outputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71f29999eee33f8c6b1a983c7ebd2dc8f406be78 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/image.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ae7fb65b6ab94fd037d293c5ab754f091d97b4f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/image.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/sequence.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/sequence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1eda2eb59269951e8e71d0e803005f91df3d4da Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/sequence.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/text.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e0efde4148a0346d0bebf32dad0b82612392778 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/__pycache__/text.cpython-310.pyc differ