diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/add.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/add.py new file mode 100644 index 0000000000000000000000000000000000000000..bf5f1b2a6aac301836c64446919eee7772199aa1 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/add.py @@ -0,0 +1,69 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Add") +class Add(Merge): + """Performs elementwise addition operation. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Add()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `added = keras.layers.add([x1, x2])` + >>> added = keras.layers.Add()([x1, x2]) + >>> out = keras.layers.Dense(4)(added) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.add(output, inputs[i]) + return output + + +@keras_export("keras.layers.add") +def add(inputs, **kwargs): + """Functional interface to the `keras.layers.Add` layer. + + Args: + inputs: A list of input tensors with the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the sum of the inputs. It has the same shape as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.add([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> added = keras.layers.add([x1, x2]) + >>> out = keras.layers.Dense(4)(added) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Add(**kwargs)(inputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/average.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/average.py new file mode 100644 index 0000000000000000000000000000000000000000..f90f75beead0b1737bd2dbf724c0854e292cfad7 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/average.py @@ -0,0 +1,70 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Average") +class Average(Merge): + """Averages a list of inputs element-wise.. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Average()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `y = keras.layers.average([x1, x2])` + >>> y = keras.layers.Average()([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = ops.add(output, inputs[i]) + return output / len(inputs) + + +@keras_export("keras.layers.average") +def average(inputs, **kwargs): + """Functional interface to the `keras.layers.Average` layer. + + Args: + inputs: A list of input tensors , all of the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the element-wise product of the inputs with the same + shape as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.average([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> y = keras.layers.average([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Average(**kwargs)(inputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/base_merge.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/base_merge.py new file mode 100644 index 0000000000000000000000000000000000000000..360929719816537983bbff266c8332183e09d5b4 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/base_merge.py @@ -0,0 +1,281 @@ +from keras.src import backend +from keras.src import ops +from keras.src.backend.common.keras_tensor import KerasTensor +from keras.src.layers.layer import Layer + + +class Merge(Layer): + """Generic merge layer for elementwise merge functions. + + Used to implement `Sum`, `Average`, etc. + + Args: + **kwargs: standard layer keyword arguments. + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.supports_masking = True + + def _merge_function(self, inputs): + raise NotImplementedError + + def _apply_merge_op_and_or_mask(self, op_fn, inputs): + """Merge a set of inputs by applying `op_fn` and ORing the masks. + + We use this for `Minimum` and `Maximum` as it handles the fact that + there is no identity element. If applicable, the mask obtained by ORing + all masks is set on the output. + + Args: + op_fn: binary operation to apply to tensor pair. + inputs: array of tensors to apply operation on. 
+ """ + output = None + output_mask = None + + for x in inputs: + mask = backend.get_keras_mask(x) + if mask is not None: + mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x)) + if output is None: + output = x + output_mask = mask + continue + if mask is not None: + x = ops.where(mask, x, output) + if output_mask is not None: + output = ops.where(output_mask, output, x) + if mask is not None and output_mask is not None: + output_mask = ops.logical_or(output_mask, mask) + else: + output_mask = None + output = op_fn(output, x) + + if output_mask is not None: + output_mask = ops.any(output_mask, axis=-1, keepdims=False) + backend.set_keras_mask(output, output_mask) + return output + + def _compute_elemwise_op_output_shape(self, shape1, shape2): + """Computes the shape of the resultant of an elementwise operation. + + Args: + shape1: Tuple or None. Shape of the first tensor + shape2: Tuple or None. Shape of the second tensor + + Returns: + Expected output shape when an element-wise operation is + carried out on 2 tensors with shapes shape1 and shape2. + tuple or None. + + Raises: + ValueError: If shape1 and shape2 are not compatible for + element-wise operations. + """ + + if None in [shape1, shape2]: + return None + elif len(shape1) < len(shape2): + return self._compute_elemwise_op_output_shape(shape2, shape1) + elif not shape2: + return shape1 + output_shape = list(shape1[: -len(shape2)]) + for i, j in zip(shape1[-len(shape2) :], shape2): + if i is None or j is None: + output_shape.append(None) + elif i == 1: + output_shape.append(j) + elif j == 1: + output_shape.append(i) + else: + if i != j: + raise ValueError( + "Inputs have incompatible shapes. " + f"Received shapes {shape1} and {shape2}" + ) + output_shape.append(i) + return tuple(output_shape) + + def build(self, input_shape): + # Used purely for shape validation. + if not isinstance(input_shape[0], (tuple, list)): + raise ValueError( + "A merge layer should be called on a list of inputs. 
" + f"Received: input_shape={input_shape} (not a list of shapes)" + ) + if len(input_shape) < 1: + raise ValueError( + "A merge layer should be called " + "on a list of at least 1 input. " + f"Received {len(input_shape)} inputs. " + f"Full input_shape received: {input_shape}" + ) + + batch_sizes = {s[0] for s in input_shape if s} - {None} + if len(batch_sizes) > 1: + raise ValueError( + "Cannot merge tensors with different batch sizes. " + f"Received tensors with shapes {input_shape}" + ) + + if input_shape[0] is None: + output_shape = None + else: + output_shape = input_shape[0][1:] + + for i in range(1, len(input_shape)): + if input_shape[i] is None: + shape = None + else: + shape = input_shape[i][1:] + output_shape = self._compute_elemwise_op_output_shape( + output_shape, shape + ) + + # If the inputs have different ranks, we have to reshape them + # to make them broadcastable. + if None not in input_shape and len(set(map(len, input_shape))) == 1: + self._reshape_required = False + else: + self._reshape_required = True + self.built = True + + def call(self, inputs): + if not isinstance(inputs, (list, tuple)): + raise ValueError( + "A merge layer should be called on a list of inputs. " + f"Received: inputs={inputs} (not a list of tensors)" + ) + if self._reshape_required: + reshaped_inputs = [] + input_ndims = list(map(ops.ndim, inputs)) + if None not in input_ndims: + # If ranks of all inputs are available, + # we simply expand each of them at axis=1 + # until all of them have the same rank. + max_ndim = max(input_ndims) + for x in inputs: + x_ndim = ops.ndim(x) + for _ in range(max_ndim - x_ndim): + x = ops.expand_dims(x, axis=1) + reshaped_inputs.append(x) + return self._merge_function(reshaped_inputs) + else: + # Transpose all inputs so that batch size is the last dimension. + # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... 
,
+            # batch_size)
+            transposed = False
+            for x in inputs:
+                x_ndim = ops.ndim(x)
+
+                if x_ndim is None:
+                    x_shape = ops.shape(x)
+                    batch_size = x_shape[0]
+
+                    new_shape = ops.concatenate(
+                        [x_shape[1:], ops.expand_dims(batch_size, axis=-1)]
+                    )
+                    x_transposed = ops.reshape(
+                        x,
+                        ops.stack(
+                            [batch_size, ops.prod(x_shape[1:])],
+                            axis=0,
+                        ),
+                    )
+                    x_transposed = ops.transpose(x_transposed, perm=(1, 0))
+                    x_transposed = ops.reshape(x_transposed, new_shape)
+
+                    reshaped_inputs.append(x_transposed)
+                    transposed = True
+
+                elif x_ndim > 1:
+                    dims = list(range(1, x_ndim)) + [0]
+                    reshaped_inputs.append(ops.transpose(x, perm=dims))
+                    # Batch axis is now last; undone after merging below.
+                    transposed = True
+                else:
+                    # We don't transpose inputs if they are 1D vectors or
+                    # scalars.
+                    reshaped_inputs.append(x)
+
+            y = self._merge_function(reshaped_inputs)
+            y_ndim = ops.ndim(y)
+
+            if transposed:
+                # If inputs have been transposed, we have to transpose the
+                # output too.
+                if y_ndim is None:
+                    y_shape = ops.shape(y)
+                    y_ndim = ops.shape(y_shape)[0]
+                    batch_size = y_shape[y_ndim - 1]
+                    new_shape = ops.concatenate(
+                        [
+                            ops.expand_dims(batch_size, axis=-1),
+                            y_shape[: y_ndim - 1],
+                        ]
+                    )
+                    y = ops.reshape(y, (-1, batch_size))
+                    y = ops.transpose(y, perm=(1, 0))
+                    y = ops.reshape(y, new_shape)
+                elif y_ndim > 1:
+                    dims = [y_ndim - 1] + list(range(y_ndim - 1))
+                    y = ops.transpose(y, perm=dims)
+            return y
+        else:
+            return self._merge_function(inputs)
+
+    def compute_output_shape(self, input_shape):
+        if input_shape[0] is None:
+            output_shape = None
+        else:
+            output_shape = input_shape[0][1:]
+
+        for i in range(1, len(input_shape)):
+            if input_shape[i] is None:
+                shape = None
+            else:
+                shape = input_shape[i][1:]
+            output_shape = self._compute_elemwise_op_output_shape(
+                output_shape, shape
+            )
+        batch_sizes = {s[0] for s in input_shape if s is not None} - {None}
+        if len(batch_sizes) == 1:
+            output_shape = (list(batch_sizes)[0],) + output_shape
+        else:
+            output_shape = (None,) + output_shape
+        return
output_shape + + def compute_output_spec(self, inputs): + output_shape = self.compute_output_shape([x.shape for x in inputs]) + output_sparse = all(x.sparse for x in inputs) + return KerasTensor( + output_shape, dtype=self.compute_dtype, sparse=output_sparse + ) + + def compute_mask(self, inputs, mask=None): + if mask is None: + return None + if not isinstance(mask, (tuple, list)): + raise ValueError(f"`mask` should be a list. Received: mask={mask}") + if not isinstance(inputs, (tuple, list)): + raise ValueError( + f"`inputs` should be a list. Received: inputs={inputs}" + ) + if len(mask) != len(inputs): + raise ValueError( + "The lists `inputs` and `mask` should have the same length. " + f"Received: inputs={inputs} of length {len(inputs)}, and " + f"mask={mask} of length {len(mask)}" + ) + # Default implementation does an OR between the masks, which works + # for `Add`, `Subtract`, `Average`, `Maximum`, `Minimum`, `Multiply`. + if any(m is None for m in mask): + return None + output_mask = mask[0] + for m in mask[1:]: + output_mask = ops.logical_or(output_mask, m) + return output_mask + + def get_config(self): + return super().get_config() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/concatenate.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/concatenate.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d4d39ff3cda8c34aeac618094a861aa3beae26 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/concatenate.py @@ -0,0 +1,178 @@ +import copy + +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Concatenate") +class Concatenate(Merge): + """Concatenates a list of inputs. 
+
+    It takes as input a list of tensors, all of the same shape except
+    for the concatenation axis, and returns a single tensor that is the
+    concatenation of all inputs.
+
+    Examples:
+
+    >>> x = np.arange(20).reshape(2, 2, 5)
+    >>> y = np.arange(20, 30).reshape(2, 1, 5)
+    >>> keras.layers.Concatenate(axis=1)([x, y])
+
+    Usage in a Keras model:
+
+    >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
+    >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
+    >>> y = keras.layers.Concatenate()([x1, x2])
+
+    Args:
+        axis: Axis along which to concatenate.
+        **kwargs: Standard layer keyword arguments.
+
+    Returns:
+        A tensor, the concatenation of the inputs alongside axis `axis`.
+    """
+
+    def __init__(self, axis=-1, **kwargs):
+        super().__init__(**kwargs)
+        self.axis = axis
+        self.supports_masking = True
+        self._reshape_required = False
+
+    def build(self, input_shape):
+        # Used purely for shape validation.
+        if len(input_shape) < 1 or not isinstance(
+            input_shape[0], (tuple, list)
+        ):
+            raise ValueError(
+                "A `Concatenate` layer should be called on a list of "
+                f"at least 1 input. Received: input_shape={input_shape}"
+            )
+        if all(shape is None for shape in input_shape):
+            return
+
+        reduced_inputs_shapes = [list(shape) for shape in input_shape]
+        reduced_inputs_shapes_copy = copy.copy(reduced_inputs_shapes)
+        shape_set = set()
+        for i in range(len(reduced_inputs_shapes_copy)):
+            # Convert self.axis to positive axis for each input
+            # in case self.axis is a negative number
+            concat_axis = self.axis % len(reduced_inputs_shapes_copy[i])
+            # Skip batch axis.
+            for axis, axis_value in enumerate(
+                reduced_inputs_shapes_copy[i][1:], start=1
+            ):
+                # Remove squeezable axes (axes with value of 1)
+                # if not in the axis that will be used for concatenation
+                # otherwise leave it.
+                # This approach allows building the layer,
+                # but if tensor shapes are not the same when
+                # calling, an exception will be raised.
+ if axis != concat_axis and axis_value == 1: + del reduced_inputs_shapes[i][axis] + + if len(reduced_inputs_shapes[i]) > self.axis: + del reduced_inputs_shapes[i][self.axis] + shape_set.add(tuple(reduced_inputs_shapes[i])) + + if len(shape_set) != 1: + err_msg = ( + "A `Concatenate` layer requires inputs with matching shapes " + "except for the concatenation axis. " + f"Received: input_shape={input_shape}" + ) + # Make sure all the shapes have same ranks. + ranks = set(len(shape) for shape in shape_set) + if len(ranks) != 1: + raise ValueError(err_msg) + # Get the only rank for the set. + (rank,) = ranks + for axis in range(rank): + # Skip the Nones in the shape since they are dynamic, also the + # axis for concat has been removed above. + unique_dims = set( + shape[axis] + for shape in shape_set + if shape[axis] is not None + ) + if len(unique_dims) > 1: + raise ValueError(err_msg) + self.built = True + + def _merge_function(self, inputs): + return ops.concatenate(inputs, axis=self.axis) + + def compute_output_shape(self, input_shape): + if (not isinstance(input_shape, (tuple, list))) or ( + not isinstance(input_shape[0], (tuple, list)) + ): + raise ValueError( + "A `Concatenate` layer should be called on a list of inputs. " + f"Received: input_shape={input_shape}" + ) + input_shapes = input_shape + output_shape = list(input_shapes[0]) + + for shape in input_shapes[1:]: + if output_shape[self.axis] is None or shape[self.axis] is None: + output_shape[self.axis] = None + break + output_shape[self.axis] += shape[self.axis] + return tuple(output_shape) + + def compute_mask(self, inputs, mask=None): + if mask is None: + return None + if not isinstance(mask, (tuple, list)): + raise ValueError(f"`mask` should be a list. Received mask={mask}") + if not isinstance(inputs, (tuple, list)): + raise ValueError( + f"`inputs` should be a list. 
Received: inputs={inputs}" + ) + if len(mask) != len(inputs): + raise ValueError( + "The lists `inputs` and `mask` should have the same length. " + f"Received: inputs={inputs} of length {len(inputs)}, and " + f"mask={mask} of length {len(mask)}" + ) + if all(m is None for m in mask): + return None + # Make a list of masks while making sure + # the dimensionality of each mask + # is the same as the corresponding input. + masks = [] + for input_i, mask_i in zip(inputs, mask): + if mask_i is None: + # Input is unmasked. Append all 1s to masks, + masks.append(ops.ones_like(input_i, dtype="bool")) + elif mask_i.ndim < input_i.ndim: + # Mask is smaller than the input, expand it + masks.append( + ops.broadcast_to( + ops.expand_dims(mask_i, axis=-1), ops.shape(input_i) + ) + ) + else: + masks.append(mask_i) + concatenated = ops.concatenate(masks, axis=self.axis) + return ops.any(concatenated, axis=-1, keepdims=False) + + def get_config(self): + config = {"axis": self.axis} + base_config = super().get_config() + return dict(list(base_config.items()) + list(config.items())) + + +@keras_export("keras.layers.concatenate") +def concatenate(inputs, axis=-1, **kwargs): + """Functional interface to the `Concatenate` layer. + + Args: + inputs: A list of input tensors. + axis: Concatenation axis. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor, the concatenation of the inputs alongside axis `axis`. 
+ """ + return Concatenate(axis=axis, **kwargs)(inputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/dot.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/dot.py new file mode 100644 index 0000000000000000000000000000000000000000..e580269bef67d6918d9ee9e214180a1950fb917c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/dot.py @@ -0,0 +1,376 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge +from keras.src.utils.numerical_utils import normalize + + +def batch_dot(x, y, axes=None): + """Batchwise dot product. + + `batch_dot` is used to compute dot product of `x` and `y` when + `x` and `y` are data in batch, i.e. in a shape of `(batch_size, :)`. + `batch_dot` results in a tensor or variable with less dimensions + than the input. If the number of dimensions is reduced to 1, + we use `expand_dims` to make sure that ndim is at least 2. + + Shape inference: + + Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`. + If `axes` is (1, 2), to find the output shape of resultant tensor, + loop through each dimension in `x`'s shape and `y`'s shape: + + * `x.shape[0]` : 100 : append to output shape + * `x.shape[1]` : 20 : do not append to output shape, dimension 1 of + `x` has been summed over. (`dot_axes[0]` = 1) + * `y.shape[0]` : 100 : do not append to output shape, always ignore + first dimension of `y` + * `y.shape[1]` : 30 : append to output shape + * `y.shape[2]` : 20 : do not append to output shape, dimension 2 of + `y` has been summed over. + (`dot_axes[1]` = 2) `output_shape` = `(100, 30)` + + Example: + + >>> x_batch = np.ones(shape=(32, 20, 1)) + >>> y_batch = np.ones(shape=(32, 30, 20)) + >>> xy_batch_dot = batch_dot(x_batch, y_batch, axes=(1, 2)) + + Args: + x: Keras tensor or variable with `ndim >= 2`. 
+ y: Keras tensor or variable with `ndim >= 2`. + axes: Tuple or list of integers with target dimensions, or single + integer. The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]` + should be equal. + + Returns: + A tensor with shape equal to the concatenation of `x`'s shape + (less the dimension that was summed over) and `y`'s shape (less the + batch dimension and the dimension that was summed over). If the final + rank is 1, we reshape it to `(batch_size, 1)`. + """ + + x_shape = x.shape + y_shape = y.shape + + x_ndim = len(x_shape) + y_ndim = len(y_shape) + + if x_ndim < 2 or y_ndim < 2: + raise ValueError( + f"Cannot do batch_dot on inputs " + f"with rank < 2. " + f"Received inputs with shapes " + f"{x_shape} and {y_shape}." + ) + + x_batch_size = x_shape[0] + y_batch_size = y_shape[0] + + if x_batch_size is not None and y_batch_size is not None: + if x_batch_size != y_batch_size: + raise ValueError( + f"Cannot do batch_dot on inputs " + f"with different batch sizes. " + f"Received inputs with shapes " + f"{x_shape} and {y_shape}." + ) + if isinstance(axes, int): + axes = [axes, axes] + + if axes is None: + if y_ndim == 2: + axes = [x_ndim - 1, y_ndim - 1] + else: + axes = [x_ndim - 1, y_ndim - 2] + + if any(isinstance(a, (list, tuple)) for a in axes): + raise ValueError( + f"Multiple target dimensions are not supported. " + f"Expected: None, int, (int, int), " + f"Provided: {axes} " + ) + + # if tuple, convert to list. + axes = list(axes) + + # convert negative indices. + if axes[0] < 0: + axes[0] += x_ndim + if axes[1] < 0: + axes[1] += y_ndim + + # sanity checks + if 0 in axes: + raise ValueError( + "Cannot perform batch_dot over axis 0. 
" + "If your inputs are not batched, " + "add a dummy batch dimension to your " + "inputs using keras.ops.expand_dims(x, 0)" + ) + a0, a1 = axes + d1 = x_shape[a0] + d2 = y_shape[a1] + + if d1 is not None and d2 is not None and d1 != d2: + raise ValueError( + f"Cannot do batch_dot on inputs with shapes " + f"{x_shape} and {y_shape} with axes={axes}. " + f"x.shape[{axes[0]}] != y.shape[{axes[1]}] ({d1} != {d2})." + ) + + # backup ndims. Need them later. + orig_x_ndim = x_ndim + orig_y_ndim = y_ndim + + # if rank is 2, expand to 3. + if x_ndim == 2: + x = ops.expand_dims(x, 1) + a0 += 1 + x_ndim += 1 + if y_ndim == 2: + y = ops.expand_dims(y, 2) + y_ndim += 1 + + # bring x's dimension to be reduced to last axis. + if a0 != x_ndim - 1: + pattern = list(range(x_ndim)) + for i in range(a0, x_ndim - 1): + pattern[i] = pattern[i + 1] + pattern[-1] = a0 + x = ops.transpose(x, pattern) + + # bring y's dimension to be reduced to axis 1. + if a1 != 1: + pattern = list(range(y_ndim)) + for i in range(a1, 1, -1): + pattern[i] = pattern[i - 1] + pattern[1] = a1 + y = ops.transpose(y, pattern) + + # normalize both inputs to rank 3. + if x_ndim > 3: + # squash middle dimensions of x. + x_shape = ops.shape(x) + x_mid_dims = x_shape[1:-1] + x_squashed_shape = (x_shape[0], -1, x_shape[-1]) + x = ops.reshape(x, x_squashed_shape) + x_squashed = True + else: + x_squashed = False + + if y_ndim > 3: + # squash trailing dimensions of y. + y_shape = ops.shape(y) + y_trail_dims = y_shape[2:] + y_squashed_shape = (y_shape[0], y_shape[1], -1) + y = ops.reshape(y, y_squashed_shape) + y_squashed = True + else: + y_squashed = False + + result = ops.matmul(x, y) + + # if inputs were squashed, we have to reshape the matmul output. 
+ output_shape = ops.shape(result) + do_reshape = False + + if x_squashed: + output_shape = output_shape[:1] + x_mid_dims + output_shape[-1:] + do_reshape = True + + if y_squashed: + output_shape = output_shape[:-1] + y_trail_dims + do_reshape = True + + if do_reshape: + result = ops.reshape(result, output_shape) + + # if the inputs were originally rank 2, we remove the added 1 dim. + if orig_x_ndim == 2: + result = ops.squeeze(result, 1) + elif orig_y_ndim == 2: + result = ops.squeeze(result, -1) + + return result + + +@keras_export("keras.layers.Dot") +class Dot(Merge): + """Computes element-wise dot product of two tensors. + + It takes a list of inputs of size 2, and the axes + corresponding to each input along with the dot product + is to be performed. + + Let's say `x` and `y` are the two input tensors with shapes + `(2, 3, 5)` and `(2, 10, 3)`. The batch dimension should be + of same size for both the inputs, and `axes` should correspond + to the dimensions that have the same size in the corresponding + inputs. e.g. with `axes=(1, 2)`, the dot product of `x`, and `y` + will result in a tensor with shape `(2, 5, 10)` + + Example: + + >>> x = np.arange(10).reshape(1, 5, 2) + >>> y = np.arange(10, 20).reshape(1, 2, 5) + >>> keras.layers.Dot(axes=(1, 2))([x, y]) + + Usage in a Keras model: + + >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) + >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) + >>> y = keras.layers.Dot(axes=1)([x1, x2]) + + Args: + axes: Integer or tuple of integers, axis or axes along which to + take the dot product. If a tuple, should be two integers + corresponding to the desired axis from the first input and the + desired axis from the second input, respectively. Note that the + size of the two selected axes must match. + normalize: Whether to L2-normalize samples along the dot product axis + before taking the dot product. 
If set to `True`, then + the output of the dot product is the cosine proximity + between the two samples. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor, the dot product of the samples from the inputs. + """ + + def __init__(self, axes, normalize=False, **kwargs): + super().__init__(**kwargs) + if not isinstance(axes, int): + if not isinstance(axes, (list, tuple)): + raise TypeError( + f"Invalid type for argument `axes`: it should be " + f"a list or an int. Received: axes={axes}" + ) + if len(axes) != 2: + raise ValueError( + f"Invalid format for argument `axes`: it should contain " + f"two elements. Received: axes={axes}" + ) + if not isinstance(axes[0], int) or not isinstance(axes[1], int): + raise ValueError( + f"Invalid format for argument `axes`: list elements should " + f"be integers. Received: axes={axes}" + ) + self.axes = axes + self.normalize = normalize + self.supports_masking = True + self._reshape_required = False + + def build(self, input_shape): + # Used purely for shape validation. + if ( + not isinstance(input_shape[0], (tuple, list)) + or len(input_shape) != 2 + ): + raise ValueError( + f"A `Dot` layer should be called on a list of 2 inputs. " + f"Received: input_shape={input_shape}" + ) + shape1 = input_shape[0] + shape2 = input_shape[1] + if shape1 is None or shape2 is None: + return + if isinstance(self.axes, int): + if self.axes < 0: + axes = [self.axes % len(shape1), self.axes % len(shape2)] + else: + axes = [self.axes] * 2 + else: + axes = self.axes + if shape1[axes[0]] != shape2[axes[1]]: + raise ValueError( + f"Incompatible input shapes: " + f"axis values {shape1[axes[0]]} (at axis {axes[0]}) != " + f"{shape2[axes[1]]} (at axis {axes[1]}). " + f"Full input shapes: {shape1}, {shape2}" + ) + self.built = True + + def _merge_function(self, inputs): + if len(inputs) != 2: + raise ValueError( + f"A `Dot` layer should be called on exactly 2 inputs. 
" + f"Received: inputs={inputs}" + ) + x1 = inputs[0] + x2 = inputs[1] + + if isinstance(self.axes, int): + if self.axes < 0: + axes = [ + self.axes % len(x1.shape), + self.axes % len(x2.shape), + ] + else: + axes = [self.axes] * 2 + else: + axes = [] + for i in range(len(self.axes)): + if self.axes[i] < 0: + axes.append(self.axes[i] % len(inputs[i].shape)) + else: + axes.append(self.axes[i]) + + if self.normalize: + x1 = normalize(x1, axis=axes[0]) + x2 = normalize(x2, axis=axes[1]) + output = batch_dot(x1, x2, axes) + return output + + def compute_output_shape(self, input_shape): + if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2: + raise ValueError( + f"A `Dot` layer should be called on a list of 2 inputs. " + f"Received: input_shape={input_shape}" + ) + shape1 = list(input_shape[0]) + shape2 = list(input_shape[1]) + if isinstance(self.axes, int): + if self.axes < 0: + axes = [self.axes % len(shape1), self.axes % len(shape2)] + else: + axes = [self.axes] * 2 + else: + axes = self.axes + shape1.pop(axes[0]) + shape2.pop(axes[1]) + shape2.pop(0) + output_shape = shape1 + shape2 + if len(output_shape) == 1: + output_shape += [1] + return tuple(output_shape) + + def compute_mask(self, inputs, mask=None): + return None + + def get_config(self): + config = { + "axes": self.axes, + "normalize": self.normalize, + } + base_config = super().get_config() + return dict(list(base_config.items()) + list(config.items())) + + +@keras_export("keras.layers.dot") +def dot(inputs, axes=-1, **kwargs): + """Functional interface to the `Dot` layer. + + Args: + inputs: A list of input tensors (at least 2). + axes: Integer or tuple of integers, + axis or axes along which to take the dot product. + normalize: Whether to L2-normalize samples along the + dot product axis before taking the dot product. + If set to `True`, then the output of the dot product + is the cosine proximity between the two samples. + **kwargs: Standard layer keyword arguments. 
+ + Returns: + A tensor, the dot product of the samples from the inputs. + """ + return Dot(axes=axes, **kwargs)(inputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/maximum.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/maximum.py new file mode 100644 index 0000000000000000000000000000000000000000..3072ecb625a90a77e7e87f6d6c93938e16b0a833 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/maximum.py @@ -0,0 +1,67 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Maximum") +class Maximum(Merge): + """Computes element-wise maximum on a list of inputs. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). + + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Maximum()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `y = keras.layers.maximum([x1, x2])` + >>> y = keras.layers.Maximum()([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + return self._apply_merge_op_and_or_mask(ops.maximum, inputs) + + +@keras_export("keras.layers.maximum") +def maximum(inputs, **kwargs): + """Functional interface to the `keras.layers.Maximum` layer. + + Args: + inputs: A list of input tensors , all of the same shape. + **kwargs: Standard layer keyword arguments. 
+ + Returns: + A tensor as the element-wise product of the inputs with the same + shape as the inputs. + + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.maximum([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> y = keras.layers.maximum([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Maximum(**kwargs)(inputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/minimum.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/minimum.py new file mode 100644 index 0000000000000000000000000000000000000000..dad5997ef656fe8cfec00dcd5d8121e546525486 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/minimum.py @@ -0,0 +1,67 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Minimum") +class Minimum(Merge): + """Computes elementwise minimum on a list of inputs. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Minimum()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `y = keras.layers.minimum([x1, x2])` + >>> y = keras.layers.Minimum()([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + return self._apply_merge_op_and_or_mask(ops.minimum, inputs) + + +@keras_export("keras.layers.minimum") +def minimum(inputs, **kwargs): + """Functional interface to the `keras.layers.Minimum` layer. + + Args: + inputs: A list of input tensors , all of the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the elementwise product of the inputs with the same + shape as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.minimum([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> y = keras.layers.minimum([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Minimum(**kwargs)(inputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/multiply.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/multiply.py new file mode 100644 index 0000000000000000000000000000000000000000..72fbe1e831dc872454c365438a925988fc1da5fe --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/multiply.py @@ -0,0 +1,91 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Multiply") +class Multiply(Merge): + """Performs elementwise multiplication. + + It takes as input a list of tensors, all of the same shape, + and returns a single tensor (also of the same shape). 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Multiply()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `y = keras.layers.multiply([x1, x2])` + >>> y = keras.layers.Multiply()([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def _merge_function(self, inputs): + masks = [backend.get_keras_mask(x) for x in inputs] + has_output_mask = all(mask is not None for mask in masks) + output = None + output_mask = None + + for x, mask in zip(inputs, masks): + if mask is not None: + mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x)) + # Replace 0s with 1s outside of mask. + x = ops.where(mask, x, ops.cast(1, x.dtype)) + if has_output_mask: + output_mask = ( + mask + if output_mask is None + else ops.logical_or(output_mask, mask) + ) + output = x if output is None else ops.multiply(output, x) + + if has_output_mask: + # Replace 1s with 0s outside of mask per standard masking rules. + output = ops.where(output_mask, output, ops.cast(0, output.dtype)) + output_mask = ops.any(output_mask, axis=-1, keepdims=False) + backend.set_keras_mask(output, output_mask) + return output + + +@keras_export("keras.layers.multiply") +def multiply(inputs, **kwargs): + """Functional interface to the `keras.layers.Multiply` layer. + + Args: + inputs: A list of input tensors , all of the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the elementwise product of the inputs with the same + shape as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.multiply([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> y = keras.layers.multiply([x1, x2]) + >>> out = keras.layers.Dense(4)(y) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Multiply(**kwargs)(inputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/subtract.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/subtract.py new file mode 100644 index 0000000000000000000000000000000000000000..78036adaf233278deaee212c28bb55677e46a41b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/merging/subtract.py @@ -0,0 +1,82 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.merging.base_merge import Merge + + +@keras_export("keras.layers.Subtract") +class Subtract(Merge): + """Performs elementwise subtraction. + + It takes as input a list of tensors of size 2 both of the + same shape, and returns a single tensor (inputs[0] - inputs[1]) + of same shape. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.Subtract()([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> # equivalent to `subtracted = keras.layers.subtract([x1, x2])` + >>> subtracted = keras.layers.Subtract()([x1, x2]) + >>> out = keras.layers.Dense(4)(subtracted) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + + def build(self, input_shape): + super().build(input_shape) + if len(input_shape) != 2: + raise ValueError( + "A `Subtract` layer should be called on exactly 2 inputs. " + f"Received: input_shape={input_shape}" + ) + + def _merge_function(self, inputs): + if len(inputs) != 2: + raise ValueError( + "A `Subtract` layer should be called on exactly 2 inputs. " + f"Received: inputs={inputs}" + ) + return ops.subtract(inputs[0], inputs[1]) + + +@keras_export("keras.layers.subtract") +def subtract(inputs, **kwargs): + """Functional interface to the `keras.layers.Subtract` layer. + + Args: + inputs: A list of input tensors of size 2, each tensor of + the same shape. + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor as the difference of the inputs. It has the same shape + as the inputs. 
+ + Examples: + + >>> input_shape = (2, 3, 4) + >>> x1 = np.random.rand(*input_shape) + >>> x2 = np.random.rand(*input_shape) + >>> y = keras.layers.subtract([x1, x2]) + + Usage in a Keras model: + + >>> input1 = keras.layers.Input(shape=(16,)) + >>> x1 = keras.layers.Dense(8, activation='relu')(input1) + >>> input2 = keras.layers.Input(shape=(32,)) + >>> x2 = keras.layers.Dense(8, activation='relu')(input2) + >>> subtracted = keras.layers.subtract([x1, x2]) + >>> out = keras.layers.Dense(4)(subtracted) + >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) + + """ + return Subtract(**kwargs)(inputs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ec529c5f81abddd45fcf32d152dcf34958f57fc Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/batch_normalization.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/batch_normalization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c23acd0191fdf19fb1b6a7c4105c60573bdb0297 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/batch_normalization.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/group_normalization.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/group_normalization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..957c8072251ad441ff9de1be392ca8eda70beee6 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/group_normalization.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/layer_normalization.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/layer_normalization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88ff8040d9141b32fcb0d8b3b95ab8adb439f0c4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/layer_normalization.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/spectral_normalization.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/spectral_normalization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a35eab4e485ad139e0de96e4aa2f8d87cedd21b0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/spectral_normalization.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/unit_normalization.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/unit_normalization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd470fa29945bb73056b7b2c2c2cf5c758a7ad35 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/__pycache__/unit_normalization.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/batch_normalization.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/batch_normalization.py new file mode 100644 index 0000000000000000000000000000000000000000..5cd2e37527a76b36f728b7c96b1a3270d7c9d9a8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/batch_normalization.py @@ -0,0 +1,352 @@ +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.BatchNormalization") +class BatchNormalization(Layer): + """Layer that normalizes its inputs. + + Batch normalization applies a transformation that maintains the mean output + close to 0 and the output standard deviation close to 1. + + Importantly, batch normalization works differently during training and + during inference. + + **During training** (i.e. when using `fit()` or when calling the layer/model + with the argument `training=True`), the layer normalizes its output using + the mean and standard deviation of the current batch of inputs. 
That is to + say, for each channel being normalized, the layer returns + `gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`, where: + + - `epsilon` is small constant (configurable as part of the constructor + arguments) + - `gamma` is a learned scaling factor (initialized as 1), which + can be disabled by passing `scale=False` to the constructor. + - `beta` is a learned offset factor (initialized as 0), which + can be disabled by passing `center=False` to the constructor. + + **During inference** (i.e. when using `evaluate()` or `predict()` or when + calling the layer/model with the argument `training=False` (which is the + default), the layer normalizes its output using a moving average of the + mean and standard deviation of the batches it has seen during training. That + is to say, it returns + `gamma * (batch - self.moving_mean) / sqrt(self.moving_var+epsilon) + beta`. + + `self.moving_mean` and `self.moving_var` are non-trainable variables that + are updated each time the layer in called in training mode, as such: + + - `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)` + - `moving_var = moving_var * momentum + var(batch) * (1 - momentum)` + + As such, the layer will only normalize its inputs during inference + *after having been trained on data that has similar statistics as the + inference data*. + + Args: + axis: Integer, the axis that should be normalized + (typically the features axis). For instance, after a `Conv2D` layer + with `data_format="channels_first"`, use `axis=1`. + momentum: Momentum for the moving average. + epsilon: Small float added to variance to avoid dividing by zero. + center: If `True`, add offset of `beta` to normalized tensor. + If `False`, `beta` is ignored. + scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not used. + When the next layer is linear this can be disabled + since the scaling will be done by the next layer. + beta_initializer: Initializer for the beta weight. 
+ gamma_initializer: Initializer for the gamma weight. + moving_mean_initializer: Initializer for the moving mean. + moving_variance_initializer: Initializer for the moving variance. + beta_regularizer: Optional regularizer for the beta weight. + gamma_regularizer: Optional regularizer for the gamma weight. + beta_constraint: Optional constraint for the beta weight. + gamma_constraint: Optional constraint for the gamma weight. + synchronized: Only applicable with the TensorFlow backend. + If `True`, synchronizes the global batch statistics (mean and + variance) for the layer across all devices at each training step + in a distributed training strategy. + If `False`, each replica uses its own local batch statistics. + **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`). + + Call arguments: + inputs: Input tensor (of any rank). + training: Python boolean indicating whether the layer should behave in + training mode or in inference mode. + - `training=True`: The layer will normalize its inputs using + the mean and variance of the current batch of inputs. + - `training=False`: The layer will normalize its inputs using + the mean and variance of its moving statistics, learned during + training. + mask: Binary tensor of shape broadcastable to `inputs` tensor, with + `True` values indicating the positions for which mean and variance + should be computed. Masked elements of the current inputs are not + taken into account for mean and variance computation during + training. Any prior unmasked element values will be taken into + account until their momentum expires. + + Reference: + + - [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167). + + **About setting `layer.trainable = False` on a `BatchNormalization` layer:** + + The meaning of setting `layer.trainable = False` is to freeze the layer, + i.e. 
its internal state will not change during training: + its trainable weights will not be updated + during `fit()` or `train_on_batch()`, and its state updates will not be run. + + Usually, this does not necessarily mean that the layer is run in inference + mode (which is normally controlled by the `training` argument that can + be passed when calling a layer). "Frozen state" and "inference mode" + are two separate concepts. + + However, in the case of the `BatchNormalization` layer, **setting + `trainable = False` on the layer means that the layer will be + subsequently run in inference mode** (meaning that it will use + the moving mean and the moving variance to normalize the current batch, + rather than using the mean and variance of the current batch). + + Note that: + + - Setting `trainable` on an model containing other layers will recursively + set the `trainable` value of all inner layers. + - If the value of the `trainable` attribute is changed after calling + `compile()` on a model, the new value doesn't take effect for this model + until `compile()` is called again. + """ + + def __init__( + self, + axis=-1, + momentum=0.99, + epsilon=1e-3, + center=True, + scale=True, + beta_initializer="zeros", + gamma_initializer="ones", + moving_mean_initializer="zeros", + moving_variance_initializer="ones", + beta_regularizer=None, + gamma_regularizer=None, + beta_constraint=None, + gamma_constraint=None, + synchronized=False, + **kwargs, + ): + super().__init__(**kwargs) + self.axis = int(axis) + + if synchronized and backend.backend() != "tensorflow": + raise ValueError( + "Argument synchronized=True is only supported " + "with the TensorFlow backend." 
+ ) + self.synchronized = synchronized + + self.momentum = float(momentum) + self.epsilon = float(epsilon) + self.center = center + self.scale = scale + self.beta_initializer = initializers.get(beta_initializer) + self.gamma_initializer = initializers.get(gamma_initializer) + self.moving_mean_initializer = initializers.get(moving_mean_initializer) + self.moving_variance_initializer = initializers.get( + moving_variance_initializer + ) + self.beta_regularizer = regularizers.get(beta_regularizer) + self.gamma_regularizer = regularizers.get(gamma_regularizer) + self.beta_constraint = constraints.get(beta_constraint) + self.gamma_constraint = constraints.get(gamma_constraint) + self.supports_masking = True + + self.gamma = None + self.beta = None + self.moving_mean = None + self.moving_variance = None + self._reduction_axes = None + + def build(self, input_shape): + shape = (input_shape[self.axis],) + if self.scale: + self.gamma = self.add_weight( + shape=shape, + name="gamma", + initializer=self.gamma_initializer, + regularizer=self.gamma_regularizer, + constraint=self.gamma_constraint, + trainable=True, + autocast=False, + ) + if self.center: + self.beta = self.add_weight( + shape=shape, + name="beta", + initializer=self.beta_initializer, + regularizer=self.beta_regularizer, + constraint=self.beta_constraint, + trainable=True, + autocast=False, + ) + self.moving_mean = self.add_weight( + shape=shape, + name="moving_mean", + initializer=self.moving_mean_initializer, + trainable=False, + autocast=False, + ) + self.moving_variance = self.add_weight( + shape=shape, + name="moving_variance", + initializer=self.moving_variance_initializer, + trainable=False, + autocast=False, + ) + + self.input_spec = InputSpec( + ndim=len(input_shape), axes={self.axis: input_shape[self.axis]} + ) + + reduction_axes = list(range(len(input_shape))) + del reduction_axes[self.axis] + self._reduction_axes = reduction_axes + self.built = True + + def compute_output_shape(self, input_shape): + 
if isinstance(self.axis, int): + axes = [self.axis] + else: + axes = self.axis + + for axis in axes: + if axis >= len(input_shape) or axis < -len(input_shape): + raise ValueError( + f"Axis {axis} is out of bounds for " + f"input shape {input_shape}. " + f"Received: axis={self.axis}" + ) + return input_shape + + def call(self, inputs, training=None, mask=None): + # Check if the mask has one less dimension than the inputs. + if mask is not None: + if len(mask.shape) != len(inputs.shape) - 1: + # Raise a value error + raise ValueError( + "The mask provided should be one dimension less " + "than the inputs. Received: " + f"mask.shape={mask.shape}, inputs.shape={inputs.shape}" + ) + + compute_dtype = backend.result_type(inputs.dtype, "float32") + # BN is prone to overflow with float16/bfloat16 inputs, so we upcast to + # float32 for the subsequent computations. + inputs = ops.cast(inputs, compute_dtype) + + moving_mean = ops.cast(self.moving_mean, inputs.dtype) + moving_variance = ops.cast(self.moving_variance, inputs.dtype) + + if training and self.trainable: + mean, variance = self._moments(inputs, mask) + + self.moving_mean.assign( + moving_mean * self.momentum + mean * (1.0 - self.momentum) + ) + self.moving_variance.assign( + moving_variance * self.momentum + + variance * (1.0 - self.momentum) + ) + else: + mean = moving_mean + variance = moving_variance + + if self.scale: + gamma = ops.cast(self.gamma, inputs.dtype) + else: + gamma = None + + if self.center: + beta = ops.cast(self.beta, inputs.dtype) + else: + beta = None + + outputs = ops.batch_normalization( + x=inputs, + mean=mean, + variance=variance, + axis=self.axis, + offset=beta, + scale=gamma, + epsilon=self.epsilon, + ) + return ops.cast(outputs, self.compute_dtype) + + def get_config(self): + base_config = super().get_config() + config = { + "axis": self.axis, + "momentum": self.momentum, + "epsilon": self.epsilon, + "center": self.center, + "scale": self.scale, + "beta_initializer": 
initializers.serialize(self.beta_initializer), + "gamma_initializer": initializers.serialize(self.gamma_initializer), + "moving_mean_initializer": initializers.serialize( + self.moving_mean_initializer + ), + "moving_variance_initializer": initializers.serialize( + self.moving_variance_initializer + ), + "beta_regularizer": regularizers.serialize(self.beta_regularizer), + "gamma_regularizer": regularizers.serialize(self.gamma_regularizer), + "beta_constraint": constraints.serialize(self.beta_constraint), + "gamma_constraint": constraints.serialize(self.gamma_constraint), + "synchronized": self.synchronized, + } + return {**base_config, **config} + + def _moments(self, inputs, mask): + if mask is None: + return ops.moments( + inputs, + axes=self._reduction_axes, + synchronized=self.synchronized, + ) + + mask_weights = ops.cast( + mask, + inputs.dtype, + ) + mask_weights_broadcasted = ops.expand_dims( + mask_weights, + axis=-1, + ) + weighted_inputs = mask_weights_broadcasted * inputs + + weighted_input_sum = ops.sum( + weighted_inputs, + self._reduction_axes, + keepdims=True, + ) + sum_of_weights = ops.sum( + mask_weights_broadcasted, + self._reduction_axes, + keepdims=True, + ) + mean = weighted_input_sum / (sum_of_weights + backend.config.epsilon()) + + difference = weighted_inputs - mean + squared_difference = ops.square(difference) + weighted_distsq = ops.sum( + mask_weights_broadcasted * squared_difference, + self._reduction_axes, + keepdims=True, + ) + variance = weighted_distsq / (sum_of_weights + backend.config.epsilon()) + + return ops.squeeze(mean), ops.squeeze(variance) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/group_normalization.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/group_normalization.py new file mode 100644 index 0000000000000000000000000000000000000000..9d91d1f9944e8aad63dd1b6e6fd8e325b7f547db --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/group_normalization.py @@ -0,0 +1,240 @@ +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.GroupNormalization") +class GroupNormalization(Layer): + """Group normalization layer. + + Group Normalization divides the channels into groups and computes + within each group the mean and variance for normalization. + Empirically, its accuracy is more stable than batch norm in a wide + range of small batch sizes, if learning rate is adjusted linearly + with batch sizes. + + Relation to Layer Normalization: + If the number of groups is set to 1, then this operation becomes nearly + identical to Layer Normalization (see Layer Normalization docs for details). + + Relation to Instance Normalization: + If the number of groups is set to the input dimension (number of groups is + equal to number of channels), then this operation becomes identical to + Instance Normalization. You can achieve this via `groups=-1`. + + Args: + groups: Integer, the number of groups for Group Normalization. Can be in + the range `[1, N]` where N is the input dimension. The input + dimension must be divisible by the number of groups. + Defaults to 32. + axis: Integer or List/Tuple. The axis or axes to normalize across. + Typically, this is the features axis/axes. The left-out axes are + typically the batch axis/axes. -1 is the last dimension in the + input. Defaults to `-1`. + epsilon: Small float added to variance to avoid dividing by zero. + Defaults to 1e-3. + center: If `True`, add offset of `beta` to normalized tensor. + If `False`, `beta` is ignored. Defaults to `True`. + scale: If `True`, multiply by `gamma`. 
If `False`, `gamma` is not used. + When the next layer is linear (also e.g. `relu`), this can be + disabled since the scaling will be done by the next layer. + Defaults to `True`. + beta_initializer: Initializer for the beta weight. Defaults to zeros. + gamma_initializer: Initializer for the gamma weight. Defaults to ones. + beta_regularizer: Optional regularizer for the beta weight. None by + default. + gamma_regularizer: Optional regularizer for the gamma weight. None by + default. + beta_constraint: Optional constraint for the beta weight. + None by default. + gamma_constraint: Optional constraint for the gamma weight. None by + default. Input shape: Arbitrary. Use the keyword argument + `input_shape` (tuple of integers, does not include the samples + axis) when using this layer as the first layer in a model. + Output shape: Same shape as input. + **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`). + + Reference: + + - [Yuxin Wu & Kaiming He, 2018](https://arxiv.org/abs/1803.08494) + """ + + def __init__( + self, + groups=32, + axis=-1, + epsilon=1e-3, + center=True, + scale=True, + beta_initializer="zeros", + gamma_initializer="ones", + beta_regularizer=None, + gamma_regularizer=None, + beta_constraint=None, + gamma_constraint=None, + **kwargs, + ): + super().__init__(**kwargs) + self.supports_masking = True + self.groups = groups + self.axis = axis + self.epsilon = epsilon + self.center = center + self.scale = scale + self.beta_initializer = initializers.get(beta_initializer) + self.gamma_initializer = initializers.get(gamma_initializer) + self.beta_regularizer = regularizers.get(beta_regularizer) + self.gamma_regularizer = regularizers.get(gamma_regularizer) + self.beta_constraint = constraints.get(beta_constraint) + self.gamma_constraint = constraints.get(gamma_constraint) + + def build(self, input_shape): + dim = input_shape[self.axis] + + if dim is None: + raise ValueError( + f"Axis {self.axis} of input tensor should have a defined " + 
"dimension but the layer received an input with shape " + f"{input_shape}." + ) + + if self.groups == -1: + self.groups = dim + + if dim < self.groups: + raise ValueError( + f"Number of groups ({self.groups}) cannot be more than the " + f"number of channels ({dim})." + ) + + if dim % self.groups != 0: + raise ValueError( + f"Number of groups ({self.groups}) must be a multiple " + f"of the number of channels ({dim})." + ) + + self.input_spec = InputSpec( + ndim=len(input_shape), axes={self.axis: dim} + ) + + if self.scale: + self.gamma = self.add_weight( + shape=(dim,), + name="gamma", + initializer=self.gamma_initializer, + regularizer=self.gamma_regularizer, + constraint=self.gamma_constraint, + ) + else: + self.gamma = None + + if self.center: + self.beta = self.add_weight( + shape=(dim,), + name="beta", + initializer=self.beta_initializer, + regularizer=self.beta_regularizer, + constraint=self.beta_constraint, + ) + else: + self.beta = None + + super().build(input_shape) + + def call(self, inputs): + reshaped_inputs = self._reshape_into_groups(inputs) + normalized_inputs = self._apply_normalization( + reshaped_inputs, inputs.shape + ) + return ops.reshape(normalized_inputs, ops.shape(inputs)) + + def _reshape_into_groups(self, inputs): + input_shape = ops.shape(inputs) + group_shape = list(inputs.shape) + group_shape[0] = -1 + for i, e in enumerate(group_shape[1:]): + if e is None: + group_shape[i + 1] = input_shape[i + 1] + + group_shape[self.axis] = input_shape[self.axis] // self.groups + group_shape.insert(self.axis, self.groups) + reshaped_inputs = ops.reshape(inputs, group_shape) + return reshaped_inputs + + def _apply_normalization(self, reshaped_inputs, input_shape): + inputs_dtype = reshaped_inputs.dtype + compute_dtype = backend.result_type(inputs_dtype, "float32") + # GN is prone to overflow with float16/bfloat16 inputs, so we upcast to + # float32 for the subsequent computations. 
+ reshaped_inputs = ops.cast(reshaped_inputs, compute_dtype) + + group_reduction_axes = list(range(1, len(reshaped_inputs.shape))) + + axis = -2 if self.axis == -1 else self.axis - 1 + group_reduction_axes.pop(axis) + + broadcast_shape = self._create_broadcast_shape(input_shape) + mean, variance = ops.moments( + reshaped_inputs, axes=group_reduction_axes, keepdims=True + ) + + # Compute the batch normalization. + inv = ops.rsqrt(variance + self.epsilon) + if self.scale: + gamma = ops.reshape(self.gamma, broadcast_shape) + gamma = ops.cast(gamma, reshaped_inputs.dtype) + inv = inv * gamma + + res = -mean * inv + if self.center: + beta = ops.reshape(self.beta, broadcast_shape) + beta = ops.cast(beta, reshaped_inputs.dtype) + res = res + beta + + normalized_inputs = reshaped_inputs * inv + res + normalized_inputs = ops.cast(normalized_inputs, inputs_dtype) + + return normalized_inputs + + def _create_broadcast_shape(self, input_shape): + broadcast_shape = [1] * len(input_shape) + broadcast_shape[self.axis] = input_shape[self.axis] // self.groups + broadcast_shape.insert(self.axis, self.groups) + return broadcast_shape + + def compute_output_shape(self, input_shape): + if isinstance(self.axis, int): + axes = [self.axis] + else: + axes = self.axis + + for axis in axes: + if axis >= len(input_shape) or axis < -len(input_shape): + raise ValueError( + f"Axis {axis} is out of bounds for " + f"input shape {input_shape}. 
" + f"Received: axis={self.axis}" + ) + return input_shape + + def get_config(self): + config = { + "groups": self.groups, + "axis": self.axis, + "epsilon": self.epsilon, + "center": self.center, + "scale": self.scale, + "beta_initializer": initializers.serialize(self.beta_initializer), + "gamma_initializer": initializers.serialize(self.gamma_initializer), + "beta_regularizer": regularizers.serialize(self.beta_regularizer), + "gamma_regularizer": regularizers.serialize(self.gamma_regularizer), + "beta_constraint": constraints.serialize(self.beta_constraint), + "gamma_constraint": constraints.serialize(self.gamma_constraint), + } + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/layer_normalization.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/layer_normalization.py new file mode 100644 index 0000000000000000000000000000000000000000..52301bfe2c9aa3cea14e3fcbdfcb532fcfe33702 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/layer_normalization.py @@ -0,0 +1,265 @@ +from keras.src import backend +from keras.src import constraints +from keras.src import initializers +from keras.src import ops +from keras.src import regularizers +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.LayerNormalization") +class LayerNormalization(Layer): + """Layer normalization layer (Ba et al., 2016). + + Normalize the activations of the previous layer for each given example in a + batch independently, rather than across a batch like Batch Normalization. + i.e. applies a transformation that maintains the mean activation within each + example close to 0 and the activation standard deviation close to 1. 
+ + If `scale` or `center` are enabled, the layer will scale the normalized + outputs by broadcasting them with a trainable variable `gamma`, and center + the outputs by broadcasting with a trainable variable `beta`. `gamma` will + default to a ones tensor and `beta` will default to a zeros tensor, so that + centering and scaling are no-ops before training has begun. + + So, with scaling and centering enabled the normalization equations + are as follows: + + Let the intermediate activations for a mini-batch to be the `inputs`. + + For each sample `x_i` in `inputs` with `k` features, we compute the mean and + variance of the sample: + + ```python + mean_i = sum(x_i[j] for j in range(k)) / k + var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k + ``` + + and then compute a normalized `x_i_normalized`, including a small factor + `epsilon` for numerical stability. + + ```python + x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon) + ``` + + And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`, + which are learned parameters: + + ```python + output_i = x_i_normalized * gamma + beta + ``` + + `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and + this part of the inputs' shape must be fully defined. + + For example: + + >>> layer = keras.layers.LayerNormalization(axis=[1, 2, 3]) + >>> layer.build([5, 20, 30, 40]) + >>> print(layer.beta.shape) + (20, 30, 40) + >>> print(layer.gamma.shape) + (20, 30, 40) + + Note that other implementations of layer normalization may choose to define + `gamma` and `beta` over a separate set of axes from the axes being + normalized across. For example, Group Normalization + ([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1 + corresponds to a Layer Normalization that normalizes across height, width, + and channel and has `gamma` and `beta` span only the channel dimension. 
+ So, this Layer Normalization implementation will not match a Group + Normalization layer with group size set to 1. + + Args: + axis: Integer or List/Tuple. The axis or axes to normalize across. + Typically, this is the features axis/axes. The left-out axes are + typically the batch axis/axes. `-1` is the last dimension in the + input. Defaults to `-1`. + epsilon: Small float added to variance to avoid dividing by zero. + Defaults to 1e-3. + center: If True, add offset of `beta` to normalized tensor. If False, + `beta` is ignored. Defaults to `True`. + scale: If True, multiply by `gamma`. If False, `gamma` is not used. + When the next layer is linear (also e.g. `nn.relu`), this can be + disabled since the scaling will be done by the next layer. + Defaults to `True`. + rms_scaling: If True, `center` and `scale` are ignored, and the + inputs are scaled by `gamma` and the inverse square root + of the square of all inputs. This is an approximate and faster + approach that avoids ever computing the mean of the input. + beta_initializer: Initializer for the beta weight. Defaults to zeros. + gamma_initializer: Initializer for the gamma weight. Defaults to ones. + beta_regularizer: Optional regularizer for the beta weight. + None by default. + gamma_regularizer: Optional regularizer for the gamma weight. + None by default. + beta_constraint: Optional constraint for the beta weight. + None by default. + gamma_constraint: Optional constraint for the gamma weight. + None by default. + **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`). + + + Reference: + + - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450). 
+ """ + + def __init__( + self, + axis=-1, + epsilon=1e-3, + center=True, + scale=True, + rms_scaling=False, + beta_initializer="zeros", + gamma_initializer="ones", + beta_regularizer=None, + gamma_regularizer=None, + beta_constraint=None, + gamma_constraint=None, + **kwargs, + ): + super().__init__(**kwargs) + if isinstance(axis, (list, tuple)): + self.axis = list(axis) + elif isinstance(axis, int): + self.axis = axis + else: + raise TypeError( + "Expected an int or a list/tuple of ints for the " + "argument 'axis', but received: %r" % axis + ) + + self.epsilon = epsilon + self.center = center + self.scale = scale + self.rms_scaling = rms_scaling + self.beta_initializer = initializers.get(beta_initializer) + self.gamma_initializer = initializers.get(gamma_initializer) + self.beta_regularizer = regularizers.get(beta_regularizer) + self.gamma_regularizer = regularizers.get(gamma_regularizer) + self.beta_constraint = constraints.get(beta_constraint) + self.gamma_constraint = constraints.get(gamma_constraint) + + self.supports_masking = True + self.autocast = False + + def build(self, input_shape): + if isinstance(self.axis, list): + shape = tuple([input_shape[dim] for dim in self.axis]) + else: + shape = (input_shape[self.axis],) + self.axis = [self.axis] + if self.scale or self.rms_scaling: + self.gamma = self.add_weight( + name="gamma", + shape=shape, + initializer=self.gamma_initializer, + regularizer=self.gamma_regularizer, + constraint=self.gamma_constraint, + trainable=True, + autocast=False, + ) + else: + self.gamma = None + + if self.center and not self.rms_scaling: + self.beta = self.add_weight( + name="beta", + shape=shape, + initializer=self.beta_initializer, + regularizer=self.beta_regularizer, + constraint=self.beta_constraint, + trainable=True, + autocast=False, + ) + else: + self.beta = None + + self.built = True + + def call(self, inputs): + # Compute the axes along which to reduce the mean / variance + input_shape = inputs.shape + ndims = 
len(input_shape) + + # Broadcasting only necessary for norm when the axis is not just + # the last dimension + broadcast_shape = [1] * ndims + for dim in self.axis: + broadcast_shape[dim] = input_shape[dim] + + def _broadcast(v): + if ( + v is not None + and len(v.shape) != ndims + and self.axis != [ndims - 1] + ): + return ops.reshape(v, broadcast_shape) + return v + + compute_dtype = backend.result_type(inputs.dtype, "float32") + # LN is prone to overflow with float16/bfloat16 inputs, so we upcast to + # float32 for the subsequent computations. + inputs = ops.cast(inputs, compute_dtype) + + if self.rms_scaling: + # Calculate outputs with only variance and gamma if rms scaling + # is enabled + # Calculate the variance along self.axis (layer activations). + variance = ops.var(inputs, axis=self.axis, keepdims=True) + inv = ops.rsqrt(variance + self.epsilon) + + outputs = ( + inputs * inv * ops.cast(_broadcast(self.gamma), inputs.dtype) + ) + else: + # Calculate the mean & variance along self.axis (layer activations). + mean, variance = ops.moments(inputs, axes=self.axis, keepdims=True) + gamma, beta = _broadcast(self.gamma), _broadcast(self.beta) + + inv = ops.rsqrt(variance + self.epsilon) + if gamma is not None: + gamma = ops.cast(gamma, inputs.dtype) + inv = inv * gamma + + res = -mean * inv + if beta is not None: + beta = ops.cast(beta, inputs.dtype) + res = res + beta + + outputs = inputs * inv + res + return ops.cast(outputs, self.compute_dtype) + + def compute_output_shape(self, input_shape): + if isinstance(self.axis, int): + axes = [self.axis] + else: + axes = self.axis + + for axis in axes: + if axis >= len(input_shape) or axis < -len(input_shape): + raise ValueError( + f"Axis {axis} is out of bounds for " + f"input shape {input_shape}. 
" + f"Received: axis={self.axis}" + ) + return input_shape + + def get_config(self): + config = { + "axis": self.axis, + "epsilon": self.epsilon, + "center": self.center, + "scale": self.scale, + "rms_scaling": self.rms_scaling, + "beta_initializer": initializers.serialize(self.beta_initializer), + "gamma_initializer": initializers.serialize(self.gamma_initializer), + "beta_regularizer": regularizers.serialize(self.beta_regularizer), + "gamma_regularizer": regularizers.serialize(self.gamma_regularizer), + "beta_constraint": constraints.serialize(self.beta_constraint), + "gamma_constraint": constraints.serialize(self.gamma_constraint), + } + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/spectral_normalization.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/spectral_normalization.py new file mode 100644 index 0000000000000000000000000000000000000000..fc11844fc929c75beba85114735c48f08775b318 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/spectral_normalization.py @@ -0,0 +1,121 @@ +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers import Wrapper +from keras.src.layers.input_spec import InputSpec +from keras.src.utils.numerical_utils import normalize + + +@keras_export("keras.layers.SpectralNormalization") +class SpectralNormalization(Wrapper): + """Performs spectral normalization on the weights of a target layer. + + This wrapper controls the Lipschitz constant of the weights of a layer by + constraining their spectral norm, which can stabilize the training of GANs. + + Args: + layer: A `keras.layers.Layer` instance that + has either a `kernel` (e.g. `Conv2D`, `Dense`...) + or an `embeddings` attribute (`Embedding` layer). 
+ power_iterations: int, the number of iterations during normalization. + **kwargs: Base wrapper keyword arguments. + + Examples: + + Wrap `keras.layers.Conv2D`: + >>> x = np.random.rand(1, 10, 10, 1) + >>> conv2d = SpectralNormalization(keras.layers.Conv2D(2, 2)) + >>> y = conv2d(x) + >>> y.shape + (1, 9, 9, 2) + + Wrap `keras.layers.Dense`: + >>> x = np.random.rand(1, 10, 10, 1) + >>> dense = SpectralNormalization(keras.layers.Dense(10)) + >>> y = dense(x) + >>> y.shape + (1, 10, 10, 10) + + Reference: + + - [Spectral Normalization for GAN](https://arxiv.org/abs/1802.05957). + """ + + def __init__(self, layer, power_iterations=1, **kwargs): + super().__init__(layer, **kwargs) + if power_iterations <= 0: + raise ValueError( + "`power_iterations` should be greater than zero. Received: " + f"`power_iterations={power_iterations}`" + ) + self.power_iterations = power_iterations + + def build(self, input_shape): + super().build(input_shape) + self.input_spec = InputSpec(shape=[None] + list(input_shape[1:])) + + if hasattr(self.layer, "kernel"): + self.kernel = self.layer.kernel + elif hasattr(self.layer, "embeddings"): + self.kernel = self.layer.embeddings + else: + raise ValueError( + f"{type(self.layer).__name__} object has no attribute 'kernel' " + "nor 'embeddings'" + ) + + self.kernel_shape = self.kernel.shape + + self.vector_u = self.add_weight( + shape=(1, self.kernel_shape[-1]), + initializer=initializers.TruncatedNormal(stddev=0.02), + trainable=False, + name="vector_u", + dtype=self.kernel.dtype, + ) + + def call(self, inputs, training=False): + if training: + new_vector_u, new_kernel = ops.cond( + ops.all(ops.equal(self.kernel.value, 0)), + lambda: (self.vector_u.value, self.kernel.value), + self.normalized_weights, + ) + self.vector_u.assign(new_vector_u) + self.kernel.assign(new_kernel) + + output = self.layer(inputs) + return ops.cast(output, inputs.dtype) + + def compute_output_shape(self, input_shape): + return 
self.layer.compute_output_shape(input_shape) + + def normalized_weights(self): + """Generate spectral normalized weights. + + This method returns the updated value for `self.kernel` with the + spectral normalized value, so that the layer is ready for `call()`. + """ + + weights = ops.reshape(self.kernel, [-1, self.kernel_shape[-1]]) + vector_u = self.vector_u.value + + for _ in range(self.power_iterations): + vector_v = normalize( + ops.matmul(vector_u, ops.transpose(weights)), axis=None + ) + vector_u = normalize(ops.matmul(vector_v, weights), axis=None) + vector_u = ops.stop_gradient(vector_u) + vector_v = ops.stop_gradient(vector_v) + sigma = ops.matmul( + ops.matmul(vector_v, weights), ops.transpose(vector_u) + ) + kernel = ops.reshape(ops.divide(self.kernel, sigma), self.kernel_shape) + return ops.cast(vector_u, self.vector_u.dtype), ops.cast( + kernel, self.kernel.dtype + ) + + def get_config(self): + config = {"power_iterations": self.power_iterations} + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/unit_normalization.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/unit_normalization.py new file mode 100644 index 0000000000000000000000000000000000000000..be77aa59c30dec9a0d6436d298d70a2fa6e40e94 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/normalization/unit_normalization.py @@ -0,0 +1,63 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer + + +@keras_export("keras.layers.UnitNormalization") +class UnitNormalization(Layer): + """Unit normalization layer. + + Normalize a batch of inputs so that each input in the batch has a L2 norm + equal to 1 (across the axes specified in `axis`). 
+ + Example: + + >>> data = np.arange(6).reshape(2, 3) + >>> normalized_data = keras.layers.UnitNormalization()(data) + >>> np.sum(normalized_data[0, :] ** 2) + 1.0 + + Args: + axis: Integer or list/tuple. The axis or axes to normalize across. + Typically, this is the features axis or axes. The left-out axes are + typically the batch axis or axes. `-1` is the last dimension + in the input. Defaults to `-1`. + """ + + def __init__(self, axis=-1, **kwargs): + super().__init__(**kwargs) + if isinstance(axis, (list, tuple)): + self.axis = list(axis) + elif isinstance(axis, int): + self.axis = axis + else: + raise TypeError( + "Invalid value for `axis` argument: " + "expected an int or a list/tuple of ints. " + f"Received: axis={axis}" + ) + self.supports_masking = True + self.built = True + + def call(self, inputs): + return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12) + + def compute_output_shape(self, input_shape): + # Ensure axis is always treated as a list + if isinstance(self.axis, int): + axes = [self.axis] + else: + axes = self.axis + + for axis in axes: + if axis >= len(input_shape) or axis < -len(input_shape): + raise ValueError( + f"Axis {self.axis} is out of bounds for " + f"input shape {input_shape}." 
+ ) + return input_shape + + def get_config(self): + config = super().get_config() + config.update({"axis": self.axis}) + return config diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61f1866f7996221f0080c21278d099feedb6dc9d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling1d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5649a3ab99c20ef7299302cf877ab4140d8cbfd4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling1d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling2d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae9371e74d6f8cbd16ff510d1214bf72fabe3014 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling2d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling3d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ed07008cfbe896e9189eb0e99b6c11f85c2da25 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/average_pooling3d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_global_pooling.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_global_pooling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ec9f0b239cfed994568f8674b9cf94e8beb3572 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_global_pooling.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_pooling.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_pooling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e41bd7fe92563ee6b00fcc1c28b08397b00277c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/base_pooling.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling1d.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dce9e8104d594812ebfc3edbda0ba0cf9f37d6f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling1d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling2d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b07f03fa3414a55bdf1a35ceb5eed81f6c47276 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling2d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling3d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23aa6ac424b4c11eb978cef75ecb00802a2f9f56 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_average_pooling3d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling1d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5f22af1909e1cbb20b7ac6894bc3b6cf7ff6394 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling1d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling2d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac5b2046204ad6901848cf452adcfa031c424e19 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling2d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling3d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0097387b06dc74ffda006d812fd62c3dacfa83bd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/global_max_pooling3d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling1d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ab0441b934c6eb59611ce55cbe851463e102e7e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling1d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling2d.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efa7f30903142dec7219c4538ffb95b00101ed88 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling2d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling3d.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08d4a29cee75687e7316e8def7301ba963dc5fd0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/__pycache__/max_pooling3d.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling1d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling1d.py new file mode 100644 index 0000000000000000000000000000000000000000..0450149c0473994a3a5db09817d71a50664d8656 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling1d.py @@ -0,0 +1,92 @@ +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + + +@keras_export(["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"]) +class AveragePooling1D(BasePooling): + """Average pooling for temporal data. + + Downsamples the input representation by taking the average value over the + window defined by `pool_size`. The window is shifted by `strides`. 
The + resulting output when using "valid" padding option has a shape of: + `output_shape = (input_shape - pool_size + 1) / strides)` + + The resulting output shape when using the "same" padding option is: + `output_shape = input_shape / strides` + + Args: + pool_size: int, size of the max pooling window. + strides: int or None. Specifies how much the pooling window moves + for each pooling step. If None, it will default to `pool_size`. + padding: string, either `"valid"` or `"same"` (case-insensitive). + `"valid"` means no padding. `"same"` results in padding evenly to + the left/right or up/down of the input such that output has the same + height/width dimension as the input. + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape `(batch, steps, features)` + while `"channels_first"` corresponds to inputs with shape + `(batch, features, steps)`. It defaults to the `image_data_format` + value found in your Keras config file at `~/.keras/keras.json`. + If you never set it, then it will be `"channels_last"`. + + Input shape: + + - If `data_format="channels_last"`: + 3D tensor with shape `(batch_size, steps, features)`. + - If `data_format="channels_first"`: + 3D tensor with shape `(batch_size, features, steps)`. + + Output shape: + + - If `data_format="channels_last"`: + 3D tensor with shape `(batch_size, downsampled_steps, features)`. + - If `data_format="channels_first"`: + 3D tensor with shape `(batch_size, features, downsampled_steps)`. + + Examples: + + `strides=1` and `padding="valid"`: + + >>> x = np.array([1., 2., 3., 4., 5.]) + >>> x = np.reshape(x, [1, 5, 1]) + >>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2, + ... 
strides=1, padding="valid") + >>> avg_pool_1d(x) + + `strides=2` and `padding="valid"`: + + >>> x = np.array([1., 2., 3., 4., 5.]) + >>> x = np.reshape(x, [1, 5, 1]) + >>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2, + ... strides=2, padding="valid") + >>> avg_pool_1d(x) + + `strides=1` and `padding="same"`: + + >>> x = np.array([1., 2., 3., 4., 5.]) + >>> x = np.reshape(x, [1, 5, 1]) + >>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2, + ... strides=1, padding="same") + >>> avg_pool_1d(x) + """ + + def __init__( + self, + pool_size, + strides=None, + padding="valid", + data_format=None, + name=None, + **kwargs, + ): + super().__init__( + pool_size, + strides, + pool_dimensions=1, + pool_mode="average", + padding=padding, + data_format=data_format, + name=name, + **kwargs, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling2d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling2d.py new file mode 100644 index 0000000000000000000000000000000000000000..005a0cb9b7305e7c66e44e1586a4354a25e70b90 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling2d.py @@ -0,0 +1,109 @@ +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + + +@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"]) +class AveragePooling2D(BasePooling): + """Average pooling operation for 2D spatial data. + + Downsamples the input along its spatial dimensions (height and width) + by taking the average value over an input window + (of size defined by `pool_size`) for each channel of the input. + The window is shifted by `strides` along each dimension. 
+ + The resulting output when using the `"valid"` padding option has a spatial + shape (number of rows or columns) of: + `output_shape = math.floor((input_shape - pool_size) / strides) + 1` + (when `input_shape >= pool_size`) + + The resulting output shape when using the `"same"` padding option is: + `output_shape = math.floor((input_shape - 1) / strides) + 1` + + Args: + pool_size: int or tuple of 2 integers, factors by which to downscale + (dim1, dim2). If only one integer is specified, the same + window length will be used for all dimensions. + strides: int or tuple of 2 integers, or None. Strides values. If None, + it will default to `pool_size`. If only one int is specified, the + same stride size will be used for all dimensions. + padding: string, either `"valid"` or `"same"` (case-insensitive). + `"valid"` means no padding. `"same"` results in padding evenly to + the left/right or up/down of the input such that output has the same + height/width dimension as the input. + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape `(batch, height, width, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch, channels, height, width)`. It defaults to the + `image_data_format` value found in your Keras config file at + `~/.keras/keras.json`. If you never set it, then it will be + `"channels_last"`. + + Input shape: + + - If `data_format="channels_last"`: + 4D tensor with shape `(batch_size, height, width, channels)`. + - If `data_format="channels_first"`: + 4D tensor with shape `(batch_size, channels, height, width)`. + + Output shape: + + - If `data_format="channels_last"`: + 4D tensor with shape + `(batch_size, pooled_height, pooled_width, channels)`. + - If `data_format="channels_first"`: + 4D tensor with shape + `(batch_size, channels, pooled_height, pooled_width)`. 
+ + Examples: + + `strides=(1, 1)` and `padding="valid"`: + + >>> x = np.array([[1., 2., 3.], + ... [4., 5., 6.], + ... [7., 8., 9.]]) + >>> x = np.reshape(x, [1, 3, 3, 1]) + >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2), + ... strides=(1, 1), padding="valid") + >>> avg_pool_2d(x) + + `strides=(2, 2)` and `padding="valid"`: + + >>> x = np.array([[1., 2., 3., 4.], + ... [5., 6., 7., 8.], + ... [9., 10., 11., 12.]]) + >>> x = np.reshape(x, [1, 3, 4, 1]) + >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2), + ... strides=(2, 2), padding="valid") + >>> avg_pool_2d(x) + + `stride=(1, 1)` and `padding="same"`: + + >>> x = np.array([[1., 2., 3.], + ... [4., 5., 6.], + ... [7., 8., 9.]]) + >>> x = np.reshape(x, [1, 3, 3, 1]) + >>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2), + ... strides=(1, 1), padding="same") + >>> avg_pool_2d(x) + """ + + def __init__( + self, + pool_size, + strides=None, + padding="valid", + data_format=None, + name=None, + **kwargs, + ): + super().__init__( + pool_size, + strides, + pool_dimensions=2, + pool_mode="average", + padding=padding, + data_format=data_format, + name=name, + **kwargs, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling3d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling3d.py new file mode 100644 index 0000000000000000000000000000000000000000..2e5c7448d332cd8e737fac98855b3d9d61f1d448 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/average_pooling3d.py @@ -0,0 +1,85 @@ +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + + +@keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"]) +class AveragePooling3D(BasePooling): + """Average pooling operation for 3D data (spatial or spatio-temporal). 
+ + Downsamples the input along its spatial dimensions (depth, height, and + width) by taking the average value over an input window (of size defined by + `pool_size`) for each channel of the input. The window is shifted by + `strides` along each dimension. + + Args: + pool_size: int or tuple of 3 integers, factors by which to downscale + (dim1, dim2, dim3). If only one integer is specified, the same + window length will be used for all dimensions. + strides: int or tuple of 3 integers, or None. Strides values. If None, + it will default to `pool_size`. If only one int is specified, the + same stride size will be used for all dimensions. + padding: string, either `"valid"` or `"same"` (case-insensitive). + `"valid"` means no padding. `"same"` results in padding evenly to + the left/right or up/down of the input such that output has the same + height/width dimension as the input. + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape + `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while + `"channels_first"` corresponds to inputs with shape + `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. + It defaults to the `image_data_format` value found in your Keras + config file at `~/.keras/keras.json`. If you never set it, then it + will be `"channels_last"`. 
+ + Input shape: + + - If `data_format="channels_last"`: + 5D tensor with shape: + `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + - If `data_format="channels_first"`: + 5D tensor with shape: + `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` + + Output shape: + + - If `data_format="channels_last"`: + 5D tensor with shape: + `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` + - If `data_format="channels_first"`: + 5D tensor with shape: + `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)` + + Example: + + ```python + depth = 30 + height = 30 + width = 30 + channels = 3 + + inputs = keras.layers.Input(shape=(depth, height, width, channels)) + layer = keras.layers.AveragePooling3D(pool_size=3) + outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3) + ``` + """ + + def __init__( + self, + pool_size, + strides=None, + padding="valid", + data_format=None, + name=None, + **kwargs, + ): + super().__init__( + pool_size, + strides, + pool_dimensions=3, + pool_mode="average", + padding=padding, + data_format=data_format, + name=name, + **kwargs, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_global_pooling.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_global_pooling.py new file mode 100644 index 0000000000000000000000000000000000000000..e04ab0e626ab3b75abe978ec7e4c39538f885578 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_global_pooling.py @@ -0,0 +1,49 @@ +from keras.src import backend +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer + + +class BaseGlobalPooling(Layer): + """Base global pooling layer.""" + + def __init__( + self, pool_dimensions, data_format=None, keepdims=False, **kwargs + ): + super().__init__(**kwargs) + + self.data_format = backend.standardize_data_format(data_format) + 
self.keepdims = keepdims + self.input_spec = InputSpec(ndim=pool_dimensions + 2) + self.built = True + + def call(self, inputs): + raise NotImplementedError + + def compute_output_shape(self, input_shape): + num_spatial_dims = len(input_shape) - 2 + if self.data_format == "channels_last": + if self.keepdims: + return ( + (input_shape[0],) + + (1,) * num_spatial_dims + + (input_shape[-1],) + ) + else: + return (input_shape[0],) + (input_shape[-1],) + else: + if self.keepdims: + return (input_shape[0], input_shape[1]) + ( + 1, + ) * num_spatial_dims + else: + return (input_shape[0], input_shape[1]) + + def get_config(self): + config = super().get_config() + config.update( + { + "data_format": self.data_format, + "keepdims": self.keepdims, + } + ) + return config diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_pooling.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_pooling.py new file mode 100644 index 0000000000000000000000000000000000000000..79f571aed36b358930d86fb89869d2f81f403b11 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/base_pooling.py @@ -0,0 +1,81 @@ +from keras.src import backend +from keras.src import ops +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.ops.operation_utils import compute_pooling_output_shape +from keras.src.utils import argument_validation + + +class BasePooling(Layer): + """Base pooling layer.""" + + def __init__( + self, + pool_size, + strides, + pool_dimensions, + pool_mode="max", + padding="valid", + data_format=None, + name=None, + **kwargs, + ): + super().__init__(name=name, **kwargs) + + self.pool_size = argument_validation.standardize_tuple( + pool_size, pool_dimensions, "pool_size" + ) + strides = pool_size if strides is None else strides + self.strides = argument_validation.standardize_tuple( + strides, 
pool_dimensions, "strides", allow_zero=True + ) + self.pool_mode = pool_mode + self.padding = padding + self.data_format = backend.standardize_data_format(data_format) + + self.input_spec = InputSpec(ndim=pool_dimensions + 2) + self.built = True + + def call(self, inputs): + if self.pool_mode == "max": + return ops.max_pool( + inputs, + pool_size=self.pool_size, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + ) + elif self.pool_mode == "average": + return ops.average_pool( + inputs, + pool_size=self.pool_size, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + ) + else: + raise ValueError( + "`pool_mode` must be either 'max' or 'average'. Received: " + f"{self.pool_mode}." + ) + + def compute_output_shape(self, input_shape): + return compute_pooling_output_shape( + input_shape, + self.pool_size, + self.strides, + self.padding, + self.data_format, + ) + + def get_config(self): + config = super().get_config() + config.update( + { + "pool_size": self.pool_size, + "padding": self.padding, + "strides": self.strides, + "data_format": self.data_format, + } + ) + return config diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling1d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling1d.py new file mode 100644 index 0000000000000000000000000000000000000000..6db5fb923c8c942b1bd260cbd3fe97dff54e3821 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling1d.py @@ -0,0 +1,86 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + + +@keras_export( + [ + "keras.layers.GlobalAveragePooling1D", + "keras.layers.GlobalAvgPool1D", + ] +) +class GlobalAveragePooling1D(BaseGlobalPooling): + 
"""Global average pooling operation for temporal data. + + Args: + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape `(batch, steps, features)` + while `"channels_first"` corresponds to inputs with shape + `(batch, features, steps)`. It defaults to the `image_data_format` + value found in your Keras config file at `~/.keras/keras.json`. + If you never set it, then it will be `"channels_last"`. + keepdims: A boolean, whether to keep the temporal dimension or not. + If `keepdims` is `False` (default), the rank of the tensor is + reduced for spatial dimensions. If `keepdims` is `True`, the + temporal dimension are retained with length 1. + The behavior is the same as for `tf.reduce_mean` or `np.mean`. + + Call arguments: + inputs: A 3D tensor. + mask: Binary tensor of shape `(batch_size, steps)` indicating whether + a given step should be masked (excluded from the average). + + Input shape: + + - If `data_format='channels_last'`: + 3D tensor with shape: + `(batch_size, steps, features)` + - If `data_format='channels_first'`: + 3D tensor with shape: + `(batch_size, features, steps)` + + Output shape: + + - If `keepdims=False`: + 2D tensor with shape `(batch_size, features)`. 
+ - If `keepdims=True`: + - If `data_format="channels_last"`: + 3D tensor with shape `(batch_size, 1, features)` + - If `data_format="channels_first"`: + 3D tensor with shape `(batch_size, features, 1)` + + Example: + + >>> x = np.random.rand(2, 3, 4) + >>> y = keras.layers.GlobalAveragePooling1D()(x) + >>> y.shape + (2, 4) + """ + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__( + pool_dimensions=1, + data_format=data_format, + keepdims=keepdims, + **kwargs, + ) + self.supports_masking = True + + def call(self, inputs, mask=None): + steps_axis = 1 if self.data_format == "channels_last" else 2 + if mask is not None: + mask = backend.cast(mask, inputs[0].dtype) + mask = ops.expand_dims( + mask, 2 if self.data_format == "channels_last" else 1 + ) + inputs *= mask + return ops.sum( + inputs, axis=steps_axis, keepdims=self.keepdims + ) / ops.sum(mask, axis=steps_axis, keepdims=self.keepdims) + else: + return ops.mean(inputs, axis=steps_axis, keepdims=self.keepdims) + + def compute_mask(self, inputs, mask=None): + return None diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling2d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling2d.py new file mode 100644 index 0000000000000000000000000000000000000000..1536c3c302e842fe156f02459addd6a2350963e5 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling2d.py @@ -0,0 +1,68 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + + +@keras_export( + [ + "keras.layers.GlobalAveragePooling2D", + "keras.layers.GlobalAvgPool2D", + ] +) +class GlobalAveragePooling2D(BaseGlobalPooling): + """Global average pooling operation for 2D data. 
+ + Args: + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape `(batch, height, width, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch, features, height, weight)`. It defaults to the + `image_data_format` value found in your Keras config file at + `~/.keras/keras.json`. If you never set it, then it will be + `"channels_last"`. + keepdims: A boolean, whether to keep the temporal dimension or not. + If `keepdims` is `False` (default), the rank of the tensor is + reduced for spatial dimensions. If `keepdims` is `True`, the + spatial dimension are retained with length 1. + The behavior is the same as for `tf.reduce_mean` or `np.mean`. + + Input shape: + + - If `data_format='channels_last'`: + 4D tensor with shape: + `(batch_size, height, width, channels)` + - If `data_format='channels_first'`: + 4D tensor with shape: + `(batch_size, channels, height, width)` + + Output shape: + + - If `keepdims=False`: + 2D tensor with shape `(batch_size, channels)`. 
+ - If `keepdims=True`: + - If `data_format="channels_last"`: + 4D tensor with shape `(batch_size, 1, 1, channels)` + - If `data_format="channels_first"`: + 4D tensor with shape `(batch_size, channels, 1, 1)` + + Example: + + >>> x = np.random.rand(2, 4, 5, 3) + >>> y = keras.layers.GlobalAveragePooling2D()(x) + >>> y.shape + (2, 3) + """ + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__( + pool_dimensions=2, + data_format=data_format, + keepdims=keepdims, + **kwargs, + ) + + def call(self, inputs): + if self.data_format == "channels_last": + return ops.mean(inputs, axis=[1, 2], keepdims=self.keepdims) + return ops.mean(inputs, axis=[2, 3], keepdims=self.keepdims) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling3d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling3d.py new file mode 100644 index 0000000000000000000000000000000000000000..14ffc5bfc4d0a2a70ebf729804d72158adbda5b1 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_average_pooling3d.py @@ -0,0 +1,69 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + + +@keras_export( + [ + "keras.layers.GlobalAveragePooling3D", + "keras.layers.GlobalAvgPool3D", + ] +) +class GlobalAveragePooling3D(BaseGlobalPooling): + """Global average pooling operation for 3D data. + + Args: + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape + `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. 
+ It defaults to the `image_data_format` value found in your Keras + config file at `~/.keras/keras.json`. If you never set it, then it + will be `"channels_last"`. + keepdims: A boolean, whether to keep the temporal dimension or not. + If `keepdims` is `False` (default), the rank of the tensor is + reduced for spatial dimensions. If `keepdims` is `True`, the + spatial dimension are retained with length 1. + The behavior is the same as for `tf.reduce_mean` or `np.mean`. + + Input shape: + + - If `data_format='channels_last'`: + 5D tensor with shape: + `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + - If `data_format='channels_first'`: + 5D tensor with shape: + `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` + + Output shape: + + - If `keepdims=False`: + 2D tensor with shape `(batch_size, channels)`. + - If `keepdims=True`: + - If `data_format="channels_last"`: + 5D tensor with shape `(batch_size, 1, 1, 1, channels)` + - If `data_format="channels_first"`: + 5D tensor with shape `(batch_size, channels, 1, 1, 1)` + + Example: + + >>> x = np.random.rand(2, 4, 5, 4, 3) + >>> y = keras.layers.GlobalAveragePooling3D()(x) + >>> y.shape + (2, 3) + """ + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__( + pool_dimensions=3, + data_format=data_format, + keepdims=keepdims, + **kwargs, + ) + + def call(self, inputs): + if self.data_format == "channels_last": + return ops.mean(inputs, axis=[1, 2, 3], keepdims=self.keepdims) + return ops.mean(inputs, axis=[2, 3, 4], keepdims=self.keepdims) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling1d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling1d.py new file mode 100644 index 0000000000000000000000000000000000000000..7c6d9ff79692842bc55cf8baca7729690a4daa62 --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling1d.py @@ -0,0 +1,66 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + + +@keras_export( + [ + "keras.layers.GlobalMaxPooling1D", + "keras.layers.GlobalMaxPool1D", + ] +) +class GlobalMaxPooling1D(BaseGlobalPooling): + """Global max pooling operation for temporal data. + + Args: + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape `(batch, steps, features)` + while `"channels_first"` corresponds to inputs with shape + `(batch, features, steps)`. It defaults to the `image_data_format` + value found in your Keras config file at `~/.keras/keras.json`. + If you never set it, then it will be `"channels_last"`. + keepdims: A boolean, whether to keep the temporal dimension or not. + If `keepdims` is `False` (default), the rank of the tensor is + reduced for spatial dimensions. If `keepdims` is `True`, the + temporal dimension are retained with length 1. + The behavior is the same as for `tf.reduce_mean` or `np.mean`. + + Input shape: + + - If `data_format='channels_last'`: + 3D tensor with shape: + `(batch_size, steps, features)` + - If `data_format='channels_first'`: + 3D tensor with shape: + `(batch_size, features, steps)` + + Output shape: + + - If `keepdims=False`: + 2D tensor with shape `(batch_size, features)`. 
+ - If `keepdims=True`: + - If `data_format="channels_last"`: + 3D tensor with shape `(batch_size, 1, features)` + - If `data_format="channels_first"`: + 3D tensor with shape `(batch_size, features, 1)` + + Example: + + >>> x = np.random.rand(2, 3, 4) + >>> y = keras.layers.GlobalMaxPooling1D()(x) + >>> y.shape + (2, 4) + """ + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__( + pool_dimensions=1, + data_format=data_format, + keepdims=keepdims, + **kwargs, + ) + + def call(self, inputs): + steps_axis = 1 if self.data_format == "channels_last" else 2 + return ops.max(inputs, axis=steps_axis, keepdims=self.keepdims) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling2d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling2d.py new file mode 100644 index 0000000000000000000000000000000000000000..289ebe0a87d6230628a579f15f486f6eabcee595 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling2d.py @@ -0,0 +1,68 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + + +@keras_export( + [ + "keras.layers.GlobalMaxPooling2D", + "keras.layers.GlobalMaxPool2D", + ] +) +class GlobalMaxPooling2D(BaseGlobalPooling): + """Global max pooling operation for 2D data. + + Args: + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape `(batch, height, width, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch, features, height, weight)`. It defaults to the + `image_data_format` value found in your Keras config file at + `~/.keras/keras.json`. If you never set it, then it will be + `"channels_last"`. 
+ keepdims: A boolean, whether to keep the temporal dimension or not. + If `keepdims` is `False` (default), the rank of the tensor is + reduced for spatial dimensions. If `keepdims` is `True`, the + spatial dimension are retained with length 1. + The behavior is the same as for `tf.reduce_mean` or `np.mean`. + + Input shape: + + - If `data_format='channels_last'`: + 4D tensor with shape: + `(batch_size, height, width, channels)` + - If `data_format='channels_first'`: + 4D tensor with shape: + `(batch_size, channels, height, width)` + + Output shape: + + - If `keepdims=False`: + 2D tensor with shape `(batch_size, channels)`. + - If `keepdims=True`: + - If `data_format="channels_last"`: + 4D tensor with shape `(batch_size, 1, 1, channels)` + - If `data_format="channels_first"`: + 4D tensor with shape `(batch_size, channels, 1, 1)` + + Example: + + >>> x = np.random.rand(2, 4, 5, 3) + >>> y = keras.layers.GlobalMaxPooling2D()(x) + >>> y.shape + (2, 3) + """ + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__( + pool_dimensions=2, + data_format=data_format, + keepdims=keepdims, + **kwargs, + ) + + def call(self, inputs): + if self.data_format == "channels_last": + return ops.max(inputs, axis=[1, 2], keepdims=self.keepdims) + return ops.max(inputs, axis=[2, 3], keepdims=self.keepdims) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling3d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling3d.py new file mode 100644 index 0000000000000000000000000000000000000000..07e1eb065bc7e294ba5a78c76b3e715c3b23b8e0 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/global_max_pooling3d.py @@ -0,0 +1,69 @@ +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling + + +@keras_export( + [ 
+ "keras.layers.GlobalMaxPooling3D", + "keras.layers.GlobalMaxPool3D", + ] +) +class GlobalMaxPooling3D(BaseGlobalPooling): + """Global max pooling operation for 3D data. + + Args: + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape + `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. + It defaults to the `image_data_format` value found in your Keras + config file at `~/.keras/keras.json`. If you never set it, then it + will be `"channels_last"`. + keepdims: A boolean, whether to keep the temporal dimension or not. + If `keepdims` is `False` (default), the rank of the tensor is + reduced for spatial dimensions. If `keepdims` is `True`, the + spatial dimension are retained with length 1. + The behavior is the same as for `tf.reduce_mean` or `np.mean`. + + Input shape: + + - If `data_format='channels_last'`: + 5D tensor with shape: + `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + - If `data_format='channels_first'`: + 5D tensor with shape: + `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` + + Output shape: + + - If `keepdims=False`: + 2D tensor with shape `(batch_size, channels)`. 
+ - If `keepdims=True`: + - If `data_format="channels_last"`: + 5D tensor with shape `(batch_size, 1, 1, 1, channels)` + - If `data_format="channels_first"`: + 5D tensor with shape `(batch_size, channels, 1, 1, 1)` + + Example: + + >>> x = np.random.rand(2, 4, 5, 4, 3) + >>> y = keras.layers.GlobalMaxPooling3D()(x) + >>> y.shape + (2, 3) + """ + + def __init__(self, data_format=None, keepdims=False, **kwargs): + super().__init__( + pool_dimensions=3, + data_format=data_format, + keepdims=keepdims, + **kwargs, + ) + + def call(self, inputs): + if self.data_format == "channels_last": + return ops.max(inputs, axis=[1, 2, 3], keepdims=self.keepdims) + return ops.max(inputs, axis=[2, 3, 4], keepdims=self.keepdims) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling1d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling1d.py new file mode 100644 index 0000000000000000000000000000000000000000..c6c35d105f8f97f5d5e97c6ee3cfbefc78228be8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling1d.py @@ -0,0 +1,93 @@ +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + + +@keras_export(["keras.layers.MaxPooling1D", "keras.layers.MaxPool1D"]) +class MaxPooling1D(BasePooling): + """Max pooling operation for 1D temporal data. + + Downsamples the input representation by taking the maximum value over a + spatial window of size `pool_size`. The window is shifted by `strides`. + + The resulting output when using the `"valid"` padding option has a shape of: + `output_shape = (input_shape - pool_size + 1) / strides)`. + + The resulting output shape when using the `"same"` padding option is: + `output_shape = input_shape / strides` + + Args: + pool_size: int, size of the max pooling window. + strides: int or None. 
Specifies how much the pooling window moves + for each pooling step. If None, it will default to `pool_size`. + padding: string, either `"valid"` or `"same"` (case-insensitive). + `"valid"` means no padding. `"same"` results in padding evenly to + the left/right or up/down of the input such that output has the same + height/width dimension as the input. + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape `(batch, steps, features)` + while `"channels_first"` corresponds to inputs with shape + `(batch, features, steps)`. It defaults to the `image_data_format` + value found in your Keras config file at `~/.keras/keras.json`. + If you never set it, then it will be `"channels_last"`. + + Input shape: + + - If `data_format="channels_last"`: + 3D tensor with shape `(batch_size, steps, features)`. + - If `data_format="channels_first"`: + 3D tensor with shape `(batch_size, features, steps)`. + + Output shape: + + - If `data_format="channels_last"`: + 3D tensor with shape `(batch_size, downsampled_steps, features)`. + - If `data_format="channels_first"`: + 3D tensor with shape `(batch_size, features, downsampled_steps)`. + + Examples: + + `strides=1` and `padding="valid"`: + + >>> x = np.array([1., 2., 3., 4., 5.]) + >>> x = np.reshape(x, [1, 5, 1]) + >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2, + ... strides=1, padding="valid") + >>> max_pool_1d(x) + + `strides=2` and `padding="valid"`: + + >>> x = np.array([1., 2., 3., 4., 5.]) + >>> x = np.reshape(x, [1, 5, 1]) + >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2, + ... strides=2, padding="valid") + >>> max_pool_1d(x) + + `strides=1` and `padding="same"`: + + >>> x = np.array([1., 2., 3., 4., 5.]) + >>> x = np.reshape(x, [1, 5, 1]) + >>> max_pool_1d = keras.layers.MaxPooling1D(pool_size=2, + ... 
strides=1, padding="same") + >>> max_pool_1d(x) + """ + + def __init__( + self, + pool_size=2, + strides=None, + padding="valid", + data_format=None, + name=None, + **kwargs, + ): + super().__init__( + pool_size, + strides, + pool_dimensions=1, + pool_mode="max", + padding=padding, + data_format=data_format, + name=name, + **kwargs, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling2d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling2d.py new file mode 100644 index 0000000000000000000000000000000000000000..237da0670ab1da7d72063f855dc74b7db3885622 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling2d.py @@ -0,0 +1,109 @@ +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + + +@keras_export(["keras.layers.MaxPooling2D", "keras.layers.MaxPool2D"]) +class MaxPooling2D(BasePooling): + """Max pooling operation for 2D spatial data. + + Downsamples the input along its spatial dimensions (height and width) + by taking the maximum value over an input window + (of size defined by `pool_size`) for each channel of the input. + The window is shifted by `strides` along each dimension. + + The resulting output when using the `"valid"` padding option has a spatial + shape (number of rows or columns) of: + `output_shape = math.floor((input_shape - pool_size) / strides) + 1` + (when `input_shape >= pool_size`) + + The resulting output shape when using the `"same"` padding option is: + `output_shape = math.floor((input_shape - 1) / strides) + 1` + + Args: + pool_size: int or tuple of 2 integers, factors by which to downscale + (dim1, dim2). If only one integer is specified, the same + window length will be used for all dimensions. + strides: int or tuple of 2 integers, or None. Strides values. If None, + it will default to `pool_size`. 
If only one int is specified, the + same stride size will be used for all dimensions. + padding: string, either `"valid"` or `"same"` (case-insensitive). + `"valid"` means no padding. `"same"` results in padding evenly to + the left/right or up/down of the input such that output has the same + height/width dimension as the input. + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape `(batch, height, width, channels)` + while `"channels_first"` corresponds to inputs with shape + `(batch, channels, height, width)`. It defaults to the + `image_data_format` value found in your Keras config file at + `~/.keras/keras.json`. If you never set it, then it will be + `"channels_last"`. + + Input shape: + + - If `data_format="channels_last"`: + 4D tensor with shape `(batch_size, height, width, channels)`. + - If `data_format="channels_first"`: + 4D tensor with shape `(batch_size, channels, height, width)`. + + Output shape: + + - If `data_format="channels_last"`: + 4D tensor with shape + `(batch_size, pooled_height, pooled_width, channels)`. + - If `data_format="channels_first"`: + 4D tensor with shape + `(batch_size, channels, pooled_height, pooled_width)`. + + Examples: + + `strides=(1, 1)` and `padding="valid"`: + + >>> x = np.array([[1., 2., 3.], + ... [4., 5., 6.], + ... [7., 8., 9.]]) + >>> x = np.reshape(x, [1, 3, 3, 1]) + >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2), + ... strides=(1, 1), padding="valid") + >>> max_pool_2d(x) + + `strides=(2, 2)` and `padding="valid"`: + + >>> x = np.array([[1., 2., 3., 4.], + ... [5., 6., 7., 8.], + ... [9., 10., 11., 12.]]) + >>> x = np.reshape(x, [1, 3, 4, 1]) + >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2), + ... strides=(2, 2), padding="valid") + >>> max_pool_2d(x) + + `stride=(1, 1)` and `padding="same"`: + + >>> x = np.array([[1., 2., 3.], + ... [4., 5., 6.], + ... 
[7., 8., 9.]]) + >>> x = np.reshape(x, [1, 3, 3, 1]) + >>> max_pool_2d = keras.layers.MaxPooling2D(pool_size=(2, 2), + ... strides=(1, 1), padding="same") + >>> max_pool_2d(x) + """ + + def __init__( + self, + pool_size=(2, 2), + strides=None, + padding="valid", + data_format=None, + name=None, + **kwargs, + ): + super().__init__( + pool_size, + strides, + pool_dimensions=2, + pool_mode="max", + padding=padding, + data_format=data_format, + name=name, + **kwargs, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling3d.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling3d.py new file mode 100644 index 0000000000000000000000000000000000000000..d6487e87f321af62eb5ad4d68339c5624e73049c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/pooling/max_pooling3d.py @@ -0,0 +1,85 @@ +from keras.src.api_export import keras_export +from keras.src.layers.pooling.base_pooling import BasePooling + + +@keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"]) +class MaxPooling3D(BasePooling): + """Max pooling operation for 3D data (spatial or spatio-temporal). + + Downsamples the input along its spatial dimensions (depth, height, and + width) by taking the maximum value over an input window (of size defined by + `pool_size`) for each channel of the input. The window is shifted by + `strides` along each dimension. + + Args: + pool_size: int or tuple of 3 integers, factors by which to downscale + (dim1, dim2, dim3). If only one integer is specified, the same + window length will be used for all dimensions. + strides: int or tuple of 3 integers, or None. Strides values. If None, + it will default to `pool_size`. If only one int is specified, the + same stride size will be used for all dimensions. + padding: string, either `"valid"` or `"same"` (case-insensitive). + `"valid"` means no padding. 
`"same"` results in padding evenly to + the left/right or up/down of the input such that output has the same + height/width dimension as the input. + data_format: string, either `"channels_last"` or `"channels_first"`. + The ordering of the dimensions in the inputs. `"channels_last"` + corresponds to inputs with shape + `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while + `"channels_first"` corresponds to inputs with shape + `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. + It defaults to the `image_data_format` value found in your Keras + config file at `~/.keras/keras.json`. If you never set it, then it + will be `"channels_last"`. + + Input shape: + + - If `data_format="channels_last"`: + 5D tensor with shape: + `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` + - If `data_format="channels_first"`: + 5D tensor with shape: + `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` + + Output shape: + + - If `data_format="channels_last"`: + 5D tensor with shape: + `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` + - If `data_format="channels_first"`: + 5D tensor with shape: + `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)` + + Example: + + ```python + depth = 30 + height = 30 + width = 30 + channels = 3 + + inputs = keras.layers.Input(shape=(depth, height, width, channels)) + layer = keras.layers.MaxPooling3D(pool_size=3) + outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3) + ``` + """ + + def __init__( + self, + pool_size=(2, 2, 2), + strides=None, + padding="valid", + data_format=None, + name=None, + **kwargs, + ): + super().__init__( + pool_size, + strides, + pool_dimensions=3, + pool_mode="max", + padding=padding, + data_format=data_format, + name=name, + **kwargs, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/__init__.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6433c30142fc092217e1bff330b33c1993bc250 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/auto_contrast.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/auto_contrast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9451f0f59d4928e62f8fddd2bafaca78e69d773d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/auto_contrast.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/base_image_preprocessing_layer.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/base_image_preprocessing_layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52107d3648cfcda008c544c1b991131dd05a30f9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/base_image_preprocessing_layer.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/center_crop.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/center_crop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb591e50b4cf4bb763902d4fbbc4f26fa2c8517e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/center_crop.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/equalization.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/equalization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f05ec806d65516deb1c06d6bef689159b77a088 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/equalization.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/max_num_bounding_box.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/max_num_bounding_box.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0d4ee72dc19d22a48c7cebabb8ea61e41d0fa25 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/max_num_bounding_box.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/mix_up.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/mix_up.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94af22a89915469946a3b8a2b9ff5992b8e8aae7 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/mix_up.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/rand_augment.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/rand_augment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8049888390ecde58da8ef2a8d002c07f9a66f160 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/rand_augment.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_brightness.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_brightness.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6ee603e2e3bc34d01c51eedbb57e5f91a36d34b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_brightness.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_degeneration.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_degeneration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90346c115996ceb6923726ec1f6af7d868ccc0f6 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_degeneration.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_jitter.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_jitter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8668cc2a08d665f789721feacaa920bac957fa14 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_jitter.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9cccb13dc92011d7431d473f934ed6ddbc27b32 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/bounding_box.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/bounding_box.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9169b6602221d2e51bcc66d114e36e2e87fe120d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/bounding_box.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/converters.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/converters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e504473921c54f140952ac5ad8b8d426a576fc21 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/converters.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/formats.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..776ae002af5d47dc9f9bc7d07b20605da5e0dad4 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/formats.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/iou.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/iou.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00e01e917821560d07aec24d4d7f6fb324acaca2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/iou.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/validation.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa6e2b4f9636e1b2c43c6745de1485dde02b24fe Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/validation.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/bounding_box.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/bounding_box.py new file mode 100644 index 0000000000000000000000000000000000000000..08e41e312231f49f9d3c687e6c1d150e00acbc44 --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/bounding_box.py @@ -0,0 +1,468 @@ +import math + +from keras.src.utils import backend_utils + +SUPPORTED_FORMATS = ( + "xyxy", + "yxyx", + "xywh", + "center_xywh", + "center_yxhw", + "rel_xyxy", + "rel_yxyx", + "rel_xywh", + "rel_center_xywh", +) + + +class BoundingBox: + def __init__(self): + self.backend = backend_utils.DynamicBackend() + + def convert_format( + self, + boxes, + source: str, + target: str, + height=None, + width=None, + dtype="float32", + ): + if isinstance(boxes, dict): + boxes["boxes"] = self.convert_format( + boxes["boxes"], + source=source, + target=target, + height=height, + width=width, + dtype=dtype, + ) + return boxes + + to_xyxy_converters = { + "xyxy": self._xyxy_to_xyxy, + "yxyx": self._yxyx_to_xyxy, + "xywh": self._xywh_to_xyxy, + "center_xywh": self._center_xywh_to_xyxy, + "center_yxhw": self._center_yxhw_to_xyxy, + "rel_xyxy": self._rel_xyxy_to_xyxy, + "rel_yxyx": self._rel_yxyx_to_xyxy, + "rel_xywh": self._rel_xywh_to_xyxy, + "rel_center_xywh": self._rel_center_xywh_to_xyxy, + } + from_xyxy_converters = { + "xyxy": self._xyxy_to_xyxy, + "yxyx": self._xyxy_to_yxyx, + "xywh": self._xyxy_to_xywh, + "center_xywh": self._xyxy_to_center_xywh, + "center_yxhw": self._xyxy_to_center_yxhw, + "rel_xyxy": self._xyxy_to_rel_xyxy, + "rel_yxyx": self._xyxy_to_rel_yxyx, + "rel_xywh": self._xyxy_to_rel_xywh, + "rel_center_xywh": self._xyxy_to_rel_center_xywh, + } + + ops = self.backend + boxes_shape = ops.shape(boxes) + if boxes_shape[-1] != 4: + raise ValueError( + "`boxes` must be a tensor with the last dimension of 4. " + f"Received: boxes.shape={boxes_shape}" + ) + source = source.lower() + target = target.lower() + if source not in SUPPORTED_FORMATS or target not in SUPPORTED_FORMATS: + raise ValueError( + f"Invalid source or target format. 
" + f"Supported formats: {SUPPORTED_FORMATS}" + ) + + if (source.startswith("rel_") or target.startswith("rel_")) and ( + width is None or height is None + ): + raise ValueError( + "convert_format() must receive `height` and `width` " + "transforming between relative and absolute formats." + f"convert_format() received source=`{source}`, " + f"target=`{target}, " + f"but height={height} and width={width}." + ) + boxes = ops.cast(boxes, dtype) + if source == target: + return boxes + if width is not None: + width = ops.cast(width, dtype) + if height is not None: + height = ops.cast(height, dtype) + + if source.startswith("rel_") and target.startswith("rel_"): + source = source.replace("rel_", "", 1) + target = target.replace("rel_", "", 1) + to_xyxy_converter = to_xyxy_converters[source] + from_xyxy_converter = from_xyxy_converters[target] + in_xyxy_boxes = to_xyxy_converter(boxes, height, width) + return from_xyxy_converter(in_xyxy_boxes, height, width) + + def clip_to_image_size( + self, + bounding_boxes, + height=None, + width=None, + bounding_box_format="xyxy", + ): + if bounding_box_format not in ("xyxy", "rel_xyxy"): + raise NotImplementedError + if bounding_box_format == "xyxy" and (height is None or width is None): + raise ValueError( + "`height` and `width` must be set if `format='xyxy'`." 
+ ) + + ops = self.backend + boxes, labels = bounding_boxes["boxes"], bounding_boxes["labels"] + if width is not None: + width = ops.cast(width, boxes.dtype) + if height is not None: + height = ops.cast(height, boxes.dtype) + + if bounding_box_format == "xyxy": + x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1) + x1 = ops.numpy.clip(x1, 0, width) + y1 = ops.numpy.clip(y1, 0, height) + x2 = ops.numpy.clip(x2, 0, width) + y2 = ops.numpy.clip(y2, 0, height) + boxes = ops.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + areas = self._compute_area(boxes) + areas = ops.numpy.squeeze(areas, axis=-1) + labels = ops.numpy.where(areas > 0, labels, -1) + elif bounding_box_format == "rel_xyxy": + x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1) + x1 = ops.numpy.clip(x1, 0.0, 1.0) + y1 = ops.numpy.clip(y1, 0.0, 1.0) + x2 = ops.numpy.clip(x2, 0.0, 1.0) + y2 = ops.numpy.clip(y2, 0.0, 1.0) + boxes = ops.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + areas = self._compute_area(boxes) + areas = ops.numpy.squeeze(areas, axis=-1) + labels = ops.numpy.where(areas > 0, labels, -1) + + result = bounding_boxes.copy() + result["boxes"] = boxes + result["labels"] = labels + return result + + def affine( + self, + boxes, + angle, + translate_x, + translate_y, + scale, + shear_x, + shear_y, + height, + width, + center_x=None, + center_y=None, + ): + ops = self.backend + + boxes_shape = ops.shape(boxes) + batch_size = boxes_shape[0] + n_boxes = boxes_shape[1] + if center_x is None: + center_x = 0.5 + if center_y is None: + center_y = 0.5 + matrix = self._compute_inverse_affine_matrix( + center_x, + center_y, + angle, + translate_x, + translate_y, + scale, + shear_x, + shear_y, + height, + width, + ) + boxes = ops.cast(boxes, dtype=matrix.dtype) + transposed_matrix = ops.numpy.transpose(matrix[:, :2, :], [0, 2, 1]) + points = boxes # [B, N, 4] + points = ops.numpy.stack( + [ + points[..., 0], + points[..., 1], + points[..., 2], + points[..., 1], + points[..., 2], + points[..., 3], + 
points[..., 0], + points[..., 3], + ], + axis=-1, + ) + points = ops.numpy.reshape(points, [batch_size, n_boxes, 4, 2]) + points = ops.numpy.concatenate( + [ + points, + ops.numpy.ones([batch_size, n_boxes, 4, 1], points.dtype), + ], + axis=-1, + ) + transformed_points = ops.numpy.einsum( + "bnxy,byz->bnxz", points, transposed_matrix + ) + boxes_min = ops.numpy.amin(transformed_points, axis=2) + boxes_max = ops.numpy.amax(transformed_points, axis=2) + outputs = ops.numpy.concatenate([boxes_min, boxes_max], axis=-1) + return outputs + + def crop(self, boxes, top, left, height, width): + ops = self.backend + + x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1) + x1 = x1 - left + y1 = y1 - top + x2 = x2 - left + y2 = y2 - top + x1 = ops.numpy.clip(x1, 0, width) + y1 = ops.numpy.clip(y1, 0, height) + x2 = ops.numpy.clip(x2, 0, width) + y2 = ops.numpy.clip(y2, 0, height) + outputs = ops.numpy.concatenate([x1, y1, x2, y2], axis=-1) + return outputs + + def pad(self, boxes, top, left): + ops = self.backend + + x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1) + x1 = x1 + left + y1 = y1 + top + x2 = x2 + left + y2 = y2 + top + outputs = ops.numpy.concatenate([x1, y1, x2, y2], axis=-1) + return outputs + + # Converters + + def _xyxy_to_xyxy(self, boxes, height=None, width=None): + return boxes + + def _yxyx_to_xyxy(self, boxes, height=None, width=None): + y1, x1, y2, x2 = self.backend.numpy.split(boxes, 4, axis=-1) + return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + def _xywh_to_xyxy(self, boxes, height=None, width=None): + x1, y1, w, h = self.backend.numpy.split(boxes, 4, axis=-1) + x2 = x1 + w + y2 = y1 + h + return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + def _center_xywh_to_xyxy(self, boxes, height=None, width=None): + ops = self.backend + cx, cy, w, h = ops.numpy.split(boxes, 4, axis=-1) + half_w = w / 2.0 + half_h = h / 2.0 + x1 = cx - half_w + y1 = cy - half_h + x2 = cx + half_w + y2 = cy + half_h + return 
self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + def _center_yxhw_to_xyxy(self, boxes, height=None, width=None): + ops = self.backend + cy, cx, h, w = ops.numpy.split(boxes, 4, axis=-1) + half_w = w / 2.0 + half_h = h / 2.0 + x1 = cx - half_w + y1 = cy - half_h + x2 = cx + half_w + y2 = cy + half_h + return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + def _rel_xyxy_to_xyxy(self, boxes, height=None, width=None): + ops = self.backend + rel_x1, rel_y1, rel_x2, rel_y2 = ops.numpy.split(boxes, 4, axis=-1) + x1 = rel_x1 * width + y1 = rel_y1 * height + x2 = rel_x2 * width + y2 = rel_y2 * height + return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + def _rel_yxyx_to_xyxy(self, boxes, height=None, width=None): + ops = self.backend + rel_y1, rel_x1, rel_y2, rel_x2 = ops.numpy.split(boxes, 4, axis=-1) + x1 = rel_x1 * width + y1 = rel_y1 * height + x2 = rel_x2 * width + y2 = rel_y2 * height + return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + def _rel_xywh_to_xyxy(self, boxes, height=None, width=None): + ops = self.backend + rel_x1, rel_y1, rel_w, rel_h = ops.numpy.split(boxes, 4, axis=-1) + x1 = rel_x1 * width + y1 = rel_y1 * height + x2 = (rel_x1 + rel_w) * width + y2 = (rel_y1 + rel_h) * height + return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + def _rel_center_xywh_to_xyxy(self, boxes, height=None, width=None): + ops = self.backend + rel_cx, rel_cy, rel_w, rel_h = ops.numpy.split(boxes, 4, axis=-1) + half_rel_w = rel_w / 2.0 + half_rel_h = rel_h / 2.0 + x1 = (rel_cx - half_rel_w) * height + y1 = (rel_cy - half_rel_h) * width + x2 = (rel_cx + half_rel_w) * height + y2 = (rel_cy + half_rel_h) * width + return self.backend.numpy.concatenate([x1, y1, x2, y2], axis=-1) + + def _xyxy_to_yxyx(self, boxes, height=None, width=None): + x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1) + return self.backend.numpy.concatenate([y1, x1, y2, x2], axis=-1) + + def _xyxy_to_xywh(self, boxes, 
height=None, width=None): + x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1) + w = x2 - x1 + h = y2 - y1 + return self.backend.numpy.concatenate([x1, y1, w, h], axis=-1) + + def _xyxy_to_center_xywh(self, boxes, height=None, width=None): + x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1) + cx = x1 + ((x2 - x1) / 2.0) + cy = y1 + ((y2 - y1) / 2.0) + w = x2 - x1 + h = y2 - y1 + return self.backend.numpy.concatenate([cx, cy, w, h], axis=-1) + + def _xyxy_to_center_yxhw(self, boxes, height=None, width=None): + x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1) + cx = x1 + ((x2 - x1) / 2.0) + cy = y1 + ((y2 - y1) / 2.0) + w = x2 - x1 + h = y2 - y1 + return self.backend.numpy.concatenate([cy, cx, h, w], axis=-1) + + def _xyxy_to_rel_xyxy(self, boxes, height=None, width=None): + x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1) + rel_x1 = self.backend.numpy.divide(x1, width) + rel_y1 = self.backend.numpy.divide(y1, height) + rel_x2 = self.backend.numpy.divide(x2, width) + rel_y2 = self.backend.numpy.divide(y2, height) + return self.backend.numpy.concatenate( + [rel_x1, rel_y1, rel_x2, rel_y2], axis=-1 + ) + + def _xyxy_to_rel_yxyx(self, boxes, height=None, width=None): + x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1) + rel_x1 = self.backend.numpy.divide(x1, width) + rel_y1 = self.backend.numpy.divide(y1, height) + rel_x2 = self.backend.numpy.divide(x2, width) + rel_y2 = self.backend.numpy.divide(y2, height) + return self.backend.numpy.concatenate( + [rel_y1, rel_x1, rel_y2, rel_x2], axis=-1 + ) + + def _xyxy_to_rel_xywh(self, boxes, height=None, width=None): + x1, y1, x2, y2 = self.backend.numpy.split(boxes, 4, axis=-1) + rel_x1 = x1 / width + rel_y1 = y1 / height + rel_w = (x2 - x1) / width + rel_h = (y2 - y1) / height + return self.backend.numpy.concatenate( + [rel_x1, rel_y1, rel_w, rel_h], axis=-1 + ) + + def _xyxy_to_rel_center_xywh(self, boxes, height=None, width=None): + x1, y1, x2, y2 = 
self.backend.numpy.split(boxes, 4, axis=-1) + rel_cx = (x1 + ((x2 - x1) / 2.0)) / width + rel_cy = (y1 + ((y2 - y1) / 2.0)) / height + rel_w = (x2 - x1) / width + rel_h = (y2 - y1) / height + return self.backend.numpy.concatenate( + [rel_cx, rel_cy, rel_w, rel_h], axis=-1 + ) + + # Clip + def _compute_area(self, boxes, format="xyxy"): + if format not in ("xyxy", "rel_xyxy"): + raise NotImplementedError + + ops = self.backend + x1, y1, x2, y2 = ops.numpy.split(boxes, 4, axis=-1) + widths = x2 - x1 + heights = y2 - y1 + return widths * heights + + def _compute_inverse_affine_matrix( + self, + center_x, + center_y, + angle, + translate_x, + translate_y, + scale, + shear_x, + shear_y, + height, + width, + ): + # Ref: TF._geometry._get_inverse_affine_matrix + ops = self.backend + batch_size = ops.shape(angle)[0] + dtype = angle.dtype + + angle = -angle + shear_x = -shear_x + shear_y = -shear_y + + cx = ops.numpy.multiply(center_x, (width - 1)) + cy = ops.numpy.multiply(center_y, (height - 1)) + rot = ops.numpy.multiply(angle, 1.0 / 180.0 * math.pi) + tx = ops.numpy.multiply(-translate_x, (width - 1)) + ty = ops.numpy.multiply(-translate_y, (height - 1)) + sx = ops.numpy.multiply(shear_x, 1.0 / 180.0 * math.pi) + sy = ops.numpy.multiply(shear_y, 1.0 / 180.0 * math.pi) + + # Cached results + cos_sy = ops.numpy.cos(sy) + tan_sx = ops.numpy.tan(sx) + rot_minus_sy = rot - sy + cx_plus_tx = cx + tx + cy_plus_ty = cy + ty + + # Rotate Scale Shear (RSS) without scaling + a = ops.numpy.cos(rot_minus_sy) / cos_sy + b = a * tan_sx + ops.numpy.sin(rot) + c = -ops.numpy.sin(rot_minus_sy) / cos_sy + d = ops.numpy.cos(rot) - c * tan_sx + + # Inverted rotation matrix with scale and shear + # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1 + a0 = ops.numpy.multiply(d, scale) + a1 = ops.numpy.multiply(-b, scale) + b0 = ops.numpy.multiply(-c, scale) + b1 = ops.numpy.multiply(a, scale) + a2 = cx - a0 * cx_plus_tx - a1 * cy_plus_ty + b2 = cy - b0 * cx_plus_tx - b1 * 
cy_plus_ty + + # Shape of matrix: [[batch_size], ...] -> [batch_size, 6] + matrix = ops.numpy.stack( + [ + a0, + a1, + a2, + b0, + b1, + b2, + ops.numpy.zeros([batch_size], dtype), + ops.numpy.zeros([batch_size], dtype), + ops.numpy.ones([batch_size], dtype), + ], + axis=-1, + ) + matrix = ops.numpy.reshape(matrix, [batch_size, 3, 3]) + return matrix diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..6a6d6f9867b96c56bf1531c381aec432427561e7 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py @@ -0,0 +1,448 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.bounding_box import ( # noqa: E501 + BoundingBox, +) +from keras.src.utils import backend_utils + + +@keras_export("keras.utils.bounding_boxes.convert_format") +def convert_format( + boxes, source, target, height=None, width=None, dtype="float32" +): + """Converts bounding boxes between formats. + + Supported formats (case-insensitive): + `"xyxy"`: [left, top, right, bottom] + `"yxyx"`: [top, left, bottom, right] + `"xywh"`: [left, top, width, height] + `"center_xywh"`: [center_x, center_y, width, height] + `"center_yxhw"`: [center_y, center_x, height, width] + `"rel_xyxy"`, `"rel_yxyx"`, `"rel_xywh"`, `"rel_center_xywh"`: Relative + versions of the above formats, where coordinates are normalized + to the range [0, 1] based on the image `height` and `width`. + + Args: + boxes: Bounding boxes tensor/array or dictionary of `boxes` and + `labels`. 
@keras_export("keras.utils.bounding_boxes.convert_format")
def convert_format(
    boxes, source, target, height=None, width=None, dtype="float32"
):
    """Converts bounding boxes between formats.

    Supported formats (case-insensitive):
    `"xyxy"`: [left, top, right, bottom]
    `"yxyx"`: [top, left, bottom, right]
    `"xywh"`: [left, top, width, height]
    `"center_xywh"`: [center_x, center_y, width, height]
    `"center_yxhw"`: [center_y, center_x, height, width]
    `"rel_xyxy"`, `"rel_yxyx"`, `"rel_xywh"`, `"rel_center_xywh"`: Relative
        versions of the above formats, where coordinates are normalized
        to the range [0, 1] based on the image `height` and `width`.

    Args:
        boxes: Bounding boxes tensor/array or dictionary of `boxes` and
            `labels`.
        source: Source format string.
        target: Target format string.
        height: Image height (required for relative formats).
        width: Image width (required for relative formats).
        dtype: Data type for conversion (optional).

    Returns:
        Converted boxes.

    Raises:
        ValueError: For invalid formats, shapes, or missing dimensions.

    Example:
    ```python
    boxes = np.array([[10, 20, 30, 40], [50, 60, 70, 80]])
    # Convert from 'xyxy' to 'xywh' format
    boxes_xywh = keras.utils.bounding_boxes.convert_format(
        boxes, source='xyxy', target='xywh'
    )  # Output: [[10. 20. 20. 20.], [50. 60. 20. 20.]]

    # Convert to relative 'rel_xyxy' format
    boxes_rel_xyxy = keras.utils.bounding_boxes.convert_format(
        boxes, source='xyxy', target='rel_xyxy', height=200, width=300
    )
    ```
    """
    box_utils = BoundingBox()
    # Switch to the TensorFlow backend while tracing inside a tf.data
    # pipeline so the underlying ops are TF ops.
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    try:
        boxes = box_utils.convert_format(
            boxes=boxes,
            source=source,
            target=target,
            height=height,
            width=width,
            dtype=dtype,
        )
    finally:
        # Always restore the original backend, even if conversion raised.
        box_utils.backend.reset()
    return boxes


@keras_export("keras.utils.bounding_boxes.clip_to_image_size")
def clip_to_image_size(
    bounding_boxes, height=None, width=None, bounding_box_format="xyxy"
):
    """Clips bounding boxes to be within the image dimensions.

    Args:
        bounding_boxes: A dictionary with 'boxes' shape `(N, 4)` or
            `(batch, N, 4)` and 'labels' shape `(N,)` or `(batch, N,)`.
        height: Image height.
        width: Image width.
        bounding_box_format: The format of the input bounding boxes.
            Defaults to `"xyxy"`.

    Returns:
        Clipped bounding boxes.

    Example:
    ```python
    boxes = {"boxes": np.array([[-10, -20, 150, 160], [50, 40, 70, 80]]),
             "labels": np.array([0, 1])}
    clipped_boxes = keras.utils.bounding_boxes.clip_to_image_size(
        boxes, height=100, width=120,
    )
    # Output will have boxes clipped to the image boundaries, and labels
    # potentially adjusted if the clipped area becomes zero
    ```
    """
    box_utils = BoundingBox()
    # Switch to the TensorFlow backend when inside a tf.data pipeline.
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    try:
        bounding_boxes = box_utils.clip_to_image_size(
            bounding_boxes,
            height=height,
            width=width,
            bounding_box_format=bounding_box_format,
        )
    finally:
        # Always restore the original backend.
        box_utils.backend.reset()
    return bounding_boxes


@keras_export("keras.utils.bounding_boxes.affine_transform")
def affine_transform(
    boxes,
    angle,
    translate_x,
    translate_y,
    scale,
    shear_x,
    shear_y,
    height,
    width,
    center_x=None,
    center_y=None,
    bounding_box_format="xyxy",
):
    """Applies an affine transformation to the bounding boxes.

    The `height` and `width` parameters are used to normalize the
    translation and scaling factors.

    Args:
        boxes: The bounding boxes to transform, a tensor/array of shape
            `(N, 4)` or `(batch_size, N, 4)`.
        angle: Rotation angle in degrees.
        translate_x: Horizontal translation fraction.
        translate_y: Vertical translation fraction.
        scale: Scaling factor.
        shear_x: Shear angle in x-direction (degrees).
        shear_y: Shear angle in y-direction (degrees).
        height: Height of the image/data.
        width: Width of the image/data.
        center_x: x-coordinate of the transformation center (fraction).
        center_y: y-coordinate of the transformation center (fraction).
        bounding_box_format: The format of the input bounding boxes.
            Defaults to `"xyxy"`.

    Returns:
        The transformed bounding boxes, a tensor/array with the same shape
        as the input `boxes`.

    Raises:
        NotImplementedError: If `bounding_box_format` is not `"xyxy"`.
    """
    if bounding_box_format != "xyxy":
        raise NotImplementedError(
            "`affine_transform` only supports the 'xyxy' "
            f"bounding_box_format. Received: {bounding_box_format}"
        )
    box_utils = BoundingBox()
    # Switch to the TensorFlow backend when inside a tf.data pipeline.
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    try:
        boxes = box_utils.affine(
            boxes,
            angle,
            translate_x,
            translate_y,
            scale,
            shear_x,
            shear_y,
            height,
            width,
            center_x=center_x,
            center_y=center_y,
        )
    finally:
        box_utils.backend.reset()
    return boxes


@keras_export("keras.utils.bounding_boxes.crop")
def crop(boxes, top, left, height, width, bounding_box_format="xyxy"):
    """Crops bounding boxes based on the given offsets and dimensions.

    This function crops bounding boxes to a specified region defined by
    `top`, `left`, `height`, and `width`. The input boxes must already be
    in `xyxy` format.

    Args:
        boxes: The bounding boxes to crop. A NumPy array or tensor of shape
            `(N, 4)` or `(batch_size, N, 4)`.
        top: The vertical offset of the top-left corner of the cropping
            region.
        left: The horizontal offset of the top-left corner of the cropping
            region.
        height: The height of the cropping region.
        width: The width of the cropping region.
        bounding_box_format: The format of the input bounding boxes.
            Defaults to `"xyxy"`.

    Returns:
        The cropped bounding boxes.

    Raises:
        NotImplementedError: If `bounding_box_format` is not `"xyxy"`.

    Example:
    ```python
    boxes = np.array([[10, 20, 50, 60], [70, 80, 100, 120]])  # xyxy format
    cropped_boxes = keras.utils.bounding_boxes.crop(
        boxes, bounding_box_format="xyxy", top=10, left=20, height=40,
        width=30
    )  # Cropping a 30x40 region starting at (20, 10)
    print(cropped_boxes)
    # Expected output:
    # array([[ 0., 10., 30., 50.],
    #        [50., 70., 80., 110.]])
    ```
    """
    if bounding_box_format != "xyxy":
        raise NotImplementedError(
            "`crop` only supports the 'xyxy' bounding_box_format. "
            f"Received: {bounding_box_format}"
        )
    box_utils = BoundingBox()
    # Switch to the TensorFlow backend when inside a tf.data pipeline.
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    try:
        outputs = box_utils.crop(boxes, top, left, height, width)
    finally:
        box_utils.backend.reset()
    return outputs


@keras_export("keras.utils.bounding_boxes.pad")
def pad(boxes, top, left, height=None, width=None, bounding_box_format="xyxy"):
    """Pads bounding boxes by adding top and left offsets.

    This function adds padding to the bounding boxes by increasing the
    'top' and 'left' coordinates by the specified amounts. The method
    assumes the input bounding_box_format is `xyxy`.

    Args:
        boxes: Bounding boxes to pad. Shape `(N, 4)` or `(batch, N, 4)`.
        top: Vertical padding to add.
        left: Horizontal padding to add.
        height: Image height. Defaults to `None` (unused by the padding
            itself).
        width: Image width. Defaults to `None` (unused by the padding
            itself).
        bounding_box_format: The format of the input bounding boxes.
            Defaults to `"xyxy"`.

    Returns:
        Padded bounding boxes in the original format.

    Raises:
        NotImplementedError: If `bounding_box_format` is not `"xyxy"`.
    """
    if bounding_box_format != "xyxy":
        raise NotImplementedError(
            "`pad` only supports the 'xyxy' bounding_box_format. "
            f"Received: {bounding_box_format}"
        )
    box_utils = BoundingBox()
    # Switch to the TensorFlow backend when inside a tf.data pipeline.
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend("tensorflow")
    try:
        outputs = box_utils.pad(boxes, top, left)
    finally:
        box_utils.backend.reset()
    return outputs


@keras_export("keras.utils.bounding_boxes.encode_box_to_deltas")
def encode_box_to_deltas(
    anchors,
    boxes,
    anchor_format,
    box_format,
    encoding_format="center_yxhw",
    variance=None,
    image_shape=None,
):
    """Encodes bounding boxes relative to anchors as deltas.

    This function calculates the deltas that represent the difference
    between bounding boxes and provided anchors. Deltas encode the offsets
    and scaling factors to apply to anchors to obtain the target boxes.

    Boxes and anchors are first converted to the specified
    `encoding_format` (defaulting to `center_yxhw`) for consistent delta
    representation.

    Args:
        anchors: `Tensors`. Anchor boxes with shape of `(N, 4)` where N is
            the number of anchors.
        boxes: `Tensors` Bounding boxes to encode. Boxes can be of shape
            `(B, N, 4)` or `(N, 4)`.
        anchor_format: str. The format of the input `anchors`
            (e.g., "xyxy", "xywh", etc.).
        box_format: str. The format of the input `boxes`
            (e.g., "xyxy", "xywh", etc.).
        encoding_format: str. The intermediate format to which boxes and
            anchors are converted before delta calculation. Defaults to
            "center_yxhw".
        variance: `List[float]`. A 4-element array/tensor representing
            variance factors to scale the box deltas. If provided, the
            calculated deltas are divided by the variance. Defaults to
            None.
        image_shape: `Tuple[int]`. The shape of the image (height, width,
            3). When using relative bounding box format for `box_format`
            the `image_shape` is used for normalization.

    Returns:
        Encoded box deltas. The return type matches the `encoding_format`.

    Raises:
        ValueError: If `variance` is not None and its length is not 4.
        ValueError: If `encoding_format` is not `"center_xywh"` or
            `"center_yxhw"`.
    """
    if variance is not None:
        variance = ops.convert_to_tensor(variance, "float32")
        var_len = variance.shape[-1]
        if var_len != 4:
            raise ValueError(f"`variance` must be length 4, got {variance}")

    if encoding_format not in ["center_xywh", "center_yxhw"]:
        raise ValueError(
            "`encoding_format` should be one of 'center_xywh' or "
            f"'center_yxhw', got {encoding_format}"
        )

    if image_shape is None:
        height, width = None, None
    else:
        height, width, _ = image_shape

    encoded_anchors = convert_format(
        anchors,
        source=anchor_format,
        target=encoding_format,
        height=height,
        width=width,
    )
    boxes = convert_format(
        boxes,
        source=box_format,
        target=encoding_format,
        height=height,
        width=width,
    )
    # Clamp dimensions away from zero so the divisions/log below are
    # numerically safe for degenerate boxes.
    anchor_dimensions = ops.maximum(encoded_anchors[..., 2:], backend.epsilon())
    box_dimensions = ops.maximum(boxes[..., 2:], backend.epsilon())
    # Anchors are unbatched; boxes can be either batched or unbatched
    # (broadcasting handles both cases).
    boxes_delta = ops.concatenate(
        [
            (boxes[..., :2] - encoded_anchors[..., :2]) / anchor_dimensions,
            ops.log(box_dimensions / anchor_dimensions),
        ],
        axis=-1,
    )
    if variance is not None:
        boxes_delta /= variance
    return boxes_delta


@keras_export("keras.utils.bounding_boxes.decode_deltas_to_boxes")
def decode_deltas_to_boxes(
    anchors,
    boxes_delta,
    anchor_format,
    box_format,
    encoded_format="center_yxhw",
    variance=None,
    image_shape=None,
):
    """Converts bounding boxes from delta format to the specified
    `box_format`.

    This function decodes bounding box deltas relative to anchors to
    obtain the final bounding box coordinates. The boxes are encoded in a
    specific `encoded_format` (center_yxhw by default) during the decoding
    process. This allows flexibility in how the deltas are applied to the
    anchors.

    Args:
        anchors: Can be `Tensors` or `Dict[Tensors]` where keys are level
            indices and values are corresponding anchor boxes.
            The shape of the array/tensor should be `(N, 4)` where N is
            the number of anchors.
        boxes_delta: Can be `Tensors` or `Dict[Tensors]` Bounding box
            deltas must have the same type and structure as `anchors`. The
            shape of the array/tensor can be `(N, 4)` or `(B, N, 4)` where
            N is the number of boxes.
        anchor_format: str. The format of the input `anchors`.
            (e.g., `"xyxy"`, `"xywh"`, etc.)
        box_format: str. The desired format for the output boxes.
            (e.g., `"xyxy"`, `"xywh"`, etc.)
        encoded_format: str. Raw output format from regression head.
            Defaults to `"center_yxhw"`.
        variance: `List[floats]`. A 4-element array/tensor representing
            variance factors to scale the box deltas. If provided, the
            deltas are multiplied by the variance before being applied to
            the anchors. Defaults to None.
        image_shape: `Tuple[int]`. The shape of the image (height, width,
            3). When using relative bounding box format for `box_format`
            the `image_shape` is used for normalization.

    Returns:
        Decoded box coordinates. The return type matches the `box_format`.

    Raises:
        ValueError: If `variance` is not None and its length is not 4.
        ValueError: If `encoded_format` is not `"center_xywh"` or
            `"center_yxhw"`.
    """
    if variance is not None:
        variance = ops.convert_to_tensor(variance, "float32")
        var_len = variance.shape[-1]
        if var_len != 4:
            raise ValueError(f"`variance` must be length 4, got {variance}")

    if encoded_format not in ["center_xywh", "center_yxhw"]:
        raise ValueError(
            f"`encoded_format` should be 'center_xywh' or 'center_yxhw', "
            f"but got '{encoded_format}'."
        )

    if image_shape is None:
        height, width = None, None
    else:
        height, width, _ = image_shape

    def decode_single_level(anchor, box_delta):
        # Decode one feature-pyramid level: apply deltas to the anchors in
        # `encoded_format`, then convert to the requested output format.
        encoded_anchor = convert_format(
            anchor,
            source=anchor_format,
            target=encoded_format,
            height=height,
            width=width,
        )
        if variance is not None:
            box_delta = box_delta * variance
        # Anchors are unbatched; boxes can be batched or unbatched.
        box = ops.concatenate(
            [
                box_delta[..., :2] * encoded_anchor[..., 2:]
                + encoded_anchor[..., :2],
                ops.exp(box_delta[..., 2:]) * encoded_anchor[..., 2:],
            ],
            axis=-1,
        )
        box = convert_format(
            box,
            source=encoded_format,
            target=box_format,
            height=height,
            width=width,
        )
        return box

    if isinstance(anchors, dict) and isinstance(boxes_delta, dict):
        boxes = {}
        for lvl, anchor in anchors.items():
            boxes[lvl] = decode_single_level(anchor, boxes_delta[lvl])
        return boxes
    else:
        return decode_single_level(anchors, boxes_delta)


class XYXY:
    """XYXY contains axis indices for the XYXY format.

    All values in the XYXY format should be absolute pixel values.

    The XYXY format consists of the following required indices:

    - LEFT: left of the bounding box
    - TOP: top of the bounding box
    - RIGHT: right of the bounding box
    - BOTTOM: bottom of the bounding box
    """

    LEFT = 0
    TOP = 1
    RIGHT = 2
    BOTTOM = 3


class REL_XYXY:
    """REL_XYXY contains axis indices for the REL_XYXY format.

    REL_XYXY is like XYXY, but each value is relative to the width and
    height of the origin image. Values are percentages of the origin
    images' width and height respectively.

    The REL_XYXY format consists of the following required indices:

    - LEFT: left of the bounding box
    - TOP: top of the bounding box
    - RIGHT: right of the bounding box
    - BOTTOM: bottom of the bounding box
    """

    LEFT = 0
    TOP = 1
    RIGHT = 2
    BOTTOM = 3


class CENTER_XYWH:
    """CENTER_XYWH contains axis indices for the CENTER_XYWH format.

    All values in the CENTER_XYWH format should be absolute pixel values.

    The CENTER_XYWH format consists of the following required indices:

    - X: X coordinate of the center of the bounding box
    - Y: Y coordinate of the center of the bounding box
    - WIDTH: width of the bounding box
    - HEIGHT: height of the bounding box
    """

    X = 0
    Y = 1
    WIDTH = 2
    HEIGHT = 3


class XYWH:
    """XYWH contains axis indices for the XYWH format.

    All values in the XYWH format should be absolute pixel values.

    The XYWH format consists of the following required indices:

    - X: X coordinate of the left of the bounding box
    - Y: Y coordinate of the top of the bounding box
    - WIDTH: width of the bounding box
    - HEIGHT: height of the bounding box
    """

    X = 0
    Y = 1
    WIDTH = 2
    HEIGHT = 3


class REL_XYWH:
    """REL_XYWH contains axis indices for the REL_XYWH format.

    REL_XYWH is like XYWH, but each value is relative to the width and
    height of the origin image. Values are percentages of the origin
    images' width and height respectively.

    The REL_XYWH format consists of the following required indices:

    - X: X coordinate of the left of the bounding box
    - Y: Y coordinate of the top of the bounding box
    - WIDTH: width of the bounding box
    - HEIGHT: height of the bounding box
    """

    X = 0
    Y = 1
    WIDTH = 2
    HEIGHT = 3


class YXYX:
    """YXYX contains axis indices for the YXYX format.

    All values in the YXYX format should be absolute pixel values.

    The YXYX format consists of the following required indices:

    - TOP: top of the bounding box
    - LEFT: left of the bounding box
    - BOTTOM: bottom of the bounding box
    - RIGHT: right of the bounding box
    """

    TOP = 0
    LEFT = 1
    BOTTOM = 2
    RIGHT = 3


class REL_YXYX:
    """REL_YXYX contains axis indices for the REL_YXYX format.

    REL_YXYX is like YXYX, but each value is relative to the width and
    height of the origin image. Values are percentages of the origin
    images' width and height respectively.

    The REL_YXYX format consists of the following required indices:

    - TOP: top of the bounding box
    - LEFT: left of the bounding box
    - BOTTOM: bottom of the bounding box
    - RIGHT: right of the bounding box
    """

    TOP = 0
    LEFT = 1
    BOTTOM = 2
    RIGHT = 3
"""Contains functions to compute IoUs of bounding boxes."""

import math

import keras
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes import (  # noqa: E501
    converters,
)


def _compute_area(box):
    """Computes area for bounding boxes.

    Args:
        box: [N, 4] or [batch_size, N, 4] float Tensor, either batched
            or unbatched boxes, in `yxyx` order.
    Returns:
        a float Tensor of [N] or [batch_size, N]
    """
    y_min, x_min, y_max, x_max = ops.split(box[..., :4], 4, axis=-1)
    return ops.squeeze((y_max - y_min) * (x_max - x_min), axis=-1)


def _compute_intersection(boxes1, boxes2):
    """Computes intersection area between two sets of boxes.

    Args:
        boxes1: [N, 4] or [batch_size, N, 4] float Tensor boxes.
        boxes2: [M, 4] or [batch_size, M, 4] float Tensor boxes.
    Returns:
        a [N, M] or [batch_size, N, M] float Tensor.
    """
    y_min1, x_min1, y_max1, x_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
    y_min2, x_min2, y_max2, x_max2 = ops.split(boxes2[..., :4], 4, axis=-1)
    boxes2_rank = len(boxes2.shape)
    perm = [1, 0] if boxes2_rank == 2 else [0, 2, 1]
    # [N, M] or [batch_size, N, M]
    intersect_ymax = ops.minimum(y_max1, ops.transpose(y_max2, perm))
    intersect_ymin = ops.maximum(y_min1, ops.transpose(y_min2, perm))
    intersect_xmax = ops.minimum(x_max1, ops.transpose(x_max2, perm))
    intersect_xmin = ops.maximum(x_min1, ops.transpose(x_min2, perm))

    # Clamp negative extents (disjoint boxes) to zero before multiplying.
    intersect_height = intersect_ymax - intersect_ymin
    intersect_width = intersect_xmax - intersect_xmin
    zeros_t = ops.cast(0, intersect_height.dtype)
    intersect_height = ops.maximum(zeros_t, intersect_height)
    intersect_width = ops.maximum(zeros_t, intersect_width)

    return intersect_height * intersect_width


@keras_export("keras.utils.bounding_boxes.compute_iou")
def compute_iou(
    boxes1,
    boxes2,
    bounding_box_format,
    use_masking=False,
    mask_val=-1,
    image_shape=None,
):
    """Computes a lookup table vector containing the ious for a given set
    boxes.

    The lookup vector is to be indexed by [`boxes1_index`,`boxes2_index`]
    if boxes are unbatched and by [`batch`, `boxes1_index`,`boxes2_index`]
    if the boxes are batched.

    The users can pass `boxes1` and `boxes2` to be different ranks. For
    example:
    1) `boxes1`: [batch_size, M, 4], `boxes2`: [batch_size, N, 4] ->
       return [batch_size, M, N].
    2) `boxes1`: [batch_size, M, 4], `boxes2`: [N, 4] ->
       return [batch_size, M, N]
    3) `boxes1`: [M, 4], `boxes2`: [batch_size, N, 4] ->
       return [batch_size, M, N]
    4) `boxes1`: [M, 4], `boxes2`: [N, 4] -> return [M, N]

    Args:
        boxes1: a list of bounding boxes in 'corners' format. Can be
            batched or unbatched.
        boxes2: a list of bounding boxes in 'corners' format. Can be
            batched or unbatched.
        bounding_box_format: a case-insensitive string which is one of
            `"xyxy"`, `"rel_xyxy"`, `"xywh"`, `"center_xywh"`, `"yxyx"`,
            `"rel_yxyx"`.
        use_masking: whether masking will be applied. This will mask all
            `boxes1` or `boxes2` that have values less than 0 in all its 4
            dimensions. Default to `False`.
        mask_val: int to mask those returned IOUs if the masking is True,
            defaults to -1.
        image_shape: `Tuple[int]`. The shape of the image (height, width,
            3). When using relative bounding box format for `box_format`
            the `image_shape` is used for normalization.

    Returns:
        iou_lookup_table: a vector containing the pairwise ious of boxes1
        and boxes2.
    """  # noqa: E501

    boxes1_rank = len(ops.shape(boxes1))
    boxes2_rank = len(ops.shape(boxes2))

    if boxes1_rank not in [2, 3]:
        raise ValueError(
            "compute_iou() expects boxes1 to be batched, or to be "
            f"unbatched. Received len(boxes1.shape)={boxes1_rank}, "
            f"len(boxes2.shape)={boxes2_rank}. Expected either "
            "len(boxes1.shape)=2 or len(boxes1.shape)=3."
        )
    if boxes2_rank not in [2, 3]:
        raise ValueError(
            "compute_iou() expects boxes2 to be batched, or to be "
            f"unbatched. Received len(boxes1.shape)={boxes1_rank}, "
            f"len(boxes2.shape)={boxes2_rank}. Expected either "
            "len(boxes2.shape)=2 or len(boxes2.shape)=3."
        )

    target_format = "yxyx"
    if "rel" in bounding_box_format and image_shape is None:
        raise ValueError(
            "When using relative bounding box formats (e.g. `rel_yxyx`) "
            "the `image_shape` argument must be provided."
            f"Received `image_shape`: {image_shape}"
        )

    if image_shape is None:
        height, width = None, None
    else:
        height, width, _ = image_shape

    boxes1 = converters.convert_format(
        boxes1,
        source=bounding_box_format,
        target=target_format,
        height=height,
        width=width,
    )

    boxes2 = converters.convert_format(
        boxes2,
        source=bounding_box_format,
        target=target_format,
        height=height,
        width=width,
    )

    intersect_area = _compute_intersection(boxes1, boxes2)
    boxes1_area = _compute_area(boxes1)
    boxes2_area = _compute_area(boxes2)
    boxes2_area_rank = len(boxes2_area.shape)
    boxes2_axis = 1 if (boxes2_area_rank == 2) else 0
    boxes1_area = ops.expand_dims(boxes1_area, axis=-1)
    boxes2_area = ops.expand_dims(boxes2_area, axis=boxes2_axis)
    union_area = boxes1_area + boxes2_area - intersect_area
    res = ops.divide(intersect_area, union_area + backend.epsilon())

    if not use_masking:
        return res

    # Only needed on the masking path; computed after the early return so
    # the common (unmasked) path does no extra work.
    perm = [1, 0] if boxes1_rank == 2 else [0, 2, 1]

    mask_val_t = ops.cast(mask_val, res.dtype) * ops.ones_like(res)
    # Mark "background" sentinel boxes (all coordinates negative).
    boxes1_mask = ops.less(ops.max(boxes1, axis=-1, keepdims=True), 0.0)
    boxes2_mask = ops.less(ops.max(boxes2, axis=-1, keepdims=True), 0.0)
    background_mask = ops.logical_or(
        boxes1_mask, ops.transpose(boxes2_mask, perm)
    )
    iou_lookup_table = ops.where(background_mask, mask_val_t, res)
    return iou_lookup_table


@keras_export("keras.utils.bounding_boxes.compute_ciou")
def compute_ciou(boxes1, boxes2, bounding_box_format, image_shape=None):
    """
    Computes the Complete IoU (CIoU) between two bounding boxes or between
    two batches of bounding boxes.

    CIoU loss is an extension of GIoU loss, which further improves the IoU
    optimization for object detection. CIoU loss not only penalizes the
    bounding box coordinates but also considers the aspect ratio and
    center distance of the boxes. The length of the last dimension should
    be 4 to represent the bounding boxes.

    Args:
        boxes1 (tensor): tensor representing the first bounding box with
            shape (..., 4).
        boxes2 (tensor): tensor representing the second bounding box with
            shape (..., 4).
        bounding_box_format: a case-insensitive string (for example,
            "xyxy"). Each bounding box is defined by these 4 values. For
            detailed information on the supported formats, see the
            [KerasCV bounding box
            documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
        image_shape: `Tuple[int]`. The shape of the image (height, width,
            3). When using relative bounding box format for `box_format`
            the `image_shape` is used for normalization.

    Returns:
        tensor: The CIoU distance between the two bounding boxes.
    """
    target_format = "xyxy"
    # Bug fix: previously this raised for ANY relative format, even when
    # `image_shape` was supplied (contradicting the error message and the
    # identical guard in `compute_iou`). Only raise when the shape needed
    # for normalization is actually missing.
    if "rel" in bounding_box_format and image_shape is None:
        raise ValueError(
            "When using relative bounding box formats (e.g. `rel_yxyx`) "
            "the `image_shape` argument must be provided."
            f"Received `image_shape`: {image_shape}"
        )

    if image_shape is None:
        height, width = None, None
    else:
        height, width, _ = image_shape

    boxes1 = converters.convert_format(
        boxes1,
        source=bounding_box_format,
        target=target_format,
        height=height,
        width=width,
    )

    boxes2 = converters.convert_format(
        boxes2,
        source=bounding_box_format,
        target=target_format,
        height=height,
        width=width,
    )

    x_min1, y_min1, x_max1, y_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
    x_min2, y_min2, x_max2, y_max2 = ops.split(boxes2[..., :4], 4, axis=-1)

    # Epsilon keeps the aspect-ratio arctan terms finite for zero-height
    # boxes. Uses `backend.epsilon()` consistently with `compute_iou`.
    width_1 = x_max1 - x_min1
    height_1 = y_max1 - y_min1 + backend.epsilon()
    width_2 = x_max2 - x_min2
    height_2 = y_max2 - y_min2 + backend.epsilon()

    intersection_area = ops.maximum(
        ops.minimum(x_max1, x_max2) - ops.maximum(x_min1, x_min2), 0
    ) * ops.maximum(
        ops.minimum(y_max1, y_max2) - ops.maximum(y_min1, y_min2), 0
    )
    union_area = (
        width_1 * height_1
        + width_2 * height_2
        - intersection_area
        + backend.epsilon()
    )
    iou = ops.squeeze(
        ops.divide(intersection_area, union_area + backend.epsilon()),
        axis=-1,
    )

    # Diagonal of the smallest enclosing (convex) box and squared distance
    # between box centers -- the distance penalty term of CIoU.
    convex_width = ops.maximum(x_max1, x_max2) - ops.minimum(x_min1, x_min2)
    convex_height = ops.maximum(y_max1, y_max2) - ops.minimum(y_min1, y_min2)
    convex_diagonal_squared = ops.squeeze(
        convex_width**2 + convex_height**2 + backend.epsilon(),
        axis=-1,
    )
    centers_distance_squared = ops.squeeze(
        ((x_min1 + x_max1) / 2 - (x_min2 + x_max2) / 2) ** 2
        + ((y_min1 + y_max1) / 2 - (y_min2 + y_max2) / 2) ** 2,
        axis=-1,
    )

    # Aspect-ratio consistency term `v` and its trade-off weight `alpha`.
    v = ops.squeeze(
        ops.power(
            (4 / math.pi**2)
            * (ops.arctan(width_2 / height_2) - ops.arctan(width_1 / height_1)),
            2,
        ),
        axis=-1,
    )
    alpha = v / (v - iou + (1 + backend.epsilon()))

    return iou - (
        centers_distance_squared / convex_diagonal_squared + v * alpha
    )
from keras.src import backend as current_backend
from keras.src.utils import tf_utils


def _classes_shape(batched, classes_shape, max_boxes):
    # Dense target shape for the `labels` component when densifying a
    # ragged tensor; `None` means "leave the shape unconstrained".
    if max_boxes is None:
        return None
    if batched:
        return [None, max_boxes] + classes_shape[2:]
    return [max_boxes] + classes_shape[2:]


def _box_shape(batched, boxes_shape, max_boxes):
    # Dense target shape for the `boxes` component. Ensure we don't drop
    # the final coordinate axis (size 4) in RaggedTensor mode.
    if max_boxes is None:
        shape = list(boxes_shape)
        shape[-1] = 4
        return shape
    if batched:
        return [None, max_boxes, 4]
    return [max_boxes, 4]


def densify_bounding_boxes(
    bounding_boxes,
    is_batched=False,
    max_boxes=None,
    boxes_default_value=0,
    labels_default_value=-1,
    backend=None,
):
    """Converts possibly ragged/list bounding boxes to dense tensors.

    Pads (or truncates) each sample's `boxes`/`labels` to `max_boxes`
    entries, filling with `boxes_default_value` / `labels_default_value`.

    Args:
        bounding_boxes: dict with `"boxes"` and `"labels"` entries; each
            may be a (nested) Python list, a ragged tensor, or a dense
            tensor/array.
        is_batched: whether the inputs carry a leading batch dimension.
        max_boxes: number of boxes to pad/truncate to. If `None`, list
            inputs are padded to the longest sample and ragged inputs keep
            their bounding shape.
        boxes_default_value: fill value for padded box coordinates.
        labels_default_value: fill value for padded labels.
        backend: backend module to use for tensor conversion; defaults to
            the current Keras backend.

    Returns:
        `bounding_boxes` with dense `"boxes"` and `"labels"`.
    """
    validate_bounding_boxes(bounding_boxes)
    boxes = bounding_boxes["boxes"]
    labels = bounding_boxes["labels"]
    backend = backend or current_backend
    if isinstance(boxes, list):
        if boxes and isinstance(boxes[0], list):
            if boxes[0] and isinstance(boxes[0][0], list):
                # Batched case: a list of samples, each a list of boxes.
                if not isinstance(labels[0][0], int):
                    raise ValueError(
                        "If providing `bounding_boxes['labels']` as a list, "
                        "it should contain integers labels. Received: "
                        f"bounding_boxes['labels']={labels}"
                    )
                # Bug fix: the condition was inverted (`is not None`),
                # which clobbered a caller-supplied `max_boxes` and
                # crashed when it was `None`. Default to the longest
                # sample only when no explicit `max_boxes` was requested.
                if max_boxes is None:
                    max_boxes = max(len(b) for b in boxes)
                new_boxes = []
                new_labels = []
                for sample_boxes, sample_labels in zip(boxes, labels):
                    if len(sample_boxes) >= max_boxes:
                        new_boxes.append(sample_boxes[:max_boxes])
                        new_labels.append(sample_labels[:max_boxes])
                    else:
                        num_boxes_to_add = max_boxes - len(sample_boxes)
                        added_boxes = [
                            [boxes_default_value] * 4
                            for _ in range(num_boxes_to_add)
                        ]
                        new_boxes.append(sample_boxes + added_boxes)
                        new_labels.append(
                            sample_labels
                            + [labels_default_value] * num_boxes_to_add
                        )
            else:
                # Unbatched case: a single list of boxes.
                # Bug fix: this branch referenced the batched branch's
                # loop variables `b`/`l` (a NameError) and subtracted from
                # a possibly-`None` `max_boxes`.
                if max_boxes is None:
                    max_boxes = len(boxes)
                if len(boxes) >= max_boxes:
                    new_boxes = boxes[:max_boxes]
                    new_labels = labels[:max_boxes]
                else:
                    num_boxes_to_add = max_boxes - len(boxes)
                    added_boxes = [
                        [boxes_default_value] * 4
                        for _ in range(num_boxes_to_add)
                    ]
                    new_boxes = boxes + added_boxes
                    new_labels = labels + (
                        [labels_default_value] * num_boxes_to_add
                    )
            return {
                # Bug fix: labels were built from `new_boxes` instead of
                # `new_labels`; boxes are coordinates and are converted as
                # float32 for consistency with the dense-tensor path below.
                "boxes": backend.convert_to_tensor(new_boxes, dtype="float32"),
                "labels": backend.convert_to_tensor(new_labels, dtype="int32"),
            }

    if tf_utils.is_ragged_tensor(boxes):
        # Bug fix: the shape helpers were swapped -- boxes take the
        # `(..., max_boxes, 4)` shape and labels take the classes shape.
        bounding_boxes["boxes"] = bounding_boxes["boxes"].to_tensor(
            default_value=boxes_default_value,
            shape=_box_shape(
                is_batched, bounding_boxes["boxes"].shape, max_boxes
            ),
        )
        bounding_boxes["labels"] = bounding_boxes["labels"].to_tensor(
            default_value=labels_default_value,
            shape=_classes_shape(
                is_batched, bounding_boxes["labels"].shape, max_boxes
            ),
        )
        return bounding_boxes

    bounding_boxes["boxes"] = backend.convert_to_tensor(boxes, dtype="float32")
    bounding_boxes["labels"] = backend.convert_to_tensor(labels)
    return bounding_boxes


def validate_bounding_boxes(bounding_boxes):
    """Validates the structure and ranks of a `bounding_boxes` dict.

    Raises:
        ValueError: if `bounding_boxes` is not a dict with `"boxes"` and
            `"labels"`, if list/ragged components are of mismatched kinds
            or lengths, or if tensor components have incompatible ranks.
    """
    if (
        not isinstance(bounding_boxes, dict)
        or "labels" not in bounding_boxes
        or "boxes" not in bounding_boxes
    ):
        raise ValueError(
            "Expected `bounding_boxes` argument to be a "
            "dict with keys 'boxes' and 'labels'. Received: "
            f"bounding_boxes={bounding_boxes}"
        )
    boxes = bounding_boxes["boxes"]
    labels = bounding_boxes["labels"]
    if isinstance(boxes, list):
        if not isinstance(labels, list):
            raise ValueError(
                "If `bounding_boxes['boxes']` is a list, then "
                "`bounding_boxes['labels']` must also be a list."
                f"Received: bounding_boxes['labels']={labels}"
            )
        if len(boxes) != len(labels):
            raise ValueError(
                "If `bounding_boxes['boxes']` and "
                "`bounding_boxes['labels']` are both lists, "
                "they must have the same length. Received: "
                f"len(bounding_boxes['boxes'])={len(boxes)} and "
                f"len(bounding_boxes['labels'])={len(labels)}"
            )
    elif tf_utils.is_ragged_tensor(boxes):
        if not tf_utils.is_ragged_tensor(labels):
            raise ValueError(
                "If `bounding_boxes['boxes']` is a Ragged tensor, "
                " `bounding_boxes['labels']` must also be a "
                "Ragged tensor. "
                f"Received: bounding_boxes['labels']={labels}"
            )
    else:
        boxes_shape = current_backend.shape(boxes)
        labels_shape = current_backend.shape(labels)
        if len(boxes_shape) == 2:  # (boxes, 4)
            if len(labels_shape) not in {1, 2}:
                raise ValueError(
                    "Found "
                    f"bounding_boxes['boxes'].shape={boxes_shape} "
                    "and expected bounding_boxes['labels'] to have "
                    "rank 1 or 2, but received: "
                    f"bounding_boxes['labels'].shape={labels_shape} "
                )
        elif len(boxes_shape) == 3:
            if len(labels_shape) not in {2, 3}:
                raise ValueError(
                    "Found "
                    f"bounding_boxes['boxes'].shape={boxes_shape} "
                    "and expected bounding_boxes['labels'] to have "
                    "rank 2 or 3, but received: "
                    f"bounding_boxes['labels'].shape={labels_shape} "
                )
        else:
            raise ValueError(
                "Expected `bounding_boxes['boxes']` "
                "to have rank 2 or 3, with shape "
                "(num_boxes, 4) or (batch_size, num_boxes, 4). "
                "Received: "
                f"bounding_boxes['boxes'].shape={boxes_shape}"
            )