diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1c46a35c34780adbc600cc451ba6bd5837252d7 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/backend.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b16e8cdaaf7ff53f2341de71f15ab9054e87e8c3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/backend.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/layers.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/layers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e3db362a74b1baa718b4731f59a2ccddf656e07 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/layers.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/losses.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/losses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f421e346564713271096eecb68ed923928b091ee Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/__pycache__/losses.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/backend.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..1c3876d858360b0146eb21d219d2b67269157244 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/backend.py @@ -0,0 +1,2291 @@ +"""Legacy Keras 1/2 backend functions.""" + +import itertools + +import numpy as np + +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.utils.module_utils import tensorflow as tf + +py_any = any +py_all = all + + +@keras_export("keras._legacy.backend.abs") +def abs(x): + """DEPRECATED.""" + return tf.abs(x) + + +@keras_export("keras._legacy.backend.all") +def all(x, axis=None, keepdims=False): + """DEPRECATED.""" + x = tf.cast(x, tf.bool) + return tf.reduce_all(x, axis, keepdims) + + +@keras_export("keras._legacy.backend.any") +def any(x, axis=None, keepdims=False): + """DEPRECATED.""" + x = tf.cast(x, tf.bool) + return tf.reduce_any(x, axis, keepdims) + + +@keras_export("keras._legacy.backend.argmax") +def argmax(x, axis=-1): + """DEPRECATED.""" + return tf.argmax(x, axis) + + +@keras_export("keras._legacy.backend.argmin") +def argmin(x, axis=-1): + """DEPRECATED.""" + return tf.argmin(x, axis) + + +@keras_export("keras._legacy.backend.arange") +def arange(start, stop=None, step=1, dtype="int32"): + """DEPRECATED.""" + if stop is None and start < 0: + start = 0 + result = tf.range(start, limit=stop, delta=step, name="arange") + if 
dtype != "int32": + result = tf.cast(result, dtype) + return result + + +@keras_export("keras._legacy.backend.batch_dot") +def batch_dot(x, y, axes=None): + """DEPRECATED.""" + x_shape = x.shape + y_shape = y.shape + + x_ndim = len(x_shape) + y_ndim = len(y_shape) + + if x_ndim < 2 or y_ndim < 2: + raise ValueError( + "Cannot do batch_dot on inputs " + "with rank < 2. " + "Received inputs with tf.shapes " + + str(x_shape) + + " and " + + str(y_shape) + + "." + ) + + x_batch_size = x_shape[0] + y_batch_size = y_shape[0] + + if x_batch_size is not None and y_batch_size is not None: + if x_batch_size != y_batch_size: + raise ValueError( + "Cannot do batch_dot on inputs " + "with different batch sizes. " + "Received inputs with tf.shapes " + + str(x_shape) + + " and " + + str(y_shape) + + "." + ) + if isinstance(axes, int): + axes = [axes, axes] + + if axes is None: + if y_ndim == 2: + axes = [x_ndim - 1, y_ndim - 1] + else: + axes = [x_ndim - 1, y_ndim - 2] + + if py_any(isinstance(a, (list, tuple)) for a in axes): + raise ValueError( + "Multiple target dimensions are not supported. " + + "Expected: None, int, (int, int), " + + "Provided: " + + str(axes) + ) + + # if tuple, convert to list. + axes = list(axes) + + # convert negative indices. + if axes[0] < 0: + axes[0] += x_ndim + if axes[1] < 0: + axes[1] += y_ndim + + # sanity checks + if 0 in axes: + raise ValueError( + "Cannot perform batch_dot over axis 0. " + "If your inputs are not batched, " + "add a dummy batch dimension to your " + "inputs using K.expand_dims(x, 0)" + ) + a0, a1 = axes + d1 = x_shape[a0] + d2 = y_shape[a1] + + if d1 is not None and d2 is not None and d1 != d2: + raise ValueError( + "Cannot do batch_dot on inputs with tf.shapes " + + str(x_shape) + + " and " + + str(y_shape) + + " with axes=" + + str(axes) + + ". x.shape[%d] != y.shape[%d] (%d != %d)." + % (axes[0], axes[1], d1, d2) + ) + + # backup ndims. Need them later. 
+ orig_x_ndim = x_ndim + orig_y_ndim = y_ndim + + # if rank is 2, expand to 3. + if x_ndim == 2: + x = tf.expand_dims(x, 1) + a0 += 1 + x_ndim += 1 + if y_ndim == 2: + y = tf.expand_dims(y, 2) + y_ndim += 1 + + # bring x's dimension to be reduced to last axis. + if a0 != x_ndim - 1: + pattern = list(range(x_ndim)) + for i in range(a0, x_ndim - 1): + pattern[i] = pattern[i + 1] + pattern[-1] = a0 + x = tf.transpose(x, pattern) + + # bring y's dimension to be reduced to axis 1. + if a1 != 1: + pattern = list(range(y_ndim)) + for i in range(a1, 1, -1): + pattern[i] = pattern[i - 1] + pattern[1] = a1 + y = tf.transpose(y, pattern) + + # normalize both inputs to rank 3. + if x_ndim > 3: + # squash middle dimensions of x. + x_shape = tf.shape(x) + x_mid_dims = x_shape[1:-1] + x_squashed_shape = tf.stack([x_shape[0], -1, x_shape[-1]]) + x = tf.reshape(x, x_squashed_shape) + x_squashed = True + else: + x_squashed = False + + if y_ndim > 3: + # squash trailing dimensions of y. + y_shape = tf.shape(y) + y_trail_dims = y_shape[2:] + y_squashed_shape = tf.stack([y_shape[0], y_shape[1], -1]) + y = tf.reshape(y, y_squashed_shape) + y_squashed = True + else: + y_squashed = False + + result = tf.matmul(x, y) + + # if inputs were squashed, we have to reshape the matmul output. + output_shape = tf.shape(result) + do_reshape = False + + if x_squashed: + output_shape = tf.concat( + [output_shape[:1], x_mid_dims, output_shape[-1:]], 0 + ) + do_reshape = True + + if y_squashed: + output_shape = tf.concat([output_shape[:-1], y_trail_dims], 0) + do_reshape = True + + if do_reshape: + result = tf.reshape(result, output_shape) + + # if the inputs were originally rank 2, we remove the added 1 dim. 
+ if orig_x_ndim == 2: + result = tf.squeeze(result, 1) + elif orig_y_ndim == 2: + result = tf.squeeze(result, -1) + + return result + + +@keras_export("keras._legacy.backend.batch_flatten") +def batch_flatten(x): + """DEPRECATED.""" + x = tf.reshape(x, tf.stack([-1, prod(tf.shape(x)[1:])])) + return x + + +@keras_export("keras._legacy.backend.batch_get_value") +def batch_get_value(tensors): + """DEPRECATED.""" + return [x.numpy() for x in tensors] + + +@keras_export("keras._legacy.backend.batch_set_value") +def batch_set_value(tuples): + """DEPRECATED.""" + if tf.executing_eagerly() or tf.inside_function(): + for x, value in tuples: + value = np.asarray(value, dtype=x.dtype.name) + x.assign(value) + + +@keras_export("keras._legacy.backend.batch_normalization") +def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3): + """DEPRECATED.""" + return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon) + + +@keras_export("keras._legacy.backend.bias_add") +def bias_add(x, bias, data_format=None): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + bias_shape = bias.shape + if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1: + raise ValueError( + f"Unexpected bias dimensions {len(bias_shape)}. 
" + f"Expected it to be 1 or {ndim(x) - 1} dimensions" + ) + + if len(bias_shape) == 1: + if data_format == "channels_first": + return tf.nn.bias_add(x, bias, data_format="NCHW") + return tf.nn.bias_add(x, bias, data_format="NHWC") + if ndim(x) in (3, 4, 5): + if data_format == "channels_first": + bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1] + return x + reshape(bias, bias_reshape_axis) + return x + reshape(bias, (1,) + bias_shape) + return tf.nn.bias_add(x, bias) + + +@keras_export("keras._legacy.backend.binary_crossentropy") +def binary_crossentropy(target, output, from_logits=False): + """DEPRECATED.""" + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + + if from_logits: + return tf.nn.sigmoid_cross_entropy_with_logits( + labels=target, logits=output + ) + + epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype) + output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) + + # Compute cross entropy from probabilities. + bce = target * tf.math.log(output + backend.epsilon()) + bce += (1 - target) * tf.math.log(1 - output + backend.epsilon()) + return -bce + + +@keras_export("keras._legacy.backend.binary_focal_crossentropy") +def binary_focal_crossentropy( + target, + output, + apply_class_balancing=False, + alpha=0.25, + gamma=2.0, + from_logits=False, +): + """DEPRECATED.""" + sigmoidal = tf.sigmoid(output) if from_logits else output + + p_t = target * sigmoidal + (1 - target) * (1 - sigmoidal) + + # Calculate focal factor + focal_factor = tf.pow(1.0 - p_t, gamma) + + # Binary crossentropy + bce = binary_crossentropy( + target=target, + output=output, + from_logits=from_logits, + ) + focal_bce = focal_factor * bce + + if apply_class_balancing: + weight = target * alpha + (1 - target) * (1 - alpha) + focal_bce = weight * focal_bce + + return focal_bce + + +@keras_export("keras._legacy.backend.cast") +def cast(x, dtype): + """DEPRECATED.""" + return tf.cast(x, dtype) + + 
+@keras_export("keras._legacy.backend.cast_to_floatx") +def cast_to_floatx(x): + """DEPRECATED.""" + if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)): + return tf.cast(x, dtype=backend.floatx()) + return np.asarray(x, dtype=backend.floatx()) + + +@keras_export("keras._legacy.backend.categorical_crossentropy") +def categorical_crossentropy(target, output, from_logits=False, axis=-1): + """DEPRECATED.""" + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + target.shape.assert_is_compatible_with(output.shape) + + if from_logits: + return tf.nn.softmax_cross_entropy_with_logits( + labels=target, logits=output, axis=axis + ) + + # Adjust the predictions so that the probability of + # each class for every sample adds up to 1 + # This is needed to ensure that the cross entropy is + # computed correctly. + output = output / tf.reduce_sum(output, axis, True) + + # Compute cross entropy from probabilities. + epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype) + output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) + return -tf.reduce_sum(target * tf.math.log(output), axis) + + +@keras_export("keras._legacy.backend.categorical_focal_crossentropy") +def categorical_focal_crossentropy( + target, + output, + alpha=0.25, + gamma=2.0, + from_logits=False, + axis=-1, +): + """DEPRECATED.""" + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + target.shape.assert_is_compatible_with(output.shape) + + if from_logits: + output = tf.nn.softmax(output, axis=axis) + + # Adjust the predictions so that the probability of + # each class for every sample adds up to 1 + # This is needed to ensure that the cross entropy is + # computed correctly. 
+ output = output / tf.reduce_sum(output, axis=axis, keepdims=True) + + epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype) + output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) + + # Calculate cross entropy + cce = -target * tf.math.log(output) + + # Calculate factors + modulating_factor = tf.pow(1.0 - output, gamma) + weighting_factor = tf.multiply(modulating_factor, alpha) + + # Apply weighting factor + focal_cce = tf.multiply(weighting_factor, cce) + focal_cce = tf.reduce_sum(focal_cce, axis=axis) + return focal_cce + + +@keras_export("keras._legacy.backend.clip") +def clip(x, min_value, max_value): + """DEPRECATED.""" + if isinstance(min_value, (int, float)) and isinstance( + max_value, (int, float) + ): + if max_value < min_value: + max_value = min_value + if min_value is None: + min_value = -np.inf + if max_value is None: + max_value = np.inf + return tf.clip_by_value(x, min_value, max_value) + + +@keras_export("keras._legacy.backend.concatenate") +def concatenate(tensors, axis=-1): + """DEPRECATED.""" + if axis < 0: + rank = ndim(tensors[0]) + if rank: + axis %= rank + else: + axis = 0 + + if py_all(is_sparse(x) for x in tensors): + return tf.compat.v1.sparse_concat(axis, tensors) + elif py_all(isinstance(x, tf.RaggedTensor) for x in tensors): + return tf.concat(tensors, axis) + else: + return tf.concat([to_dense(x) for x in tensors], axis) + + +@keras_export("keras._legacy.backend.constant") +def constant(value, dtype=None, shape=None, name=None): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + + return tf.constant(value, dtype=dtype, shape=shape, name=name) + + +def _preprocess_conv1d_input(x, data_format): + tf_data_format = "NWC" # to pass TF Conv2dNative operations + if data_format == "channels_first": + tf_data_format = "NCW" + return x, tf_data_format + + +def _preprocess_conv2d_input(x, data_format, force_transpose=False): + tf_data_format = "NHWC" + if data_format == "channels_first": + if force_transpose: 
+ x = tf.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC + else: + tf_data_format = "NCHW" + return x, tf_data_format + + +def _preprocess_conv3d_input(x, data_format): + tf_data_format = "NDHWC" + if data_format == "channels_first": + tf_data_format = "NCDHW" + return x, tf_data_format + + +def _preprocess_padding(padding): + if padding == "same": + padding = "SAME" + elif padding == "valid": + padding = "VALID" + else: + raise ValueError(f"Invalid padding: {padding}") + return padding + + +@keras_export("keras._legacy.backend.conv1d") +def conv1d( + x, kernel, strides=1, padding="valid", data_format=None, dilation_rate=1 +): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + + kernel_shape = kernel.shape.as_list() + if padding == "causal": + # causal (dilated) convolution: + left_pad = dilation_rate * (kernel_shape[0] - 1) + x = temporal_padding(x, (left_pad, 0)) + padding = "valid" + padding = _preprocess_padding(padding) + + x, tf_data_format = _preprocess_conv1d_input(x, data_format) + x = tf.compat.v1.nn.convolution( + input=x, + filter=kernel, + dilation_rate=dilation_rate, + strides=strides, + padding=padding, + data_format=tf_data_format, + ) + if data_format == "channels_first" and tf_data_format == "NWC": + x = tf.transpose(x, (0, 2, 1)) # NWC -> NCW + return x + + +@keras_export("keras._legacy.backend.conv2d") +def conv2d( + x, + kernel, + strides=(1, 1), + padding="valid", + data_format=None, + dilation_rate=(1, 1), +): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + + x, tf_data_format = _preprocess_conv2d_input(x, data_format) + padding = _preprocess_padding(padding) + x = tf.compat.v1.nn.convolution( + input=x, + filter=kernel, + 
dilation_rate=dilation_rate, + strides=strides, + padding=padding, + data_format=tf_data_format, + ) + if data_format == "channels_first" and tf_data_format == "NHWC": + x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW + return x + + +@keras_export("keras._legacy.backend.conv2d_transpose") +def conv2d_transpose( + x, + kernel, + output_shape, + strides=(1, 1), + padding="valid", + data_format=None, + dilation_rate=(1, 1), +): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + + # `atrous_conv2d_transpose` only supports NHWC format, even on GPU. + if data_format == "channels_first" and dilation_rate != (1, 1): + force_transpose = True + else: + force_transpose = False + + x, tf_data_format = _preprocess_conv2d_input( + x, data_format, force_transpose + ) + + if data_format == "channels_first" and tf_data_format == "NHWC": + output_shape = ( + output_shape[0], + output_shape[2], + output_shape[3], + output_shape[1], + ) + if output_shape[0] is None: + output_shape = (tf.shape(x)[0],) + tuple(output_shape[1:]) + + if isinstance(output_shape, (tuple, list)): + output_shape = tf.stack(list(output_shape)) + + padding = _preprocess_padding(padding) + if tf_data_format == "NHWC": + strides = (1,) + strides + (1,) + else: + strides = (1, 1) + strides + + if dilation_rate == (1, 1): + x = tf.compat.v1.nn.conv2d_transpose( + x, + kernel, + output_shape, + strides, + padding=padding, + data_format=tf_data_format, + ) + else: + if dilation_rate[0] != dilation_rate[1]: + raise ValueError( + "Expected the 2 dimensions of the `dilation_rate` argument " + "to be equal to each other. 
" + f"Received: dilation_rate={dilation_rate}" + ) + x = tf.nn.atrous_conv2d_transpose( + x, kernel, output_shape, rate=dilation_rate[0], padding=padding + ) + if data_format == "channels_first" and tf_data_format == "NHWC": + x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW + return x + + +@keras_export("keras._legacy.backend.conv3d") +def conv3d( + x, + kernel, + strides=(1, 1, 1), + padding="valid", + data_format=None, + dilation_rate=(1, 1, 1), +): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + + x, tf_data_format = _preprocess_conv3d_input(x, data_format) + padding = _preprocess_padding(padding) + x = tf.compat.v1.nn.convolution( + input=x, + filter=kernel, + dilation_rate=dilation_rate, + strides=strides, + padding=padding, + data_format=tf_data_format, + ) + if data_format == "channels_first" and tf_data_format == "NDHWC": + x = tf.transpose(x, (0, 4, 1, 2, 3)) + return x + + +@keras_export("keras._legacy.backend.cos") +def cos(x): + """DEPRECATED.""" + return tf.cos(x) + + +@keras_export("keras._legacy.backend.count_params") +def count_params(x): + """DEPRECATED.""" + return np.prod(x.shape.as_list()) + + +@keras_export("keras._legacy.backend.ctc_batch_cost") +def ctc_batch_cost(y_true, y_pred, input_length, label_length): + """DEPRECATED.""" + label_length = tf.cast(tf.squeeze(label_length, axis=-1), tf.int32) + input_length = tf.cast(tf.squeeze(input_length, axis=-1), tf.int32) + sparse_labels = tf.cast( + ctc_label_dense_to_sparse(y_true, label_length), tf.int32 + ) + + y_pred = tf.math.log( + tf.transpose(y_pred, perm=[1, 0, 2]) + backend.epsilon() + ) + + return tf.expand_dims( + tf.compat.v1.nn.ctc_loss( + inputs=y_pred, labels=sparse_labels, sequence_length=input_length + ), + 1, + ) + + +@keras_export("keras._legacy.backend.ctc_label_dense_to_sparse") +def 
ctc_label_dense_to_sparse(labels, label_lengths): + """DEPRECATED.""" + label_shape = tf.shape(labels) + num_batches_tns = tf.stack([label_shape[0]]) + max_num_labels_tns = tf.stack([label_shape[1]]) + + def range_less_than(old_input, current_input): + return tf.expand_dims(tf.range(tf.shape(old_input)[1]), 0) < tf.fill( + max_num_labels_tns, current_input + ) + + init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool) + dense_mask = tf.compat.v1.scan( + range_less_than, label_lengths, initializer=init, parallel_iterations=1 + ) + dense_mask = dense_mask[:, 0, :] + + label_array = tf.reshape( + tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape + ) + label_ind = tf.compat.v1.boolean_mask(label_array, dense_mask) + + batch_array = tf.transpose( + tf.reshape( + tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), + reverse(label_shape, 0), + ) + ) + batch_ind = tf.compat.v1.boolean_mask(batch_array, dense_mask) + indices = tf.transpose( + tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]) + ) + + vals_sparse = tf.compat.v1.gather_nd(labels, indices) + + return tf.SparseTensor( + tf.cast(indices, tf.int64), vals_sparse, tf.cast(label_shape, tf.int64) + ) + + +@keras_export("keras._legacy.backend.ctc_decode") +def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): + """DEPRECATED.""" + input_shape = tf.shape(y_pred) + num_samples, num_steps = input_shape[0], input_shape[1] + y_pred = tf.math.log( + tf.transpose(y_pred, perm=[1, 0, 2]) + backend.epsilon() + ) + input_length = tf.cast(input_length, tf.int32) + + if greedy: + (decoded, log_prob) = tf.nn.ctc_greedy_decoder( + inputs=y_pred, sequence_length=input_length + ) + else: + (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder( + inputs=y_pred, + sequence_length=input_length, + beam_width=beam_width, + top_paths=top_paths, + ) + decoded_dense = [] + for st in decoded: + st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) + 
decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) + return (decoded_dense, log_prob) + + +@keras_export("keras._legacy.backend.cumsum") +def cumsum(x, axis=0): + """DEPRECATED.""" + return tf.cumsum(x, axis=axis) + + +@keras_export("keras._legacy.backend.cumprod") +def cumprod(x, axis=0): + """DEPRECATED.""" + return tf.math.cumprod(x, axis=axis) + + +@keras_export("keras._legacy.backend.depthwise_conv2d") +def depthwise_conv2d( + x, + depthwise_kernel, + strides=(1, 1), + padding="valid", + data_format=None, + dilation_rate=(1, 1), +): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + + x, tf_data_format = _preprocess_conv2d_input(x, data_format) + padding = _preprocess_padding(padding) + if tf_data_format == "NHWC": + strides = (1,) + strides + (1,) + else: + strides = (1, 1) + strides + + x = tf.nn.depthwise_conv2d( + x, + depthwise_kernel, + strides=strides, + padding=padding, + dilations=dilation_rate, + data_format=tf_data_format, + ) + if data_format == "channels_first" and tf_data_format == "NHWC": + x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW + return x + + +@keras_export("keras._legacy.backend.dot") +def dot(x, y): + """DEPRECATED.""" + if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2): + x_shape = [] + for i, s in zip(x.shape, tf.unstack(tf.shape(x))): + if i is not None: + x_shape.append(i) + else: + x_shape.append(s) + x_shape = tuple(x_shape) + y_shape = [] + for i, s in zip(y.shape, tf.unstack(tf.shape(y))): + if i is not None: + y_shape.append(i) + else: + y_shape.append(s) + y_shape = tuple(y_shape) + y_permute_dim = list(range(ndim(y))) + y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim + xt = tf.reshape(x, [-1, x_shape[-1]]) + yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1]) + return tf.reshape( + tf.matmul(xt, yt), 
x_shape[:-1] + y_shape[:-2] + y_shape[-1:] + ) + if is_sparse(x): + out = tf.sparse.sparse_dense_matmul(x, y) + else: + out = tf.matmul(x, y) + return out + + +@keras_export("keras._legacy.backend.dropout") +def dropout(x, level, noise_shape=None, seed=None): + """DEPRECATED.""" + if seed is None: + seed = np.random.randint(10e6) + return tf.nn.dropout(x, rate=level, noise_shape=noise_shape, seed=seed) + + +@keras_export("keras._legacy.backend.dtype") +def dtype(x): + """DEPRECATED.""" + return x.dtype.base_dtype.name + + +@keras_export("keras._legacy.backend.elu") +def elu(x, alpha=1.0): + """DEPRECATED.""" + res = tf.nn.elu(x) + if alpha == 1: + return res + else: + return tf.where(x > 0, res, alpha * res) + + +@keras_export("keras._legacy.backend.equal") +def equal(x, y): + """DEPRECATED.""" + return tf.equal(x, y) + + +@keras_export("keras._legacy.backend.eval") +def eval(x): + """DEPRECATED.""" + return get_value(to_dense(x)) + + +@keras_export("keras._legacy.backend.exp") +def exp(x): + """DEPRECATED.""" + return tf.exp(x) + + +@keras_export("keras._legacy.backend.expand_dims") +def expand_dims(x, axis=-1): + """DEPRECATED.""" + return tf.expand_dims(x, axis) + + +@keras_export("keras._legacy.backend.eye") +def eye(size, dtype=None, name=None): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + return variable(tf.eye(size, dtype=tf_dtype), dtype, name) + + +@keras_export("keras._legacy.backend.flatten") +def flatten(x): + """DEPRECATED.""" + return tf.reshape(x, [-1]) + + +@keras_export("keras._legacy.backend.foldl") +def foldl(fn, elems, initializer=None, name=None): + """DEPRECATED.""" + return tf.compat.v1.foldl(fn, elems, initializer=initializer, name=name) + + +@keras_export("keras._legacy.backend.foldr") +def foldr(fn, elems, initializer=None, name=None): + """DEPRECATED.""" + return tf.compat.v1.foldr(fn, elems, initializer=initializer, name=name) + + +@keras_export("keras._legacy.backend.gather") +def 
gather(reference, indices): + """DEPRECATED.""" + return tf.compat.v1.gather(reference, indices) + + +@keras_export("keras._legacy.backend.get_value") +def get_value(x): + """DEPRECATED.""" + if not tf.is_tensor(x): + return x + if tf.executing_eagerly() or isinstance(x, tf.__internal__.EagerTensor): + return x.numpy() + if not getattr(x, "_in_graph_mode", True): + # This is a variable which was created in an eager context, but is being + # evaluated from a Graph. + with tf.__internal__.eager_context.eager_mode(): + return x.numpy() + with tf.init_scope(): + return x.numpy() + + +@keras_export("keras._legacy.backend.gradients") +def gradients(loss, variables): + """DEPRECATED.""" + return tf.compat.v1.gradients( + loss, variables, colocate_gradients_with_ops=True + ) + + +@keras_export("keras._legacy.backend.greater") +def greater(x, y): + """DEPRECATED.""" + return tf.greater(x, y) + + +@keras_export("keras._legacy.backend.greater_equal") +def greater_equal(x, y): + """DEPRECATED.""" + return tf.greater_equal(x, y) + + +@keras_export("keras._legacy.backend.hard_sigmoid") +def hard_sigmoid(x): + """DEPRECATED.""" + point_two = tf.convert_to_tensor(0.2, dtype=x.dtype) + point_five = tf.convert_to_tensor(0.5, dtype=x.dtype) + x = tf.multiply(x, point_two) + x = tf.add(x, point_five) + x = tf.clip_by_value(x, 0.0, 1.0) + return x + + +@keras_export("keras._legacy.backend.in_top_k") +def in_top_k(predictions, targets, k): + """DEPRECATED.""" + return tf.compat.v1.math.in_top_k(predictions, targets, k) + + +@keras_export("keras._legacy.backend.int_shape") +def int_shape(x): + """DEPRECATED.""" + try: + shape = x.shape + if not isinstance(shape, tuple): + shape = tuple(shape.as_list()) + return shape + except ValueError: + return None + + +@keras_export("keras._legacy.backend.is_sparse") +def is_sparse(tensor): + """DEPRECATED.""" + spec = getattr(tensor, "_type_spec", None) + if spec is not None: + return isinstance(spec, tf.SparseTensorSpec) + return isinstance(tensor, 
tf.SparseTensor) + + +@keras_export("keras._legacy.backend.l2_normalize") +def l2_normalize(x, axis=None): + """DEPRECATED.""" + return tf.linalg.l2_normalize(x, axis=axis) + + +@keras_export("keras._legacy.backend.less") +def less(x, y): + """DEPRECATED.""" + return tf.less(x, y) + + +@keras_export("keras._legacy.backend.less_equal") +def less_equal(x, y): + """DEPRECATED.""" + return tf.less_equal(x, y) + + +@keras_export("keras._legacy.backend.log") +def log(x): + """DEPRECATED.""" + return tf.math.log(x) + + +@keras_export("keras._legacy.backend.map_fn") +def map_fn(fn, elems, name=None, dtype=None): + """DEPRECATED.""" + return tf.compat.v1.map_fn(fn, elems, name=name, dtype=dtype) + + +@keras_export("keras._legacy.backend.max") +def max(x, axis=None, keepdims=False): + """DEPRECATED.""" + return tf.reduce_max(x, axis, keepdims) + + +@keras_export("keras._legacy.backend.maximum") +def maximum(x, y): + """DEPRECATED.""" + return tf.maximum(x, y) + + +@keras_export("keras._legacy.backend.mean") +def mean(x, axis=None, keepdims=False): + """DEPRECATED.""" + if x.dtype.base_dtype == tf.bool: + x = tf.cast(x, backend.floatx()) + return tf.reduce_mean(x, axis, keepdims) + + +@keras_export("keras._legacy.backend.min") +def min(x, axis=None, keepdims=False): + """DEPRECATED.""" + return tf.reduce_min(x, axis, keepdims) + + +@keras_export("keras._legacy.backend.minimum") +def minimum(x, y): + """DEPRECATED.""" + return tf.minimum(x, y) + + +@keras_export("keras._legacy.backend.moving_average_update") +def moving_average_update(x, value, momentum): + """DEPRECATED.""" + momentum = tf.cast(momentum, x.dtype) + value = tf.cast(value, x.dtype) + return x.assign_sub((x - value) * (1 - momentum)) + + +@keras_export("keras._legacy.backend.name_scope") +def name_scope(name): + """DEPRECATED.""" + return tf.name_scope(name) + + +@keras_export("keras._legacy.backend.ndim") +def ndim(x): + """DEPRECATED.""" + return x.shape.rank + + 
+@keras_export("keras._legacy.backend.not_equal") +def not_equal(x, y): + """DEPRECATED.""" + return tf.not_equal(x, y) + + +@keras_export("keras._legacy.backend.one_hot") +def one_hot(indices, num_classes): + """DEPRECATED.""" + return tf.one_hot(indices, depth=num_classes, axis=-1) + + +@keras_export("keras._legacy.backend.ones") +def ones(shape, dtype=None, name=None): + """DEPRECATED.""" + with tf.init_scope(): + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + v = tf.ones(shape=shape, dtype=tf_dtype, name=name) + if py_all(v.shape.as_list()): + return variable(v, dtype=dtype, name=name) + return v + + +@keras_export("keras._legacy.backend.ones_like") +def ones_like(x, dtype=None, name=None): + """DEPRECATED.""" + return tf.ones_like(x, dtype=dtype, name=name) + + +@keras_export("keras._legacy.backend.permute_dimensions") +def permute_dimensions(x, pattern): + """DEPRECATED.""" + return tf.transpose(x, perm=pattern) + + +@keras_export("keras._legacy.backend.pool2d") +def pool2d( + x, + pool_size, + strides=(1, 1), + padding="valid", + data_format=None, + pool_mode="max", +): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + if len(pool_size) != 2: + raise ValueError("`pool_size` must be a tuple of 2 integers.") + if len(strides) != 2: + raise ValueError("`strides` must be a tuple of 2 integers.") + + x, tf_data_format = _preprocess_conv2d_input(x, data_format) + padding = _preprocess_padding(padding) + if tf_data_format == "NHWC": + strides = (1,) + strides + (1,) + pool_size = (1,) + pool_size + (1,) + else: + strides = (1, 1) + strides + pool_size = (1, 1) + pool_size + + if pool_mode == "max": + x = tf.compat.v1.nn.max_pool( + x, pool_size, strides, padding=padding, data_format=tf_data_format + ) + elif pool_mode == "avg": + x = tf.compat.v1.nn.avg_pool( + x, pool_size, 
strides, padding=padding, data_format=tf_data_format + ) + else: + raise ValueError("Invalid pooling mode: " + str(pool_mode)) + + if data_format == "channels_first" and tf_data_format == "NHWC": + x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW + return x + + +@keras_export("keras._legacy.backend.pool3d") +def pool3d( + x, + pool_size, + strides=(1, 1, 1), + padding="valid", + data_format=None, + pool_mode="max", +): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + + x, tf_data_format = _preprocess_conv3d_input(x, data_format) + padding = _preprocess_padding(padding) + if tf_data_format == "NDHWC": + strides = (1,) + strides + (1,) + pool_size = (1,) + pool_size + (1,) + else: + strides = (1, 1) + strides + pool_size = (1, 1) + pool_size + + if pool_mode == "max": + x = tf.nn.max_pool3d( + x, pool_size, strides, padding=padding, data_format=tf_data_format + ) + elif pool_mode == "avg": + x = tf.nn.avg_pool3d( + x, pool_size, strides, padding=padding, data_format=tf_data_format + ) + else: + raise ValueError("Invalid pooling mode: " + str(pool_mode)) + + if data_format == "channels_first" and tf_data_format == "NDHWC": + x = tf.transpose(x, (0, 4, 1, 2, 3)) + return x + + +@keras_export("keras._legacy.backend.pow") +def pow(x, a): + """DEPRECATED.""" + return tf.pow(x, a) + + +@keras_export("keras._legacy.backend.prod") +def prod(x, axis=None, keepdims=False): + """DEPRECATED.""" + return tf.reduce_prod(x, axis, keepdims) + + +@keras_export("keras._legacy.backend.random_bernoulli") +def random_bernoulli(shape, p=0.0, dtype=None, seed=None): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + if seed is None: + seed = np.random.randint(10e6) + return tf.where( + tf.random.uniform(shape, dtype=dtype, seed=seed) <= p, + tf.ones(shape, dtype=dtype), + tf.zeros(shape, dtype=dtype), + ) + + 
+@keras_export("keras._legacy.backend.random_normal") +def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + if seed is None: + seed = np.random.randint(10e6) + return tf.random.normal( + shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed + ) + + +@keras_export("keras._legacy.backend.random_normal_variable") +def random_normal_variable( + shape, mean, scale, dtype=None, name=None, seed=None +): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + if seed is None: + # ensure that randomness is conditioned by the Numpy RNG + seed = np.random.randint(10e8) + value = tf.compat.v1.random_normal_initializer( + mean, scale, dtype=tf_dtype, seed=seed + )(shape) + return variable(value, dtype=dtype, name=name) + + +@keras_export("keras._legacy.backend.random_uniform") +def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + if seed is None: + seed = np.random.randint(10e6) + return tf.random.uniform( + shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed + ) + + +@keras_export("keras._legacy.backend.random_uniform_variable") +def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + if seed is None: + # ensure that randomness is conditioned by the Numpy RNG + seed = np.random.randint(10e8) + value = tf.compat.v1.random_uniform_initializer( + low, high, dtype=tf_dtype, seed=seed + )(shape) + return variable(value, dtype=dtype, name=name) + + +@keras_export("keras._legacy.backend.reshape") +def reshape(x, shape): + """DEPRECATED.""" + return tf.reshape(x, shape) + + +@keras_export("keras._legacy.backend.relu") +def relu(x, alpha=0.0, max_value=None, threshold=0.0): + """DEPRECATED.""" + # While x can be a tensor or variable, 
we also see cases where + # numpy arrays, lists, tuples are passed as well. + # lists, tuples do not have 'dtype' attribute. + dtype = getattr(x, "dtype", backend.floatx()) + if alpha != 0.0: + if max_value is None and threshold == 0: + return tf.nn.leaky_relu(x, alpha=alpha) + + if threshold != 0: + negative_part = tf.nn.relu(-x + threshold) + else: + negative_part = tf.nn.relu(-x) + else: + negative_part = 1 + + clip_max = max_value is not None + + if threshold != 0: + # computes x for x > threshold else 0 + x = x * tf.cast(tf.greater(x, threshold), dtype=dtype) + elif max_value == 6: + # if no threshold, then can use nn.relu6 native TF op for performance + x = tf.nn.relu6(x) + clip_max = False + else: + x = tf.nn.relu(x) + + if clip_max: + max_value = tf.convert_to_tensor(max_value, dtype=x.dtype) + zero = tf.convert_to_tensor(0, dtype=x.dtype) + x = tf.clip_by_value(x, zero, max_value) + + if alpha != 0.0: + alpha = tf.convert_to_tensor(alpha, dtype=x.dtype) + x -= alpha * negative_part + return x + + +@keras_export("keras._legacy.backend.repeat") +def repeat(x, n): + """DEPRECATED.""" + assert ndim(x) == 2 + x = tf.expand_dims(x, 1) + pattern = tf.stack([1, n, 1]) + return tf.tile(x, pattern) + + +@keras_export("keras._legacy.backend.repeat_elements") +def repeat_elements(x, rep, axis): + """DEPRECATED.""" + x_shape = x.shape.as_list() + # For static axis + if x_shape[axis] is not None: + # slices along the repeat axis + splits = tf.split(value=x, num_or_size_splits=x_shape[axis], axis=axis) + # repeat each slice the given number of reps + x_rep = [s for s in splits for _ in range(rep)] + return concatenate(x_rep, axis) + + # Here we use tf.tile to mimic behavior of np.repeat so that + # we can handle dynamic shapes (that include None). + # To do that, we need an auxiliary axis to repeat elements along + # it and then merge them along the desired axis. 
+ + # Repeating + auxiliary_axis = axis + 1 + x_shape = tf.shape(x) + x_rep = tf.expand_dims(x, axis=auxiliary_axis) + reps = np.ones(len(x.shape) + 1) + reps[auxiliary_axis] = rep + x_rep = tf.tile(x_rep, reps) + + # Merging + reps = np.delete(reps, auxiliary_axis) + reps[axis] = rep + reps = tf.constant(reps, dtype="int32") + x_shape *= reps + x_rep = tf.reshape(x_rep, x_shape) + + # Fix shape representation + x_shape = x.shape.as_list() + x_rep.set_shape(x_shape) + return x_rep + + +@keras_export("keras._legacy.backend.resize_images") +def resize_images( + x, height_factor, width_factor, data_format, interpolation="nearest" +): + """DEPRECATED.""" + if data_format == "channels_first": + rows, cols = 2, 3 + elif data_format == "channels_last": + rows, cols = 1, 2 + else: + raise ValueError(f"Invalid `data_format` argument: {data_format}") + + new_shape = x.shape[rows : cols + 1] + if new_shape.is_fully_defined(): + new_shape = tf.constant(new_shape.as_list(), dtype="int32") + else: + new_shape = tf.shape(x)[rows : cols + 1] + new_shape *= tf.constant( + np.array([height_factor, width_factor], dtype="int32") + ) + + if data_format == "channels_first": + x = permute_dimensions(x, [0, 2, 3, 1]) + interpolations = { + "area": tf.image.ResizeMethod.AREA, + "bicubic": tf.image.ResizeMethod.BICUBIC, + "bilinear": tf.image.ResizeMethod.BILINEAR, + "gaussian": tf.image.ResizeMethod.GAUSSIAN, + "lanczos3": tf.image.ResizeMethod.LANCZOS3, + "lanczos5": tf.image.ResizeMethod.LANCZOS5, + "mitchellcubic": tf.image.ResizeMethod.MITCHELLCUBIC, + "nearest": tf.image.ResizeMethod.NEAREST_NEIGHBOR, + } + interploations_list = '"' + '", "'.join(interpolations.keys()) + '"' + if interpolation in interpolations: + x = tf.image.resize(x, new_shape, method=interpolations[interpolation]) + else: + raise ValueError( + "`interpolation` argument should be one of: " + f'{interploations_list}. Received: "{interpolation}".' 
+ ) + if data_format == "channels_first": + x = permute_dimensions(x, [0, 3, 1, 2]) + + return x + + +@keras_export("keras._legacy.backend.resize_volumes") +def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): + """DEPRECATED.""" + if data_format == "channels_first": + output = repeat_elements(x, depth_factor, axis=2) + output = repeat_elements(output, height_factor, axis=3) + output = repeat_elements(output, width_factor, axis=4) + return output + elif data_format == "channels_last": + output = repeat_elements(x, depth_factor, axis=1) + output = repeat_elements(output, height_factor, axis=2) + output = repeat_elements(output, width_factor, axis=3) + return output + else: + raise ValueError(f"Invalid data_format: {data_format}") + + +@keras_export("keras._legacy.backend.reverse") +def reverse(x, axes): + """DEPRECATED.""" + if isinstance(axes, int): + axes = [axes] + return tf.reverse(x, axes) + + +@keras_export("keras._legacy.backend.rnn") +def rnn( + step_function, + inputs, + initial_states, + go_backwards=False, + mask=None, + constants=None, + unroll=False, + input_length=None, + time_major=False, + zero_output_for_mask=False, + return_all_outputs=True, +): + """DEPRECATED.""" + if not tf.__internal__.tf2.enabled(): + return_all_outputs = True # Not supported in TF1. + + def swap_batch_timestep(input_t): + # Swap the batch and timestep dim for the incoming tensor. 
+ axes = list(range(len(input_t.shape))) + axes[0], axes[1] = 1, 0 + return tf.transpose(input_t, axes) + + if not time_major: + inputs = tf.nest.map_structure(swap_batch_timestep, inputs) + + flatted_inputs = tf.nest.flatten(inputs) + time_steps = flatted_inputs[0].shape[0] + batch = flatted_inputs[0].shape[1] + time_steps_t = tf.shape(flatted_inputs[0])[0] + + for input_ in flatted_inputs: + input_.shape.with_rank_at_least(3) + + if mask is not None: + if mask.dtype != tf.bool: + mask = tf.cast(mask, tf.bool) + if len(mask.shape) == 2: + mask = expand_dims(mask) + if not time_major: + mask = swap_batch_timestep(mask) + + if constants is None: + constants = [] + + # tf.where needs its condition tensor to be the same shape as its two + # result tensors, but in our case the condition (mask) tensor is + # (nsamples, 1), and inputs are (nsamples, ndimensions) or even more. + # So we need to broadcast the mask to match the shape of inputs. + # That's what the tile call does, it just repeats the mask along its + # second dimension n times. + def _expand_mask(mask_t, input_t, fixed_dim=1): + if tf.nest.is_nested(mask_t): + raise ValueError( + f"mask_t is expected to be tensor, but got {mask_t}" + ) + if tf.nest.is_nested(input_t): + raise ValueError( + f"input_t is expected to be tensor, but got {input_t}" + ) + rank_diff = len(input_t.shape) - len(mask_t.shape) + for _ in range(rank_diff): + mask_t = tf.expand_dims(mask_t, -1) + multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:] + return tf.tile(mask_t, multiples) + + if unroll: + if not time_steps: + raise ValueError("Unrolling requires a fixed number of timesteps.") + states = tuple(initial_states) + successive_states = [] + successive_outputs = [] + + # Process the input tensors. The input tensor need to be split on the + # time_step dim, and reverse if go_backwards is True. In the case of + # nested input, the input is flattened and then transformed + # individually. 
The result of this will be a tuple of lists, each of + # the item in tuple is list of the tensor with shape (batch, feature) + def _process_single_input_t(input_t): + input_t = tf.unstack(input_t) # unstack for time_step dim + if go_backwards: + input_t.reverse() + return input_t + + if tf.nest.is_nested(inputs): + processed_input = tf.nest.map_structure( + _process_single_input_t, inputs + ) + else: + processed_input = (_process_single_input_t(inputs),) + + def _get_input_tensor(time): + inp = [t_[time] for t_ in processed_input] + return tf.nest.pack_sequence_as(inputs, inp) + + if mask is not None: + mask_list = tf.unstack(mask) + if go_backwards: + mask_list.reverse() + + for i in range(time_steps): + inp = _get_input_tensor(i) + mask_t = mask_list[i] + output, new_states = step_function( + inp, tuple(states) + tuple(constants) + ) + tiled_mask_t = _expand_mask(mask_t, output) + + if not successive_outputs: + prev_output = zeros_like(output) + else: + prev_output = successive_outputs[-1] + + output = tf.where(tiled_mask_t, output, prev_output) + + flat_states = tf.nest.flatten(states) + flat_new_states = tf.nest.flatten(new_states) + tiled_mask_t = tuple( + _expand_mask(mask_t, s) for s in flat_states + ) + flat_final_states = tuple( + tf.where(m, s, ps) + for m, s, ps in zip( + tiled_mask_t, flat_new_states, flat_states + ) + ) + states = tf.nest.pack_sequence_as(states, flat_final_states) + + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = tf.stack(successive_outputs) + + if zero_output_for_mask: + last_output = tf.where( + _expand_mask(mask_list[-1], last_output), + last_output, + zeros_like(last_output), + ) + outputs = tf.where( + _expand_mask(mask, outputs, fixed_dim=2), + outputs, + zeros_like(outputs), + ) + + else: # mask is None + for i in 
range(time_steps): + inp = _get_input_tensor(i) + output, states = step_function( + inp, tuple(states) + tuple(constants) + ) + if return_all_outputs: + successive_outputs.append(output) + successive_states.append(states) + else: + successive_outputs = [output] + successive_states = [states] + last_output = successive_outputs[-1] + new_states = successive_states[-1] + outputs = tf.stack(successive_outputs) + + else: # Unroll == False + states = tuple(initial_states) + + # Create input tensor array, if the inputs is nested tensors, then it + # will be flattened first, and tensor array will be created one per + # flattened tensor. + input_ta = tuple( + tf.TensorArray( + dtype=inp.dtype, + size=time_steps_t, + tensor_array_name=f"input_ta_{i}", + ) + for i, inp in enumerate(flatted_inputs) + ) + input_ta = tuple( + ( + ta.unstack(input_) + if not go_backwards + else ta.unstack(reverse(input_, 0)) + ) + for ta, input_ in zip(input_ta, flatted_inputs) + ) + + # Get the time(0) input and compute the output for that, the output will + # be used to determine the dtype of output tensor array. Don't read from + # input_ta due to TensorArray clear_after_read default to True. + input_time_zero = tf.nest.pack_sequence_as( + inputs, [inp[0] for inp in flatted_inputs] + ) + # output_time_zero is used to determine the cell output shape and its + # dtype. the value is discarded. 
+ output_time_zero, _ = step_function( + input_time_zero, tuple(initial_states) + tuple(constants) + ) + + output_ta_size = time_steps_t if return_all_outputs else 1 + output_ta = tuple( + tf.TensorArray( + dtype=out.dtype, + size=output_ta_size, + element_shape=out.shape, + tensor_array_name=f"output_ta_{i}", + ) + for i, out in enumerate(tf.nest.flatten(output_time_zero)) + ) + + time = tf.constant(0, dtype="int32", name="time") + + if input_length is None: + max_iterations = time_steps_t + else: + max_iterations = tf.reduce_max(input_length) + + while_loop_kwargs = { + "cond": lambda time, *_: time < time_steps_t, + "maximum_iterations": max_iterations, + "parallel_iterations": 32, + "swap_memory": True, + } + if mask is not None: + if go_backwards: + mask = reverse(mask, 0) + + mask_ta = tf.TensorArray( + dtype=tf.bool, size=time_steps_t, tensor_array_name="mask_ta" + ) + mask_ta = mask_ta.unstack(mask) + + def masking_fn(time): + return mask_ta.read(time) + + def compute_masked_output(mask_t, flat_out, flat_mask): + tiled_mask_t = tuple( + _expand_mask(mask_t, o, fixed_dim=len(mask_t.shape)) + for o in flat_out + ) + return tuple( + tf.where(m, o, fm) + for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask) + ) + + elif isinstance(input_length, tf.Tensor): + if go_backwards: + max_len = tf.reduce_max(input_length, axis=0) + rev_input_length = tf.subtract(max_len - 1, input_length) + + def masking_fn(time): + return tf.less(rev_input_length, time) + + else: + + def masking_fn(time): + return tf.greater(input_length, time) + + def compute_masked_output(mask_t, flat_out, flat_mask): + return tuple( + tf.compat.v1.where(mask_t, o, zo) + for (o, zo) in zip(flat_out, flat_mask) + ) + + else: + masking_fn = None + + if masking_fn is not None: + # Mask for the T output will be base on the output of T - 1. In the + # case T = 0, a zero filled tensor will be used. 
+ flat_zero_output = tuple( + tf.zeros_like(o) for o in tf.nest.flatten(output_time_zero) + ) + + def _step(time, output_ta_t, prev_output, *states): + """RNN step function. + + Args: + time: Current timestep value. + output_ta_t: TensorArray. + prev_output: tuple of outputs from time - 1. + *states: List of states. + + Returns: + Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)` + """ + current_input = tuple(ta.read(time) for ta in input_ta) + # maybe set shape. + current_input = tf.nest.pack_sequence_as(inputs, current_input) + mask_t = masking_fn(time) + output, new_states = step_function( + current_input, tuple(states) + tuple(constants) + ) + # mask output + flat_output = tf.nest.flatten(output) + flat_mask_output = ( + flat_zero_output + if zero_output_for_mask + else tf.nest.flatten(prev_output) + ) + flat_new_output = compute_masked_output( + mask_t, flat_output, flat_mask_output + ) + + # mask states + flat_state = tf.nest.flatten(states) + flat_new_state = tf.nest.flatten(new_states) + for state, new_state in zip(flat_state, flat_new_state): + if isinstance(new_state, tf.Tensor): + new_state.set_shape(state.shape) + flat_final_state = compute_masked_output( + mask_t, flat_new_state, flat_state + ) + new_states = tf.nest.pack_sequence_as( + new_states, flat_final_state + ) + + ta_index_to_write = time if return_all_outputs else 0 + output_ta_t = tuple( + ta.write(ta_index_to_write, out) + for ta, out in zip(output_ta_t, flat_new_output) + ) + + return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple( + new_states + ) + + final_outputs = tf.compat.v1.while_loop( + body=_step, + loop_vars=(time, output_ta, flat_zero_output) + states, + **while_loop_kwargs, + ) + # Skip final_outputs[2] which is the output for final timestep. + new_states = final_outputs[3:] + else: + + def _step(time, output_ta_t, *states): + """RNN step function. + + Args: + time: Current timestep value. + output_ta_t: TensorArray. + *states: List of states. 
+ + Returns: + Tuple: `(time + 1,output_ta_t) + tuple(new_states)` + """ + current_input = tuple(ta.read(time) for ta in input_ta) + current_input = tf.nest.pack_sequence_as(inputs, current_input) + output, new_states = step_function( + current_input, tuple(states) + tuple(constants) + ) + flat_state = tf.nest.flatten(states) + flat_new_state = tf.nest.flatten(new_states) + for state, new_state in zip(flat_state, flat_new_state): + if isinstance(new_state, tf.Tensor): + new_state.set_shape(state.shape) + + flat_output = tf.nest.flatten(output) + ta_index_to_write = time if return_all_outputs else 0 + output_ta_t = tuple( + ta.write(ta_index_to_write, out) + for ta, out in zip(output_ta_t, flat_output) + ) + + new_states = tf.nest.pack_sequence_as( + initial_states, flat_new_state + ) + return (time + 1, output_ta_t) + tuple(new_states) + + final_outputs = tf.compat.v1.while_loop( + body=_step, + loop_vars=(time, output_ta) + states, + **while_loop_kwargs, + ) + new_states = final_outputs[2:] + + output_ta = final_outputs[1] + + outputs = tuple(o.stack() for o in output_ta) + last_output = tuple(o[-1] for o in outputs) + + outputs = tf.nest.pack_sequence_as(output_time_zero, outputs) + last_output = tf.nest.pack_sequence_as(output_time_zero, last_output) + + # static shape inference + def set_shape(output_): + if isinstance(output_, tf.Tensor): + shape = output_.shape.as_list() + if return_all_outputs: + shape[0] = time_steps + else: + shape[0] = 1 + shape[1] = batch + output_.set_shape(shape) + return output_ + + outputs = tf.nest.map_structure(set_shape, outputs) + + if not time_major: + outputs = tf.nest.map_structure(swap_batch_timestep, outputs) + + return last_output, outputs, new_states + + +@keras_export("keras._legacy.backend.round") +def round(x): + """DEPRECATED.""" + return tf.round(x) + + +@keras_export("keras._legacy.backend.separable_conv2d") +def separable_conv2d( + x, + depthwise_kernel, + pointwise_kernel, + strides=(1, 1), + padding="valid", + 
data_format=None, + dilation_rate=(1, 1), +): + """DEPRECATED.""" + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + if len(strides) != 2: + raise ValueError("`strides` must be a tuple of 2 integers.") + + x, tf_data_format = _preprocess_conv2d_input(x, data_format) + padding = _preprocess_padding(padding) + if not isinstance(strides, tuple): + strides = tuple(strides) + if tf_data_format == "NHWC": + strides = (1,) + strides + (1,) + else: + strides = (1, 1) + strides + + x = tf.nn.separable_conv2d( + x, + depthwise_kernel, + pointwise_kernel, + strides=strides, + padding=padding, + dilations=dilation_rate, + data_format=tf_data_format, + ) + if data_format == "channels_first" and tf_data_format == "NHWC": + x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW + return x + + +@keras_export("keras._legacy.backend.set_value") +def set_value(x, value): + """DEPRECATED.""" + value = np.asarray(value, dtype=x.dtype.name) + x.assign(value) + + +@keras_export("keras._legacy.backend.shape") +def shape(x): + """DEPRECATED.""" + return tf.shape(x) + + +@keras_export("keras._legacy.backend.sigmoid") +def sigmoid(x): + """DEPRECATED.""" + output = tf.sigmoid(x) + return output + + +@keras_export("keras._legacy.backend.sign") +def sign(x): + """DEPRECATED.""" + return tf.sign(x) + + +@keras_export("keras._legacy.backend.sin") +def sin(x): + """DEPRECATED.""" + return tf.sin(x) + + +@keras_export("keras._legacy.backend.softmax") +def softmax(x, axis=-1): + """DEPRECATED.""" + if x.shape.rank <= 1: + raise ValueError( + f"Cannot apply softmax to a tensor that is 1D. Received input: {x}" + ) + + if isinstance(axis, int): + output = tf.nn.softmax(x, axis=axis) + else: + # nn.softmax does not support tuple axis. 
+ numerator = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True)) + denominator = tf.reduce_sum(numerator, axis=axis, keepdims=True) + output = numerator / denominator + + # Cache the logits to use for crossentropy loss. + output._keras_logits = x + return output + + +@keras_export("keras._legacy.backend.softplus") +def softplus(x): + """DEPRECATED.""" + return tf.math.softplus(x) + + +@keras_export("keras._legacy.backend.softsign") +def softsign(x): + """DEPRECATED.""" + return tf.math.softsign(x) + + +@keras_export("keras._legacy.backend.sparse_categorical_crossentropy") +def sparse_categorical_crossentropy( + target, output, from_logits=False, axis=-1, ignore_class=None +): + """DEPRECATED.""" + target = tf.convert_to_tensor(target) + output = tf.convert_to_tensor(output) + + target = cast(target, "int64") + + if not from_logits: + epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype) + output = tf.clip_by_value(output, epsilon_, 1 - epsilon_) + output = tf.math.log(output) + + # Permute output so that the last axis contains the logits/probabilities. + if isinstance(output.shape, (tuple, list)): + output_rank = len(output.shape) + else: + output_rank = output.shape.ndims + if output_rank is not None: + axis %= output_rank + if axis != output_rank - 1: + permutation = list( + itertools.chain( + range(axis), range(axis + 1, output_rank), [axis] + ) + ) + output = tf.transpose(output, perm=permutation) + elif axis != -1: + raise ValueError( + "Cannot compute sparse categorical crossentropy with `axis={}` " + "on an output tensor with unknown rank".format(axis) + ) + + # Try to adjust the shape so that rank of labels = rank of logits - 1. 
+ output_shape = tf.shape(output) + target_rank = target.shape.ndims + + update_shape = ( + target_rank is not None + and output_rank is not None + and target_rank != output_rank - 1 + ) + if update_shape: + target = flatten(target) + output = tf.reshape(output, [-1, output_shape[-1]]) + + if ignore_class is not None: + valid_mask = tf.not_equal(target, cast(ignore_class, target.dtype)) + target = target[valid_mask] + output = output[valid_mask] + + res = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=target, logits=output + ) + + if ignore_class is not None: + res_shape = cast(output_shape[:-1], "int64") + valid_mask = tf.reshape(valid_mask, res_shape) + res = tf.scatter_nd(tf.where(valid_mask), res, res_shape) + res._keras_mask = valid_mask + + return res + + if update_shape and output_rank >= 3: + # If our output includes timesteps or + # spatial dimensions we need to reshape + res = tf.reshape(res, output_shape[:-1]) + + return res + + +@keras_export("keras._legacy.backend.spatial_2d_padding") +def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): + """DEPRECATED.""" + assert len(padding) == 2 + assert len(padding[0]) == 2 + assert len(padding[1]) == 2 + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + + if data_format == "channels_first": + pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])] + else: + pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]] + return tf.compat.v1.pad(x, pattern) + + +@keras_export("keras._legacy.backend.spatial_3d_padding") +def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): + """DEPRECATED.""" + assert len(padding) == 3 + assert len(padding[0]) == 2 + assert len(padding[1]) == 2 + assert len(padding[2]) == 2 + if data_format is None: + data_format = backend.image_data_format() + if data_format not in {"channels_first", 
"channels_last"}: + raise ValueError(f"Unknown data_format: {data_format}") + + if data_format == "channels_first": + pattern = [ + [0, 0], + [0, 0], + [padding[0][0], padding[0][1]], + [padding[1][0], padding[1][1]], + [padding[2][0], padding[2][1]], + ] + else: + pattern = [ + [0, 0], + [padding[0][0], padding[0][1]], + [padding[1][0], padding[1][1]], + [padding[2][0], padding[2][1]], + [0, 0], + ] + return tf.compat.v1.pad(x, pattern) + + +@keras_export("keras._legacy.backend.sqrt") +def sqrt(x): + """DEPRECATED.""" + zero = tf.convert_to_tensor(0.0, x.dtype) + x = tf.maximum(x, zero) + return tf.sqrt(x) + + +@keras_export("keras._legacy.backend.square") +def square(x): + """DEPRECATED.""" + return tf.square(x) + + +@keras_export("keras._legacy.backend.squeeze") +def squeeze(x, axis): + """DEPRECATED.""" + return tf.squeeze(x, [axis]) + + +@keras_export("keras._legacy.backend.stack") +def stack(x, axis=0): + """DEPRECATED.""" + return tf.stack(x, axis=axis) + + +@keras_export("keras._legacy.backend.std") +def std(x, axis=None, keepdims=False): + """DEPRECATED.""" + if x.dtype.base_dtype == tf.bool: + x = tf.cast(x, backend.floatx()) + return tf.math.reduce_std(x, axis=axis, keepdims=keepdims) + + +@keras_export("keras._legacy.backend.stop_gradient") +def stop_gradient(variables): + """DEPRECATED.""" + if isinstance(variables, (list, tuple)): + return map(tf.stop_gradient, variables) + return tf.stop_gradient(variables) + + +@keras_export("keras._legacy.backend.sum") +def sum(x, axis=None, keepdims=False): + """DEPRECATED.""" + return tf.reduce_sum(x, axis, keepdims) + + +@keras_export("keras._legacy.backend.switch") +def switch(condition, then_expression, else_expression): + """DEPRECATED.""" + if condition.dtype != tf.bool: + condition = tf.cast(condition, "bool") + cond_ndim = ndim(condition) + if not cond_ndim: + if not callable(then_expression): + + def then_expression_fn(): + return then_expression + + else: + then_expression_fn = then_expression + if not 
callable(else_expression): + + def else_expression_fn(): + return else_expression + + else: + else_expression_fn = else_expression + x = tf.compat.v1.cond(condition, then_expression_fn, else_expression_fn) + else: + # tf.where needs its condition tensor + # to be the same shape as its two + # result tensors + if callable(then_expression): + then_expression = then_expression() + if callable(else_expression): + else_expression = else_expression() + expr_ndim = ndim(then_expression) + if cond_ndim > expr_ndim: + raise ValueError( + "Rank of `condition` should be less than or" + " equal to rank of `then_expression` and " + "`else_expression`. ndim(condition)=" + + str(cond_ndim) + + ", ndim(then_expression)=" + + str(expr_ndim) + ) + if cond_ndim > 1: + ndim_diff = expr_ndim - cond_ndim + cond_shape = tf.concat( + [tf.shape(condition), [1] * ndim_diff], axis=0 + ) + condition = tf.reshape(condition, cond_shape) + expr_shape = tf.shape(then_expression) + shape_diff = expr_shape - cond_shape + tile_shape = tf.where( + shape_diff > 0, expr_shape, tf.ones_like(expr_shape) + ) + condition = tf.tile(condition, tile_shape) + x = tf.where(condition, then_expression, else_expression) + return x + + +@keras_export("keras._legacy.backend.tanh") +def tanh(x): + """DEPRECATED.""" + return tf.tanh(x) + + +@keras_export("keras._legacy.backend.temporal_padding") +def temporal_padding(x, padding=(1, 1)): + """DEPRECATED.""" + assert len(padding) == 2 + pattern = [[0, 0], [padding[0], padding[1]], [0, 0]] + return tf.compat.v1.pad(x, pattern) + + +@keras_export("keras._legacy.backend.tile") +def tile(x, n): + """DEPRECATED.""" + if isinstance(n, int): + n = [n] + return tf.tile(x, n) + + +@keras_export("keras._legacy.backend.to_dense") +def to_dense(tensor): + """DEPRECATED.""" + if is_sparse(tensor): + return tf.sparse.to_dense(tensor) + else: + return tensor + + +@keras_export("keras._legacy.backend.transpose") +def transpose(x): + """DEPRECATED.""" + return tf.transpose(x) + + 
+@keras_export("keras._legacy.backend.truncated_normal") +def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + if seed is None: + seed = np.random.randint(10e6) + return tf.random.truncated_normal( + shape, mean, stddev, dtype=dtype, seed=seed + ) + + +@keras_export("keras._legacy.backend.update") +def update(x, new_x): + """DEPRECATED.""" + return tf.compat.v1.assign(x, new_x) + + +@keras_export("keras._legacy.backend.update_add") +def update_add(x, increment): + """DEPRECATED.""" + return tf.compat.v1.assign_add(x, increment) + + +@keras_export("keras._legacy.backend.update_sub") +def update_sub(x, decrement): + """DEPRECATED.""" + return tf.compat.v1.assign_sub(x, decrement) + + +@keras_export("keras._legacy.backend.var") +def var(x, axis=None, keepdims=False): + """DEPRECATED.""" + if x.dtype.base_dtype == tf.bool: + x = tf.cast(x, backend.floatx()) + return tf.math.reduce_variance(x, axis=axis, keepdims=keepdims) + + +@keras_export("keras._legacy.backend.variable") +def variable(value, dtype=None, name=None, constraint=None): + """DEPRECATED.""" + if dtype is None: + dtype = backend.floatx() + if hasattr(value, "tocoo"): + sparse_coo = value.tocoo() + indices = np.concatenate( + ( + np.expand_dims(sparse_coo.row, 1), + np.expand_dims(sparse_coo.col, 1), + ), + 1, + ) + v = tf.SparseTensor( + indices=indices, + values=sparse_coo.data, + dense_shape=sparse_coo.shape, + ) + v._keras_shape = sparse_coo.shape + return v + v = tf.Variable( + value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint + ) + return v + + +@keras_export("keras._legacy.backend.zeros") +def zeros(shape, dtype=None, name=None): + """DEPRECATED.""" + with tf.init_scope(): + if dtype is None: + dtype = backend.floatx() + tf_dtype = tf.as_dtype(dtype) + v = tf.zeros(shape=shape, dtype=tf_dtype, name=name) + if py_all(v.shape.as_list()): + return variable(v, dtype=dtype, name=name) + return v + + 
+@keras_export("keras._legacy.backend.zeros_like") +def zeros_like(x, dtype=None, name=None): + """DEPRECATED.""" + return tf.zeros_like(x, dtype=dtype, name=name) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/layers.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..b51ecf86c75108812278d6c5c47e847f075026dd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/layers.py @@ -0,0 +1,244 @@ +"""Legacy Keras 1/2 layers. + +AlphaDropout +RandomHeight +RandomWidth +ThresholdedReLU +""" + +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.utils.module_utils import tensorflow as tf + + +@keras_export("keras._legacy.layers.AlphaDropout") +class AlphaDropout(Layer): + """DEPRECATED.""" + + def __init__(self, rate, noise_shape=None, seed=None, **kwargs): + super().__init__(**kwargs) + self.rate = rate + self.seed = seed + self.noise_shape = noise_shape + self.seed_generator = backend.random.SeedGenerator(seed) + self.supports_masking = True + self.built = True + + def call(self, inputs, training=False): + if training and self.rate > 0: + alpha = 1.6732632423543772848170429916717 + scale = 1.0507009873554804934193349852946 + alpha_p = -alpha * scale + + if self.noise_shape is None: + noise_shape = tf.shape(inputs) + else: + noise_shape = self.noise_shape + kept_idx = tf.greater_equal( + backend.random.uniform(noise_shape, seed=self.seed_generator), + self.rate, + ) + kept_idx = tf.cast(kept_idx, inputs.dtype) + + # Get affine transformation params + a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5 + b = -a * alpha_p * self.rate + + # Apply mask + x = inputs * kept_idx + alpha_p * (1 - kept_idx) + + # Do affine transformation + return a * x + b + return inputs + + def get_config(self): + 
config = {"rate": self.rate, "seed": self.seed} + base_config = super().get_config() + return {**base_config, **config} + + def compute_output_shape(self, input_shape): + return input_shape + + +@keras_export("keras._legacy.layers.RandomHeight") +class RandomHeight(Layer): + """DEPRECATED.""" + + def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs): + super().__init__(**kwargs) + self.seed_generator = backend.random.SeedGenerator(seed) + self.factor = factor + if isinstance(factor, (tuple, list)): + self.height_lower = factor[0] + self.height_upper = factor[1] + else: + self.height_lower = -factor + self.height_upper = factor + + if self.height_upper < self.height_lower: + raise ValueError( + "`factor` argument cannot have an upper bound lesser than the " + f"lower bound. Received: factor={factor}" + ) + if self.height_lower < -1.0 or self.height_upper < -1.0: + raise ValueError( + "`factor` argument must have values larger than -1. " + f"Received: factor={factor}" + ) + self.interpolation = interpolation + self.seed = seed + + def call(self, inputs, training=True): + inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype) + + def random_height_inputs(inputs): + """Inputs height-adjusted with random ops.""" + inputs_shape = tf.shape(inputs) + img_hd = tf.cast(inputs_shape[-3], tf.float32) + img_wd = inputs_shape[-2] + height_factor = backend.random.uniform( + shape=[], + minval=(1.0 + self.height_lower), + maxval=(1.0 + self.height_upper), + seed=self.seed_generator, + ) + adjusted_height = tf.cast(height_factor * img_hd, tf.int32) + adjusted_size = tf.stack([adjusted_height, img_wd]) + output = tf.image.resize( + images=inputs, + size=adjusted_size, + method=self.interpolation, + ) + # tf.resize will output float32 regardless of input type. 
+ output = tf.cast(output, self.compute_dtype) + output_shape = inputs.shape.as_list() + output_shape[-3] = None + output.set_shape(output_shape) + return output + + if training: + return random_height_inputs(inputs) + else: + return inputs + + def compute_output_shape(self, input_shape): + input_shape = list(input_shape) + input_shape[-3] = None + return tuple(input_shape) + + def get_config(self): + config = { + "factor": self.factor, + "interpolation": self.interpolation, + "seed": self.seed, + } + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras._legacy.layers.RandomWidth") +class RandomWidth(Layer): + """DEPRECATED.""" + + def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs): + super().__init__(**kwargs) + self.seed_generator = backend.random.SeedGenerator(seed) + self.factor = factor + if isinstance(factor, (tuple, list)): + self.width_lower = factor[0] + self.width_upper = factor[1] + else: + self.width_lower = -factor + self.width_upper = factor + if self.width_upper < self.width_lower: + raise ValueError( + "`factor` argument cannot have an upper bound less than the " + f"lower bound. Received: factor={factor}" + ) + if self.width_lower < -1.0 or self.width_upper < -1.0: + raise ValueError( + "`factor` argument must have values larger than -1. 
" + f"Received: factor={factor}" + ) + self.interpolation = interpolation + self.seed = seed + + def call(self, inputs, training=True): + inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype) + + def random_width_inputs(inputs): + """Inputs width-adjusted with random ops.""" + inputs_shape = tf.shape(inputs) + img_hd = inputs_shape[-3] + img_wd = tf.cast(inputs_shape[-2], tf.float32) + width_factor = backend.random.uniform( + shape=[], + minval=(1.0 + self.width_lower), + maxval=(1.0 + self.width_upper), + seed=self.seed_generator, + ) + adjusted_width = tf.cast(width_factor * img_wd, tf.int32) + adjusted_size = tf.stack([img_hd, adjusted_width]) + output = tf.image.resize( + images=inputs, + size=adjusted_size, + method=self.interpolation, + ) + # tf.resize will output float32 regardless of input type. + output = tf.cast(output, self.compute_dtype) + output_shape = inputs.shape.as_list() + output_shape[-2] = None + output.set_shape(output_shape) + return output + + if training: + return random_width_inputs(inputs) + else: + return inputs + + def compute_output_shape(self, input_shape): + input_shape = list(input_shape) + input_shape[-2] = None + return tuple(input_shape) + + def get_config(self): + config = { + "factor": self.factor, + "interpolation": self.interpolation, + "seed": self.seed, + } + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras._legacy.layers.ThresholdedReLU") +class ThresholdedReLU(Layer): + """DEPRECATED.""" + + def __init__(self, theta=1.0, **kwargs): + super().__init__(**kwargs) + if theta is None: + raise ValueError( + "Theta of a Thresholded ReLU layer cannot be None, expecting a " + f"float. Received: {theta}" + ) + if theta < 0: + raise ValueError( + "The theta value of a Thresholded ReLU layer " + f"should be >=0. 
Received: {theta}" + ) + self.supports_masking = True + self.theta = tf.convert_to_tensor(theta, dtype=self.compute_dtype) + + def call(self, inputs): + dtype = self.compute_dtype + return inputs * tf.cast(tf.greater(inputs, self.theta), dtype) + + def get_config(self): + config = {"theta": float(self.theta)} + base_config = super().get_config() + return {**base_config, **config} + + def compute_output_shape(self, input_shape): + return input_shape diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/losses.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..a84284bfc38d1f38a6d2a53f269dc18eac2feb61 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/losses.py @@ -0,0 +1,20 @@ +from keras.src.api_export import keras_export + + +@keras_export("keras._legacy.losses.Reduction") +class Reduction: + AUTO = "auto" + NONE = "none" + SUM = "sum" + SUM_OVER_BATCH_SIZE = "sum_over_batch_size" + + @classmethod + def all(cls): + return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE) + + @classmethod + def validate(cls, key): + if key not in cls.all(): + raise ValueError( + f'Invalid Reduction Key: {key}. 
Expected keys are "{cls.all()}"' + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/image.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/image.py new file mode 100644 index 0000000000000000000000000000000000000000..4a0e8b44d395bc87106e289109592c97f0191d72 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/image.py @@ -0,0 +1,1892 @@ +"""Deprecated image preprocessing APIs from Keras 1.""" + +import collections +import multiprocessing +import os +import threading +import warnings + +import numpy as np + +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset +from keras.src.utils import image_utils +from keras.src.utils import io_utils +from keras.src.utils.module_utils import scipy + + +@keras_export("keras._legacy.preprocessing.image.Iterator") +class Iterator(PyDataset): + """Base class for image data iterators. + + DEPRECATED. + + Every `Iterator` must implement the `_get_batches_of_transformed_samples` + method. + + Args: + n: Integer, total number of samples in the dataset to loop over. + batch_size: Integer, size of a batch. + shuffle: Boolean, whether to shuffle the data between epochs. + seed: Random seeding for data shuffling. 
+ """ + + white_list_formats = ("png", "jpg", "jpeg", "bmp", "ppm", "tif", "tiff") + + def __init__(self, n, batch_size, shuffle, seed): + self.n = n + self.batch_size = batch_size + self.seed = seed + self.shuffle = shuffle + self.batch_index = 0 + self.total_batches_seen = 0 + self.lock = threading.Lock() + self.index_array = None + self.index_generator = self._flow_index() + + def _set_index_array(self): + self.index_array = np.arange(self.n) + if self.shuffle: + self.index_array = np.random.permutation(self.n) + + def __getitem__(self, idx): + if idx >= len(self): + raise ValueError( + "Asked to retrieve element {idx}, " + "but the Sequence " + "has length {length}".format(idx=idx, length=len(self)) + ) + if self.seed is not None: + np.random.seed(self.seed + self.total_batches_seen) + self.total_batches_seen += 1 + if self.index_array is None: + self._set_index_array() + index_array = self.index_array[ + self.batch_size * idx : self.batch_size * (idx + 1) + ] + return self._get_batches_of_transformed_samples(index_array) + + def __len__(self): + return (self.n + self.batch_size - 1) // self.batch_size # round up + + def on_epoch_end(self): + self._set_index_array() + + def reset(self): + self.batch_index = 0 + + def _flow_index(self): + # Ensure self.batch_index is 0. 
+ self.reset() + while 1: + if self.seed is not None: + np.random.seed(self.seed + self.total_batches_seen) + if self.batch_index == 0: + self._set_index_array() + + if self.n == 0: + # Avoiding modulo by zero error + current_index = 0 + else: + current_index = (self.batch_index * self.batch_size) % self.n + if self.n > current_index + self.batch_size: + self.batch_index += 1 + else: + self.batch_index = 0 + self.total_batches_seen += 1 + yield self.index_array[ + current_index : current_index + self.batch_size + ] + + def __iter__(self): + # Needed if we want to do something like: + # for x, y in data_gen.flow(...): + return self + + def __next__(self): + with self.lock: + index_array = next(self.index_generator) + # The transformation of images is not under thread lock + # so it can be done in parallel + return self._get_batches_of_transformed_samples(index_array) + + def _get_batches_of_transformed_samples(self, index_array): + """Gets a batch of transformed samples. + + Args: + index_array: Array of sample indices to include in batch. + Returns: + A batch of transformed samples. + """ + raise NotImplementedError + + +def _iter_valid_files(directory, white_list_formats, follow_links): + """Iterates on files with extension. + + Args: + directory: Absolute path to the directory + containing files to be counted + white_list_formats: Set of strings containing allowed extensions for + the files to be counted. + follow_links: Boolean, follow symbolic links to subdirectories. + Yields: + Tuple of (root, filename) with extension in `white_list_formats`. + """ + + def _recursive_list(subpath): + return sorted( + os.walk(subpath, followlinks=follow_links), key=lambda x: x[0] + ) + + for root, _, files in _recursive_list(directory): + for fname in sorted(files): + if fname.lower().endswith(".tiff"): + warnings.warn( + 'Using ".tiff" files with multiple bands ' + "will cause distortion. Please verify your output." 
+ ) + if fname.lower().endswith(white_list_formats): + yield root, fname + + +def _list_valid_filenames_in_directory( + directory, white_list_formats, split, class_indices, follow_links +): + """Lists paths of files in `subdir` with extensions in `white_list_formats`. + + Args: + directory: absolute path to a directory containing the files to list. + The directory name is used as class label + and must be a key of `class_indices`. + white_list_formats: set of strings containing allowed extensions for + the files to be counted. + split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into + account a certain fraction of files in each directory. + E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent + of images in each directory. + class_indices: dictionary mapping a class name to its index. + follow_links: boolean, follow symbolic links to subdirectories. + + Returns: + classes: a list of class indices + filenames: the path of valid files in `directory`, relative from + `directory`'s parent (e.g., if `directory` is "dataset/class1", + the filenames will be + `["class1/file1.jpg", "class1/file2.jpg", ...]`). + """ + dirname = os.path.basename(directory) + if split: + all_files = list( + _iter_valid_files(directory, white_list_formats, follow_links) + ) + num_files = len(all_files) + start, stop = int(split[0] * num_files), int(split[1] * num_files) + valid_files = all_files[start:stop] + else: + valid_files = _iter_valid_files( + directory, white_list_formats, follow_links + ) + classes = [] + filenames = [] + for root, fname in valid_files: + classes.append(class_indices[dirname]) + absolute_path = os.path.join(root, fname) + relative_path = os.path.join( + dirname, os.path.relpath(absolute_path, directory) + ) + filenames.append(relative_path) + + return classes, filenames + + +class BatchFromFilesMixin: + """Adds methods related to getting batches from filenames. + + It includes the logic to transform image files to batches. 
+ """ + + def set_processing_attrs( + self, + image_data_generator, + target_size, + color_mode, + data_format, + save_to_dir, + save_prefix, + save_format, + subset, + interpolation, + keep_aspect_ratio, + ): + """Sets attributes to use later for processing files into a batch. + + Args: + image_data_generator: Instance of `ImageDataGenerator` + to use for random transformations and normalization. + target_size: tuple of integers, dimensions to resize input images + to. + color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`. + Color mode to read images. + data_format: String, one of `channels_first`, `channels_last`. + save_to_dir: Optional directory where to save the pictures + being yielded, in a viewable format. This is useful + for visualizing the random transformations being + applied, for debugging purposes. + save_prefix: String prefix to use for saving sample + images (if `save_to_dir` is set). + save_format: Format to use for saving sample images + (if `save_to_dir` is set). + subset: Subset of data (`"training"` or `"validation"`) if + validation_split is set in ImageDataGenerator. + interpolation: Interpolation method used to resample the image if + the target size is different from that of the loaded image. + Supported methods are "nearest", "bilinear", and "bicubic". If + PIL version 1.1.3 or newer is installed, "lanczos" is also + supported. If PIL version 3.4.0 or newer is installed, "box" and + "hamming" are also supported. By default, "nearest" is used. + keep_aspect_ratio: Boolean, whether to resize images to a target + size without aspect ratio distortion. The image is cropped in + the center with target aspect ratio before resizing. + """ + self.image_data_generator = image_data_generator + self.target_size = tuple(target_size) + self.keep_aspect_ratio = keep_aspect_ratio + if color_mode not in {"rgb", "rgba", "grayscale"}: + raise ValueError( + f"Invalid color mode: {color_mode}" + '; expected "rgb", "rgba", or "grayscale".' 
+ ) + self.color_mode = color_mode + self.data_format = data_format + if self.color_mode == "rgba": + if self.data_format == "channels_last": + self.image_shape = self.target_size + (4,) + else: + self.image_shape = (4,) + self.target_size + elif self.color_mode == "rgb": + if self.data_format == "channels_last": + self.image_shape = self.target_size + (3,) + else: + self.image_shape = (3,) + self.target_size + else: + if self.data_format == "channels_last": + self.image_shape = self.target_size + (1,) + else: + self.image_shape = (1,) + self.target_size + self.save_to_dir = save_to_dir + self.save_prefix = save_prefix + self.save_format = save_format + self.interpolation = interpolation + if subset is not None: + validation_split = self.image_data_generator._validation_split + if subset == "validation": + split = (0, validation_split) + elif subset == "training": + split = (validation_split, 1) + else: + raise ValueError( + f"Invalid subset name: {subset};" + 'expected "training" or "validation"' + ) + else: + split = None + self.split = split + self.subset = subset + + def _get_batches_of_transformed_samples(self, index_array): + """Gets a batch of transformed samples. + + Args: + index_array: Array of sample indices to include in batch. + Returns: + A batch of transformed samples. + """ + batch_x = np.zeros( + (len(index_array),) + self.image_shape, dtype=self.dtype + ) + # build batch of image data + # self.filepaths is dynamic, is better to call it once outside the loop + filepaths = self.filepaths + for i, j in enumerate(index_array): + img = image_utils.load_img( + filepaths[j], + color_mode=self.color_mode, + target_size=self.target_size, + interpolation=self.interpolation, + keep_aspect_ratio=self.keep_aspect_ratio, + ) + x = image_utils.img_to_array(img, data_format=self.data_format) + # Pillow images should be closed after `load_img`, + # but not PIL images. 
+ if hasattr(img, "close"): + img.close() + if self.image_data_generator: + params = self.image_data_generator.get_random_transform(x.shape) + x = self.image_data_generator.apply_transform(x, params) + x = self.image_data_generator.standardize(x) + batch_x[i] = x + # optionally save augmented images to disk for debugging purposes + if self.save_to_dir: + for i, j in enumerate(index_array): + img = image_utils.array_to_img( + batch_x[i], self.data_format, scale=True + ) + fname = "{prefix}_{index}_{hash}.{format}".format( + prefix=self.save_prefix, + index=j, + hash=np.random.randint(1e7), + format=self.save_format, + ) + img.save(os.path.join(self.save_to_dir, fname)) + # build batch of labels + if self.class_mode == "input": + batch_y = batch_x.copy() + elif self.class_mode in {"binary", "sparse"}: + batch_y = np.empty(len(batch_x), dtype=self.dtype) + for i, n_observation in enumerate(index_array): + batch_y[i] = self.classes[n_observation] + elif self.class_mode == "categorical": + batch_y = np.zeros( + (len(batch_x), len(self.class_indices)), dtype=self.dtype + ) + for i, n_observation in enumerate(index_array): + batch_y[i, self.classes[n_observation]] = 1.0 + elif self.class_mode == "multi_output": + batch_y = [output[index_array] for output in self.labels] + elif self.class_mode == "raw": + batch_y = self.labels[index_array] + else: + return batch_x + if self.sample_weight is None: + return batch_x, batch_y + else: + return batch_x, batch_y, self.sample_weight[index_array] + + @property + def filepaths(self): + """List of absolute paths to image files.""" + raise NotImplementedError( + "`filepaths` property method has not " + "been implemented in {}.".format(type(self).__name__) + ) + + @property + def labels(self): + """Class labels of every observation.""" + raise NotImplementedError( + "`labels` property method has not been implemented in {}.".format( + type(self).__name__ + ) + ) + + @property + def sample_weight(self): + raise NotImplementedError( + 
"`sample_weight` property method has not " + "been implemented in {}.".format(type(self).__name__) + ) + + +@keras_export("keras._legacy.preprocessing.image.DirectoryIterator") +class DirectoryIterator(BatchFromFilesMixin, Iterator): + """Iterator capable of reading images from a directory on disk. + + DEPRECATED. + """ + + allowed_class_modes = {"categorical", "binary", "sparse", "input", None} + + def __init__( + self, + directory, + image_data_generator, + target_size=(256, 256), + color_mode="rgb", + classes=None, + class_mode="categorical", + batch_size=32, + shuffle=True, + seed=None, + data_format=None, + save_to_dir=None, + save_prefix="", + save_format="png", + follow_links=False, + subset=None, + interpolation="nearest", + keep_aspect_ratio=False, + dtype=None, + ): + if data_format is None: + data_format = backend.image_data_format() + if dtype is None: + dtype = backend.floatx() + super().set_processing_attrs( + image_data_generator, + target_size, + color_mode, + data_format, + save_to_dir, + save_prefix, + save_format, + subset, + interpolation, + keep_aspect_ratio, + ) + self.directory = directory + self.classes = classes + if class_mode not in self.allowed_class_modes: + raise ValueError( + "Invalid class_mode: {}; expected one of: {}".format( + class_mode, self.allowed_class_modes + ) + ) + self.class_mode = class_mode + self.dtype = dtype + # First, count the number of samples and classes. + self.samples = 0 + + if not classes: + classes = [] + for subdir in sorted(os.listdir(directory)): + if os.path.isdir(os.path.join(directory, subdir)): + classes.append(subdir) + self.num_classes = len(classes) + self.class_indices = dict(zip(classes, range(len(classes)))) + + pool = multiprocessing.pool.ThreadPool() + + # Second, build an index of the images + # in the different class subfolders. 
+ results = [] + self.filenames = [] + i = 0 + for dirpath in (os.path.join(directory, subdir) for subdir in classes): + results.append( + pool.apply_async( + _list_valid_filenames_in_directory, + ( + dirpath, + self.white_list_formats, + self.split, + self.class_indices, + follow_links, + ), + ) + ) + classes_list = [] + for res in results: + classes, filenames = res.get() + classes_list.append(classes) + self.filenames += filenames + self.samples = len(self.filenames) + self.classes = np.zeros((self.samples,), dtype="int32") + for classes in classes_list: + self.classes[i : i + len(classes)] = classes + i += len(classes) + + io_utils.print_msg( + f"Found {self.samples} images belonging to " + f"{self.num_classes} classes." + ) + pool.close() + pool.join() + self._filepaths = [ + os.path.join(self.directory, fname) for fname in self.filenames + ] + super().__init__(self.samples, batch_size, shuffle, seed) + + @property + def filepaths(self): + return self._filepaths + + @property + def labels(self): + return self.classes + + @property # mixin needs this property to work + def sample_weight(self): + # no sample weights will be returned + return None + + +@keras_export("keras._legacy.preprocessing.image.NumpyArrayIterator") +class NumpyArrayIterator(Iterator): + """Iterator yielding data from a Numpy array. + + DEPRECATED. 
+ """ + + def __init__( + self, + x, + y, + image_data_generator, + batch_size=32, + shuffle=False, + sample_weight=None, + seed=None, + data_format=None, + save_to_dir=None, + save_prefix="", + save_format="png", + subset=None, + ignore_class_split=False, + dtype=None, + ): + if data_format is None: + data_format = backend.image_data_format() + if dtype is None: + dtype = backend.floatx() + self.dtype = dtype + if isinstance(x, tuple) or isinstance(x, list): + if not isinstance(x[1], list): + x_misc = [np.asarray(x[1])] + else: + x_misc = [np.asarray(xx) for xx in x[1]] + x = x[0] + for xx in x_misc: + if len(x) != len(xx): + raise ValueError( + "All of the arrays in `x` " + "should have the same length. " + "Found a pair with: " + f"len(x[0]) = {len(x)}, len(x[?]) = {len(xx)}" + ) + else: + x_misc = [] + + if y is not None and len(x) != len(y): + raise ValueError( + "`x` (images tensor) and `y` (labels) " + "should have the same length. " + f"Found: x.shape = {np.asarray(x).shape}, " + f"y.shape = {np.asarray(y).shape}" + ) + if sample_weight is not None and len(x) != len(sample_weight): + raise ValueError( + "`x` (images tensor) and `sample_weight` " + "should have the same length. " + f"Found: x.shape = {np.asarray(x).shape}, " + f"sample_weight.shape = {np.asarray(sample_weight).shape}" + ) + if subset is not None: + if subset not in {"training", "validation"}: + raise ValueError( + f"Invalid subset name: {subset}" + '; expected "training" or "validation".' + ) + split_idx = int(len(x) * image_data_generator._validation_split) + + if ( + y is not None + and not ignore_class_split + and not np.array_equal( + np.unique(y[:split_idx]), np.unique(y[split_idx:]) + ) + ): + raise ValueError( + "Training and validation subsets " + "have different number of classes after " + "the split. If your numpy arrays are " + "sorted by the label, you might want " + "to shuffle them." 
+ ) + + if subset == "validation": + x = x[:split_idx] + x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc] + if y is not None: + y = y[:split_idx] + else: + x = x[split_idx:] + x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc] + if y is not None: + y = y[split_idx:] + + self.x = np.asarray(x, dtype=self.dtype) + self.x_misc = x_misc + if self.x.ndim != 4: + raise ValueError( + "Input data in `NumpyArrayIterator` " + "should have rank 4. You passed an array " + f"with shape {self.x.shape}" + ) + channels_axis = 3 if data_format == "channels_last" else 1 + if self.x.shape[channels_axis] not in {1, 3, 4}: + warnings.warn( + 'NumpyArrayIterator is set to use the data format convention "' + + data_format + + '" (channels on axis ' + + str(channels_axis) + + "), i.e. expected either 1, 3, or 4 channels on axis " + + str(channels_axis) + + ". However, it was passed an array with shape " + + str(self.x.shape) + + " (" + + str(self.x.shape[channels_axis]) + + " channels)." + ) + if y is not None: + self.y = np.asarray(y) + else: + self.y = None + if sample_weight is not None: + self.sample_weight = np.asarray(sample_weight) + else: + self.sample_weight = None + self.image_data_generator = image_data_generator + self.data_format = data_format + self.save_to_dir = save_to_dir + self.save_prefix = save_prefix + self.save_format = save_format + super().__init__(x.shape[0], batch_size, shuffle, seed) + + def _get_batches_of_transformed_samples(self, index_array): + batch_x = np.zeros( + tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype + ) + for i, j in enumerate(index_array): + x = self.x[j] + params = self.image_data_generator.get_random_transform(x.shape) + x = self.image_data_generator.apply_transform( + x.astype(self.dtype), params + ) + x = self.image_data_generator.standardize(x) + batch_x[i] = x + + if self.save_to_dir: + for i, j in enumerate(index_array): + img = image_utils.array_to_img( + batch_x[i], self.data_format, scale=True + ) + 
fname = "{prefix}_{index}_{hash}.{format}".format( + prefix=self.save_prefix, + index=j, + hash=np.random.randint(1e4), + format=self.save_format, + ) + img.save(os.path.join(self.save_to_dir, fname)) + batch_x_miscs = [xx[index_array] for xx in self.x_misc] + output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,) + if self.y is None: + return output[0] + output += (self.y[index_array],) + if self.sample_weight is not None: + output += (self.sample_weight[index_array],) + return output + + +def validate_filename(filename, white_list_formats): + """Check if a filename refers to a valid file. + + Args: + filename: String, absolute path to a file + white_list_formats: Set, allowed file extensions + Returns: + A boolean value indicating if the filename is valid or not + """ + return filename.lower().endswith(white_list_formats) and os.path.isfile( + filename + ) + + +class DataFrameIterator(BatchFromFilesMixin, Iterator): + """Iterator capable of reading images from a directory as a dataframe.""" + + allowed_class_modes = { + "binary", + "categorical", + "input", + "multi_output", + "raw", + "sparse", + None, + } + + def __init__( + self, + dataframe, + directory=None, + image_data_generator=None, + x_col="filename", + y_col="class", + weight_col=None, + target_size=(256, 256), + color_mode="rgb", + classes=None, + class_mode="categorical", + batch_size=32, + shuffle=True, + seed=None, + data_format="channels_last", + save_to_dir=None, + save_prefix="", + save_format="png", + subset=None, + interpolation="nearest", + keep_aspect_ratio=False, + dtype="float32", + validate_filenames=True, + ): + super().set_processing_attrs( + image_data_generator, + target_size, + color_mode, + data_format, + save_to_dir, + save_prefix, + save_format, + subset, + interpolation, + keep_aspect_ratio, + ) + df = dataframe.copy() + self.directory = directory or "" + self.class_mode = class_mode + self.dtype = dtype + # check that inputs match the required class_mode + 
self._check_params(df, x_col, y_col, weight_col, classes) + if ( + validate_filenames + ): # check which image files are valid and keep them + df = self._filter_valid_filepaths(df, x_col) + if class_mode not in ["input", "multi_output", "raw", None]: + df, classes = self._filter_classes(df, y_col, classes) + num_classes = len(classes) + # build an index of all the unique classes + self.class_indices = dict(zip(classes, range(len(classes)))) + # retrieve only training or validation set + if self.split: + num_files = len(df) + start = int(self.split[0] * num_files) + stop = int(self.split[1] * num_files) + df = df.iloc[start:stop, :] + # get labels for each observation + if class_mode not in ["input", "multi_output", "raw", None]: + self.classes = self.get_classes(df, y_col) + self.filenames = df[x_col].tolist() + self._sample_weight = df[weight_col].values if weight_col else None + + if class_mode == "multi_output": + self._targets = [np.array(df[col].tolist()) for col in y_col] + if class_mode == "raw": + self._targets = df[y_col].values + self.samples = len(self.filenames) + validated_string = ( + "validated" if validate_filenames else "non-validated" + ) + if class_mode in ["input", "multi_output", "raw", None]: + io_utils.print_msg( + f"Found {self.samples} {validated_string} image filenames." + ) + else: + io_utils.print_msg( + f"Found {self.samples} {validated_string} image filenames " + f"belonging to {num_classes} classes." 
+ ) + self._filepaths = [ + os.path.join(self.directory, fname) for fname in self.filenames + ] + super().__init__(self.samples, batch_size, shuffle, seed) + + def _check_params(self, df, x_col, y_col, weight_col, classes): + # check class mode is one of the currently supported + if self.class_mode not in self.allowed_class_modes: + raise ValueError( + "Invalid class_mode: {}; expected one of: {}".format( + self.class_mode, self.allowed_class_modes + ) + ) + # check that y_col has several column names if class_mode is + # multi_output + if (self.class_mode == "multi_output") and not isinstance(y_col, list): + raise TypeError( + 'If class_mode="{}", y_col must be a list. Received {}.'.format( + self.class_mode, type(y_col).__name__ + ) + ) + # check that filenames/filepaths column values are all strings + if not all(df[x_col].apply(lambda x: isinstance(x, str))): + raise TypeError( + f"All values in column x_col={x_col} must be strings." + ) + # check labels are string if class_mode is binary or sparse + if self.class_mode in {"binary", "sparse"}: + if not all(df[y_col].apply(lambda x: isinstance(x, str))): + raise TypeError( + 'If class_mode="{}", y_col="{}" column ' + "values must be strings.".format(self.class_mode, y_col) + ) + # check that if binary there are only 2 different classes + if self.class_mode == "binary": + if classes: + classes = set(classes) + if len(classes) != 2: + raise ValueError( + 'If class_mode="binary" there must be 2 ' + "classes. {} class/es were given.".format(len(classes)) + ) + elif df[y_col].nunique() != 2: + raise ValueError( + 'If class_mode="binary" there must be 2 classes. 
' + "Found {} classes.".format(df[y_col].nunique()) + ) + # check values are string, list or tuple if class_mode is categorical + if self.class_mode == "categorical": + types = (str, list, tuple) + if not all(df[y_col].apply(lambda x: isinstance(x, types))): + raise TypeError( + 'If class_mode="{}", y_col="{}" column ' + "values must be type string, list or tuple.".format( + self.class_mode, y_col + ) + ) + # raise warning if classes are given but will be unused + if classes and self.class_mode in { + "input", + "multi_output", + "raw", + None, + }: + warnings.warn( + '`classes` will be ignored given the class_mode="{}"'.format( + self.class_mode + ) + ) + # check that if weight column that the values are numerical + if weight_col and not issubclass(df[weight_col].dtype.type, np.number): + raise TypeError(f"Column weight_col={weight_col} must be numeric.") + + def get_classes(self, df, y_col): + labels = [] + for label in df[y_col]: + if isinstance(label, (list, tuple)): + labels.append([self.class_indices[lbl] for lbl in label]) + else: + labels.append(self.class_indices[label]) + return labels + + @staticmethod + def _filter_classes(df, y_col, classes): + df = df.copy() + + def remove_classes(labels, classes): + if isinstance(labels, (list, tuple)): + labels = [cls for cls in labels if cls in classes] + return labels or None + elif isinstance(labels, str): + return labels if labels in classes else None + else: + raise TypeError( + "Expect string, list or tuple " + "but found {} in {} column ".format(type(labels), y_col) + ) + + if classes: + # prepare for membership lookup + classes = list(collections.OrderedDict.fromkeys(classes).keys()) + df[y_col] = df[y_col].apply(lambda x: remove_classes(x, classes)) + else: + classes = set() + for v in df[y_col]: + if isinstance(v, (list, tuple)): + classes.update(v) + else: + classes.add(v) + classes = sorted(classes) + return df.dropna(subset=[y_col]), classes + + def _filter_valid_filepaths(self, df, x_col): + """Keep 
only dataframe rows with valid filenames. + + Args: + df: Pandas dataframe containing filenames in a column + x_col: string, column in `df` that contains the filenames or + filepaths + Returns: + absolute paths to image files + """ + filepaths = df[x_col].map( + lambda fname: os.path.join(self.directory, fname) + ) + mask = filepaths.apply( + validate_filename, args=(self.white_list_formats,) + ) + n_invalid = (~mask).sum() + if n_invalid: + warnings.warn( + 'Found {} invalid image filename(s) in x_col="{}". ' + "These filename(s) will be ignored.".format(n_invalid, x_col) + ) + return df[mask] + + @property + def filepaths(self): + return self._filepaths + + @property + def labels(self): + if self.class_mode in {"multi_output", "raw"}: + return self._targets + else: + return self.classes + + @property + def sample_weight(self): + return self._sample_weight + + +def flip_axis(x, axis): + x = np.asarray(x).swapaxes(axis, 0) + x = x[::-1, ...] + x = x.swapaxes(0, axis) + return x + + +@keras_export("keras._legacy.preprocessing.image.ImageDataGenerator") +class ImageDataGenerator: + """DEPRECATED.""" + + def __init__( + self, + featurewise_center=False, + samplewise_center=False, + featurewise_std_normalization=False, + samplewise_std_normalization=False, + zca_whitening=False, + zca_epsilon=1e-6, + rotation_range=0, + width_shift_range=0.0, + height_shift_range=0.0, + brightness_range=None, + shear_range=0.0, + zoom_range=0.0, + channel_shift_range=0.0, + fill_mode="nearest", + cval=0.0, + horizontal_flip=False, + vertical_flip=False, + rescale=None, + preprocessing_function=None, + data_format=None, + validation_split=0.0, + interpolation_order=1, + dtype=None, + ): + if data_format is None: + data_format = backend.image_data_format() + if dtype is None: + dtype = backend.floatx() + + self.featurewise_center = featurewise_center + self.samplewise_center = samplewise_center + self.featurewise_std_normalization = featurewise_std_normalization + 
self.samplewise_std_normalization = samplewise_std_normalization + self.zca_whitening = zca_whitening + self.zca_epsilon = zca_epsilon + self.rotation_range = rotation_range + self.width_shift_range = width_shift_range + self.height_shift_range = height_shift_range + self.shear_range = shear_range + self.zoom_range = zoom_range + self.channel_shift_range = channel_shift_range + self.fill_mode = fill_mode + self.cval = cval + self.horizontal_flip = horizontal_flip + self.vertical_flip = vertical_flip + self.rescale = rescale + self.preprocessing_function = preprocessing_function + self.dtype = dtype + self.interpolation_order = interpolation_order + + if data_format not in {"channels_last", "channels_first"}: + raise ValueError( + '`data_format` should be `"channels_last"` ' + "(channel after row and column) or " + '`"channels_first"` (channel before row and column). ' + f"Received: {data_format}" + ) + self.data_format = data_format + if data_format == "channels_first": + self.channel_axis = 1 + self.row_axis = 2 + self.col_axis = 3 + if data_format == "channels_last": + self.channel_axis = 3 + self.row_axis = 1 + self.col_axis = 2 + if validation_split and not 0 < validation_split < 1: + raise ValueError( + "`validation_split` must be strictly between 0 and 1. " + f" Received: {validation_split}" + ) + self._validation_split = validation_split + + self.mean = None + self.std = None + self.zca_whitening_matrix = None + + if isinstance(zoom_range, (float, int)): + self.zoom_range = [1 - zoom_range, 1 + zoom_range] + elif len(zoom_range) == 2 and all( + isinstance(val, (float, int)) for val in zoom_range + ): + self.zoom_range = [zoom_range[0], zoom_range[1]] + else: + raise ValueError( + "`zoom_range` should be a float or " + "a tuple or list of two floats. 
" + f"Received: {zoom_range}" + ) + if zca_whitening: + if not featurewise_center: + self.featurewise_center = True + warnings.warn( + "This ImageDataGenerator specifies " + "`zca_whitening`, which overrides " + "setting of `featurewise_center`." + ) + if featurewise_std_normalization: + self.featurewise_std_normalization = False + warnings.warn( + "This ImageDataGenerator specifies " + "`zca_whitening` " + "which overrides setting of" + "`featurewise_std_normalization`." + ) + if featurewise_std_normalization: + if not featurewise_center: + self.featurewise_center = True + warnings.warn( + "This ImageDataGenerator specifies " + "`featurewise_std_normalization`, " + "which overrides setting of " + "`featurewise_center`." + ) + if samplewise_std_normalization: + if not samplewise_center: + self.samplewise_center = True + warnings.warn( + "This ImageDataGenerator specifies " + "`samplewise_std_normalization`, " + "which overrides setting of " + "`samplewise_center`." + ) + if brightness_range is not None: + if ( + not isinstance(brightness_range, (tuple, list)) + or len(brightness_range) != 2 + ): + raise ValueError( + "`brightness_range should be tuple or list of two floats. 
" + f"Received: {brightness_range}" + ) + self.brightness_range = brightness_range + + def flow( + self, + x, + y=None, + batch_size=32, + shuffle=True, + sample_weight=None, + seed=None, + save_to_dir=None, + save_prefix="", + save_format="png", + ignore_class_split=False, + subset=None, + ): + return NumpyArrayIterator( + x, + y, + self, + batch_size=batch_size, + shuffle=shuffle, + sample_weight=sample_weight, + seed=seed, + data_format=self.data_format, + save_to_dir=save_to_dir, + save_prefix=save_prefix, + save_format=save_format, + ignore_class_split=ignore_class_split, + subset=subset, + dtype=self.dtype, + ) + + def flow_from_directory( + self, + directory, + target_size=(256, 256), + color_mode="rgb", + classes=None, + class_mode="categorical", + batch_size=32, + shuffle=True, + seed=None, + save_to_dir=None, + save_prefix="", + save_format="png", + follow_links=False, + subset=None, + interpolation="nearest", + keep_aspect_ratio=False, + ): + return DirectoryIterator( + directory, + self, + target_size=target_size, + color_mode=color_mode, + keep_aspect_ratio=keep_aspect_ratio, + classes=classes, + class_mode=class_mode, + data_format=self.data_format, + batch_size=batch_size, + shuffle=shuffle, + seed=seed, + save_to_dir=save_to_dir, + save_prefix=save_prefix, + save_format=save_format, + follow_links=follow_links, + subset=subset, + interpolation=interpolation, + dtype=self.dtype, + ) + + def flow_from_dataframe( + self, + dataframe, + directory=None, + x_col="filename", + y_col="class", + weight_col=None, + target_size=(256, 256), + color_mode="rgb", + classes=None, + class_mode="categorical", + batch_size=32, + shuffle=True, + seed=None, + save_to_dir=None, + save_prefix="", + save_format="png", + subset=None, + interpolation="nearest", + validate_filenames=True, + **kwargs, + ): + if "has_ext" in kwargs: + warnings.warn( + "has_ext is deprecated, filenames in the dataframe have " + "to match the exact filenames in disk.", + DeprecationWarning, + ) + 
if "sort" in kwargs: + warnings.warn( + "sort is deprecated, batches will be created in the" + "same order than the filenames provided if `shuffle`" + "is set to `False`.", + DeprecationWarning, + ) + if class_mode == "other": + warnings.warn( + '`class_mode="other"` is deprecated, please use ' + '`class_mode="raw"`.', + DeprecationWarning, + ) + class_mode = "raw" + if "drop_duplicates" in kwargs: + warnings.warn( + "drop_duplicates is deprecated, you can drop duplicates " + "by using the pandas.DataFrame.drop_duplicates method.", + DeprecationWarning, + ) + + return DataFrameIterator( + dataframe, + directory, + self, + x_col=x_col, + y_col=y_col, + weight_col=weight_col, + target_size=target_size, + color_mode=color_mode, + classes=classes, + class_mode=class_mode, + data_format=self.data_format, + batch_size=batch_size, + shuffle=shuffle, + seed=seed, + save_to_dir=save_to_dir, + save_prefix=save_prefix, + save_format=save_format, + subset=subset, + interpolation=interpolation, + validate_filenames=validate_filenames, + dtype=self.dtype, + ) + + def standardize(self, x): + """Applies the normalization configuration in-place to a batch of + inputs. + + `x` is changed in-place since the function is mainly used internally + to standardize images and feed them to your network. If a copy of `x` + would be created instead it would have a significant performance cost. + If you want to apply this method without changing the input in-place + you can call the method creating a copy before: + + standardize(np.copy(x)) + + Args: + x: Batch of inputs to be normalized. + + Returns: + The inputs, normalized. 
+ """ + if self.preprocessing_function: + x = self.preprocessing_function(x) + if self.rescale: + x *= self.rescale + if self.samplewise_center: + x -= np.mean(x, keepdims=True) + if self.samplewise_std_normalization: + x /= np.std(x, keepdims=True) + 1e-6 + + if self.featurewise_center: + if self.mean is not None: + x -= self.mean + else: + warnings.warn( + "This ImageDataGenerator specifies " + "`featurewise_center`, but it hasn't " + "been fit on any training data. Fit it " + "first by calling `.fit(numpy_data)`." + ) + if self.featurewise_std_normalization: + if self.std is not None: + x /= self.std + 1e-6 + else: + warnings.warn( + "This ImageDataGenerator specifies " + "`featurewise_std_normalization`, " + "but it hasn't " + "been fit on any training data. Fit it " + "first by calling `.fit(numpy_data)`." + ) + if self.zca_whitening: + if self.zca_whitening_matrix is not None: + flat_x = x.reshape(-1, np.prod(x.shape[-3:])) + white_x = flat_x @ self.zca_whitening_matrix + x = np.reshape(white_x, x.shape) + else: + warnings.warn( + "This ImageDataGenerator specifies " + "`zca_whitening`, but it hasn't " + "been fit on any training data. Fit it " + "first by calling `.fit(numpy_data)`." + ) + return x + + def get_random_transform(self, img_shape, seed=None): + """Generates random parameters for a transformation. + + Args: + img_shape: Tuple of integers. + Shape of the image that is transformed. + seed: Random seed. + + Returns: + A dictionary containing randomly chosen parameters describing the + transformation. 
+ """ + img_row_axis = self.row_axis - 1 + img_col_axis = self.col_axis - 1 + + if seed is not None: + np.random.seed(seed) + + if self.rotation_range: + theta = np.random.uniform(-self.rotation_range, self.rotation_range) + else: + theta = 0 + + if self.height_shift_range: + try: # 1-D array-like or int + tx = np.random.choice(self.height_shift_range) + tx *= np.random.choice([-1, 1]) + except ValueError: # floating point + tx = np.random.uniform( + -self.height_shift_range, self.height_shift_range + ) + if np.max(self.height_shift_range) < 1: + tx *= img_shape[img_row_axis] + else: + tx = 0 + + if self.width_shift_range: + try: # 1-D array-like or int + ty = np.random.choice(self.width_shift_range) + ty *= np.random.choice([-1, 1]) + except ValueError: # floating point + ty = np.random.uniform( + -self.width_shift_range, self.width_shift_range + ) + if np.max(self.width_shift_range) < 1: + ty *= img_shape[img_col_axis] + else: + ty = 0 + + if self.shear_range: + shear = np.random.uniform(-self.shear_range, self.shear_range) + else: + shear = 0 + + if self.zoom_range[0] == 1 and self.zoom_range[1] == 1: + zx, zy = 1, 1 + else: + zx, zy = np.random.uniform( + self.zoom_range[0], self.zoom_range[1], 2 + ) + + flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip + flip_vertical = (np.random.random() < 0.5) * self.vertical_flip + + channel_shift_intensity = None + if self.channel_shift_range != 0: + channel_shift_intensity = np.random.uniform( + -self.channel_shift_range, self.channel_shift_range + ) + + brightness = None + if self.brightness_range is not None: + brightness = np.random.uniform( + self.brightness_range[0], self.brightness_range[1] + ) + + transform_parameters = { + "theta": theta, + "tx": tx, + "ty": ty, + "shear": shear, + "zx": zx, + "zy": zy, + "flip_horizontal": flip_horizontal, + "flip_vertical": flip_vertical, + "channel_shift_intensity": channel_shift_intensity, + "brightness": brightness, + } + + return transform_parameters + + 
def apply_transform(self, x, transform_parameters): + """Applies a transformation to an image according to given parameters. + + Args: + x: 3D tensor, single image. + transform_parameters: Dictionary with string - parameter pairs + describing the transformation. + Currently, the following parameters + from the dictionary are used: + - `'theta'`: Float. Rotation angle in degrees. + - `'tx'`: Float. Shift in the x direction. + - `'ty'`: Float. Shift in the y direction. + - `'shear'`: Float. Shear angle in degrees. + - `'zx'`: Float. Zoom in the x direction. + - `'zy'`: Float. Zoom in the y direction. + - `'flip_horizontal'`: Boolean. Horizontal flip. + - `'flip_vertical'`: Boolean. Vertical flip. + - `'channel_shift_intensity'`: Float. Channel shift intensity. + - `'brightness'`: Float. Brightness shift intensity. + + Returns: + A transformed version of the input (same shape). + """ + # x is a single image, so it doesn't have image number at index 0 + img_row_axis = self.row_axis - 1 + img_col_axis = self.col_axis - 1 + img_channel_axis = self.channel_axis - 1 + + x = apply_affine_transform( + x, + transform_parameters.get("theta", 0), + transform_parameters.get("tx", 0), + transform_parameters.get("ty", 0), + transform_parameters.get("shear", 0), + transform_parameters.get("zx", 1), + transform_parameters.get("zy", 1), + row_axis=img_row_axis, + col_axis=img_col_axis, + channel_axis=img_channel_axis, + fill_mode=self.fill_mode, + cval=self.cval, + order=self.interpolation_order, + ) + + if transform_parameters.get("channel_shift_intensity") is not None: + x = apply_channel_shift( + x, + transform_parameters["channel_shift_intensity"], + img_channel_axis, + ) + + if transform_parameters.get("flip_horizontal", False): + x = flip_axis(x, img_col_axis) + + if transform_parameters.get("flip_vertical", False): + x = flip_axis(x, img_row_axis) + + if transform_parameters.get("brightness") is not None: + x = apply_brightness_shift( + x, transform_parameters["brightness"], 
False + ) + + return x + + def random_transform(self, x, seed=None): + """Applies a random transformation to an image. + + Args: + x: 3D tensor, single image. + seed: Random seed. + + Returns: + A randomly transformed version of the input (same shape). + """ + params = self.get_random_transform(x.shape, seed) + return self.apply_transform(x, params) + + def fit(self, x, augment=False, rounds=1, seed=None): + """Fits the data generator to some sample data. + + This computes the internal data stats related to the + data-dependent transformations, based on an array of sample data. + + Only required if `featurewise_center` or + `featurewise_std_normalization` or `zca_whitening` + are set to `True`. + + When `rescale` is set to a value, rescaling is applied to + sample data before computing the internal data stats. + + Args: + x: Sample data. Should have rank 4. + In case of grayscale data, + the channels axis should have value 1, in case + of RGB data, it should have value 3, and in case + of RGBA data, it should have value 4. + augment: Boolean (default: False). + Whether to fit on randomly augmented samples. + rounds: Int (default: 1). + If using data augmentation (`augment=True`), + this is how many augmentation passes over the data to use. + seed: Int (default: None). Random seed. + """ + x = np.asarray(x, dtype=self.dtype) + if x.ndim != 4: + raise ValueError( + "Input to `.fit()` should have rank 4. Got array with shape: " + + str(x.shape) + ) + if x.shape[self.channel_axis] not in {1, 3, 4}: + warnings.warn( + "Expected input to be images (as Numpy array) " + 'following the data format convention "' + + self.data_format + + '" (channels on axis ' + + str(self.channel_axis) + + "), i.e. expected either 1, 3 or 4 channels on axis " + + str(self.channel_axis) + + ". However, it was passed an array with shape " + + str(x.shape) + + " (" + + str(x.shape[self.channel_axis]) + + " channels)." 
+ ) + + if seed is not None: + np.random.seed(seed) + + x = np.copy(x) + if self.rescale: + x *= self.rescale + + if augment: + ax = np.zeros( + tuple([rounds * x.shape[0]] + list(x.shape)[1:]), + dtype=self.dtype, + ) + for r in range(rounds): + for i in range(x.shape[0]): + ax[i + r * x.shape[0]] = self.random_transform(x[i]) + x = ax + + if self.featurewise_center: + self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis)) + broadcast_shape = [1, 1, 1] + broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis] + self.mean = np.reshape(self.mean, broadcast_shape) + x -= self.mean + + if self.featurewise_std_normalization: + self.std = np.std(x, axis=(0, self.row_axis, self.col_axis)) + broadcast_shape = [1, 1, 1] + broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis] + self.std = np.reshape(self.std, broadcast_shape) + x /= self.std + 1e-6 + + if self.zca_whitening: + n = len(x) + flat_x = np.reshape(x, (n, -1)) + + u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False) + s_inv = np.sqrt(n) / (s + self.zca_epsilon) + self.zca_whitening_matrix = (u * s_inv).dot(u.T) + + +@keras_export("keras._legacy.preprocessing.image.random_rotation") +def random_rotation( + x, + rg, + row_axis=1, + col_axis=2, + channel_axis=0, + fill_mode="nearest", + cval=0.0, + interpolation_order=1, +): + """DEPRECATED.""" + theta = np.random.uniform(-rg, rg) + x = apply_affine_transform( + x, + theta=theta, + row_axis=row_axis, + col_axis=col_axis, + channel_axis=channel_axis, + fill_mode=fill_mode, + cval=cval, + order=interpolation_order, + ) + return x + + +@keras_export("keras._legacy.preprocessing.image.random_shift") +def random_shift( + x, + wrg, + hrg, + row_axis=1, + col_axis=2, + channel_axis=0, + fill_mode="nearest", + cval=0.0, + interpolation_order=1, +): + """DEPRECATED.""" + h, w = x.shape[row_axis], x.shape[col_axis] + tx = np.random.uniform(-hrg, hrg) * h + ty = np.random.uniform(-wrg, wrg) * w + x = apply_affine_transform( + x, + tx=tx, + 
ty=ty, + row_axis=row_axis, + col_axis=col_axis, + channel_axis=channel_axis, + fill_mode=fill_mode, + cval=cval, + order=interpolation_order, + ) + return x + + +@keras_export("keras._legacy.preprocessing.image.random_shear") +def random_shear( + x, + intensity, + row_axis=1, + col_axis=2, + channel_axis=0, + fill_mode="nearest", + cval=0.0, + interpolation_order=1, +): + """DEPRECATED.""" + shear = np.random.uniform(-intensity, intensity) + x = apply_affine_transform( + x, + shear=shear, + row_axis=row_axis, + col_axis=col_axis, + channel_axis=channel_axis, + fill_mode=fill_mode, + cval=cval, + order=interpolation_order, + ) + return x + + +@keras_export("keras._legacy.preprocessing.image.random_zoom") +def random_zoom( + x, + zoom_range, + row_axis=1, + col_axis=2, + channel_axis=0, + fill_mode="nearest", + cval=0.0, + interpolation_order=1, +): + """DEPRECATED.""" + if len(zoom_range) != 2: + raise ValueError( + "`zoom_range` should be a tuple or list of two floats. " + f"Received: {zoom_range}" + ) + + if zoom_range[0] == 1 and zoom_range[1] == 1: + zx, zy = 1, 1 + else: + zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) + x = apply_affine_transform( + x, + zx=zx, + zy=zy, + row_axis=row_axis, + col_axis=col_axis, + channel_axis=channel_axis, + fill_mode=fill_mode, + cval=cval, + order=interpolation_order, + ) + return x + + +@keras_export("keras._legacy.preprocessing.image.apply_channel_shift") +def apply_channel_shift(x, intensity, channel_axis=0): + """Performs a channel shift. + + DEPRECATED. + + Args: + x: Input tensor. Must be 3D. + intensity: Transformation intensity. + channel_axis: Index of axis for channels in the input tensor. + + Returns: + Numpy image tensor. 
+ """ + x = np.rollaxis(x, channel_axis, 0) + min_x, max_x = np.min(x), np.max(x) + channel_images = [ + np.clip(x_channel + intensity, min_x, max_x) for x_channel in x + ] + x = np.stack(channel_images, axis=0) + x = np.rollaxis(x, 0, channel_axis + 1) + return x + + +@keras_export("keras._legacy.preprocessing.image.random_channel_shift") +def random_channel_shift(x, intensity_range, channel_axis=0): + """Performs a random channel shift. + + DEPRECATED. + + Args: + x: Input tensor. Must be 3D. + intensity_range: Transformation intensity. + channel_axis: Index of axis for channels in the input tensor. + + Returns: + Numpy image tensor. + """ + intensity = np.random.uniform(-intensity_range, intensity_range) + return apply_channel_shift(x, intensity, channel_axis=channel_axis) + + +@keras_export("keras._legacy.preprocessing.image.apply_brightness_shift") +def apply_brightness_shift(x, brightness, scale=True): + """Performs a brightness shift. + + DEPRECATED. + + Args: + x: Input tensor. Must be 3D. + brightness: Float. The new brightness value. + scale: Whether to rescale the image such that minimum and maximum values + are 0 and 255 respectively. Default: True. + + Returns: + Numpy image tensor. + + Raises: + ImportError: if PIL is not available. + """ + from PIL import ImageEnhance + + x_min, x_max = np.min(x), np.max(x) + local_scale = (x_min < 0) or (x_max > 255) + x = image_utils.array_to_img(x, scale=local_scale or scale) + x = imgenhancer_Brightness = ImageEnhance.Brightness(x) + x = imgenhancer_Brightness.enhance(brightness) + x = image_utils.img_to_array(x) + if not scale and local_scale: + x = x / 255 * (x_max - x_min) + x_min + return x + + +@keras_export("keras._legacy.preprocessing.image.random_brightness") +def random_brightness(x, brightness_range, scale=True): + """Performs a random brightness shift. + + DEPRECATED. + + Args: + x: Input tensor. Must be 3D. + brightness_range: Tuple of floats; brightness range. 
+ scale: Whether to rescale the image such that minimum and maximum values + are 0 and 255 respectively. Default: True. + + Returns: + Numpy image tensor. + + Raises: + ValueError if `brightness_range` isn't a tuple. + """ + if len(brightness_range) != 2: + raise ValueError( + "`brightness_range should be tuple or list of two floats. " + f"Received: {brightness_range}" + ) + + u = np.random.uniform(brightness_range[0], brightness_range[1]) + return apply_brightness_shift(x, u, scale) + + +def transform_matrix_offset_center(matrix, x, y): + o_x = float(x) / 2 - 0.5 + o_y = float(y) / 2 - 0.5 + offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]) + reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]) + transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix) + return transform_matrix + + +@keras_export("keras._legacy.preprocessing.image.apply_affine_transform") +def apply_affine_transform( + x, + theta=0, + tx=0, + ty=0, + shear=0, + zx=1, + zy=1, + row_axis=1, + col_axis=2, + channel_axis=0, + fill_mode="nearest", + cval=0.0, + order=1, +): + """Applies an affine transformation specified by the parameters given. + + DEPRECATED. + """ + # Input sanity checks: + # 1. x must 2D image with one or more channels (i.e., a 3D tensor) + # 2. channels must be either first or last dimension + if np.unique([row_axis, col_axis, channel_axis]).size != 3: + raise ValueError( + "'row_axis', 'col_axis', and 'channel_axis' must be distinct" + ) + + # shall we support negative indices? + valid_indices = set([0, 1, 2]) + actual_indices = set([row_axis, col_axis, channel_axis]) + if actual_indices != valid_indices: + raise ValueError( + f"Invalid axis' indices: {actual_indices - valid_indices}" + ) + + if x.ndim != 3: + raise ValueError("Input arrays must be multi-channel 2D images.") + if channel_axis not in [0, 2]: + raise ValueError( + "Channels are allowed and the first and last dimensions." 
+ ) + + transform_matrix = None + if theta != 0: + theta = np.deg2rad(theta) + rotation_matrix = np.array( + [ + [np.cos(theta), -np.sin(theta), 0], + [np.sin(theta), np.cos(theta), 0], + [0, 0, 1], + ] + ) + transform_matrix = rotation_matrix + + if tx != 0 or ty != 0: + shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) + if transform_matrix is None: + transform_matrix = shift_matrix + else: + transform_matrix = np.dot(transform_matrix, shift_matrix) + + if shear != 0: + shear = np.deg2rad(shear) + shear_matrix = np.array( + [[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]] + ) + if transform_matrix is None: + transform_matrix = shear_matrix + else: + transform_matrix = np.dot(transform_matrix, shear_matrix) + + if zx != 1 or zy != 1: + zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]]) + if transform_matrix is None: + transform_matrix = zoom_matrix + else: + transform_matrix = np.dot(transform_matrix, zoom_matrix) + + if transform_matrix is not None: + h, w = x.shape[row_axis], x.shape[col_axis] + transform_matrix = transform_matrix_offset_center( + transform_matrix, h, w + ) + x = np.rollaxis(x, channel_axis, 0) + + # Matrix construction assumes that coordinates are x, y (in that order). + # However, regular numpy arrays use y,x (aka i,j) indexing. + # Possible solution is: + # 1. Swap the x and y axes. + # 2. Apply transform. + # 3. Swap the x and y axes again to restore image-like data ordering. + # Mathematically, it is equivalent to the following transformation: + # M' = PMP, where P is the permutation matrix, M is the original + # transformation matrix. 
+ if col_axis > row_axis: + transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]] + transform_matrix[[0, 1]] = transform_matrix[[1, 0]] + final_affine_matrix = transform_matrix[:2, :2] + final_offset = transform_matrix[:2, 2] + + channel_images = [ + scipy.ndimage.interpolation.affine_transform( + x_channel, + final_affine_matrix, + final_offset, + order=order, + mode=fill_mode, + cval=cval, + ) + for x_channel in x + ] + x = np.stack(channel_images, axis=0) + x = np.rollaxis(x, 0, channel_axis + 1) + return x diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/sequence.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..1d0f360c50c7c280bde92d44d5358ea9274e28ce --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/sequence.py @@ -0,0 +1,320 @@ +"""Deprecated sequence preprocessing APIs from Keras 1.""" + +import json +import random + +import numpy as np + +from keras.src.api_export import keras_export +from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset + + +@keras_export("keras._legacy.preprocessing.sequence.TimeseriesGenerator") +class TimeseriesGenerator(PyDataset): + """Utility class for generating batches of temporal data. + + DEPRECATED. + + This class takes in a sequence of data-points gathered at + equal intervals, along with time series parameters such as + stride, length of history, etc., to produce batches for + training/validation. + + Arguments: + data: Indexable generator (such as list or Numpy array) + containing consecutive data points (timesteps). + The data should be at 2D, and axis 0 is expected + to be the time dimension. + targets: Targets corresponding to timesteps in `data`. + It should have same length as `data`. + length: Length of the output sequences (in number of timesteps). 
+ sampling_rate: Period between successive individual timesteps + within sequences. For rate `r`, timesteps + `data[i]`, `data[i-r]`, ... `data[i - length]` + are used for create a sample sequence. + stride: Period between successive output sequences. + For stride `s`, consecutive output samples would + be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc. + start_index: Data points earlier than `start_index` will not be used + in the output sequences. This is useful to reserve part of the + data for test or validation. + end_index: Data points later than `end_index` will not be used + in the output sequences. This is useful to reserve part of the + data for test or validation. + shuffle: Whether to shuffle output samples, + or instead draw them in chronological order. + reverse: Boolean: if `true`, timesteps in each output sample will be + in reverse chronological order. + batch_size: Number of timeseries samples in each batch + (except maybe the last one). + + Returns: + A PyDataset instance. + """ + + def __init__( + self, + data, + targets, + length, + sampling_rate=1, + stride=1, + start_index=0, + end_index=None, + shuffle=False, + reverse=False, + batch_size=128, + ): + if len(data) != len(targets): + raise ValueError( + "Data and targets have to be " + f"of same length. Data length is {len(data)} " + f"while target length is {len(targets)}" + ) + + self.data = data + self.targets = targets + self.length = length + self.sampling_rate = sampling_rate + self.stride = stride + self.start_index = start_index + length + if end_index is None: + end_index = len(data) - 1 + self.end_index = end_index + self.shuffle = shuffle + self.reverse = reverse + self.batch_size = batch_size + + if self.start_index > self.end_index: + raise ValueError( + f"`start_index+length={self.start_index} " + f"> end_index={self.end_index}` " + "is disallowed, as no part of the sequence " + "would be left to be used as current step." 
+ ) + + def __len__(self): + return ( + self.end_index - self.start_index + self.batch_size * self.stride + ) // (self.batch_size * self.stride) + + def __getitem__(self, index): + if self.shuffle: + rows = np.random.randint( + self.start_index, self.end_index + 1, size=self.batch_size + ) + else: + i = self.start_index + self.batch_size * self.stride * index + rows = np.arange( + i, + min(i + self.batch_size * self.stride, self.end_index + 1), + self.stride, + ) + + samples = np.array( + [ + self.data[row - self.length : row : self.sampling_rate] + for row in rows + ] + ) + targets = np.array([self.targets[row] for row in rows]) + + if self.reverse: + return samples[:, ::-1, ...], targets + return samples, targets + + def get_config(self): + """Returns the TimeseriesGenerator configuration as Python dictionary. + + Returns: + A Python dictionary with the TimeseriesGenerator configuration. + """ + data = self.data + if type(self.data).__module__ == np.__name__: + data = self.data.tolist() + try: + json_data = json.dumps(data) + except TypeError as e: + raise TypeError(f"Data not JSON Serializable: {data}") from e + + targets = self.targets + if type(self.targets).__module__ == np.__name__: + targets = self.targets.tolist() + try: + json_targets = json.dumps(targets) + except TypeError as e: + raise TypeError(f"Targets not JSON Serializable: {targets}") from e + + return { + "data": json_data, + "targets": json_targets, + "length": self.length, + "sampling_rate": self.sampling_rate, + "stride": self.stride, + "start_index": self.start_index, + "end_index": self.end_index, + "shuffle": self.shuffle, + "reverse": self.reverse, + "batch_size": self.batch_size, + } + + def to_json(self, **kwargs): + """Returns a JSON string containing the generator's configuration. + + Args: + **kwargs: Additional keyword arguments to be passed + to `json.dumps()`. + + Returns: + A JSON string containing the tokenizer configuration. 
+ """ + config = self.get_config() + timeseries_generator_config = { + "class_name": self.__class__.__name__, + "config": config, + } + return json.dumps(timeseries_generator_config, **kwargs) + + +@keras_export("keras._legacy.preprocessing.sequence.make_sampling_table") +def make_sampling_table(size, sampling_factor=1e-5): + """Generates a word rank-based probabilistic sampling table. + + DEPRECATED. + + Used for generating the `sampling_table` argument for `skipgrams`. + `sampling_table[i]` is the probability of sampling + the word i-th most common word in a dataset + (more common words should be sampled less frequently, for balance). + + The sampling probabilities are generated according + to the sampling distribution used in word2vec: + + ``` + p(word) = (min(1, sqrt(word_frequency / sampling_factor) / + (word_frequency / sampling_factor))) + ``` + + We assume that the word frequencies follow Zipf's law (s=1) to derive + a numerical approximation of frequency(rank): + + `frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))` + where `gamma` is the Euler-Mascheroni constant. + + Args: + size: Int, number of possible words to sample. + sampling_factor: The sampling factor in the word2vec formula. + + Returns: + A 1D Numpy array of length `size` where the ith entry + is the probability that a word of rank i should be sampled. + """ + gamma = 0.577 + rank = np.arange(size) + rank[0] = 1 + inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1.0 / (12.0 * rank) + f = sampling_factor * inv_fq + + return np.minimum(1.0, f / np.sqrt(f)) + + +@keras_export("keras._legacy.preprocessing.sequence.skipgrams") +def skipgrams( + sequence, + vocabulary_size, + window_size=4, + negative_samples=1.0, + shuffle=True, + categorical=False, + sampling_table=None, + seed=None, +): + """Generates skipgram word pairs. + + DEPRECATED. 
+ + This function transforms a sequence of word indexes (list of integers) + into tuples of words of the form: + + - (word, word in the same window), with label 1 (positive samples). + - (word, random word from the vocabulary), with label 0 (negative samples). + + Read more about Skipgram in this gnomic paper by Mikolov et al.: + [Efficient Estimation of Word Representations in + Vector Space](http://arxiv.org/pdf/1301.3781v3.pdf) + + Args: + sequence: A word sequence (sentence), encoded as a list + of word indices (integers). If using a `sampling_table`, + word indices are expected to match the rank + of the words in a reference dataset (e.g. 10 would encode + the 10-th most frequently occurring token). + Note that index 0 is expected to be a non-word and will be skipped. + vocabulary_size: Int, maximum possible word index + 1 + window_size: Int, size of sampling windows (technically half-window). + The window of a word `w_i` will be + `[i - window_size, i + window_size+1]`. + negative_samples: Float >= 0. 0 for no negative (i.e. random) samples. + 1 for same number as positive samples. + shuffle: Whether to shuffle the word couples before returning them. + categorical: bool. if False, labels will be + integers (eg. `[0, 1, 1 .. ]`), + if `True`, labels will be categorical, e.g. + `[[1,0],[0,1],[0,1] .. ]`. + sampling_table: 1D array of size `vocabulary_size` where the entry i + encodes the probability to sample a word of rank i. + seed: Random seed. + + Returns: + couples, labels: where `couples` are int pairs and + `labels` are either 0 or 1. + + Note: + By convention, index 0 in the vocabulary is + a non-word and will be skipped. 
+ """ + couples = [] + labels = [] + for i, wi in enumerate(sequence): + if not wi: + continue + if sampling_table is not None: + if sampling_table[wi] < random.random(): + continue + + window_start = max(0, i - window_size) + window_end = min(len(sequence), i + window_size + 1) + for j in range(window_start, window_end): + if j != i: + wj = sequence[j] + if not wj: + continue + couples.append([wi, wj]) + if categorical: + labels.append([0, 1]) + else: + labels.append(1) + + if negative_samples > 0: + num_negative_samples = int(len(labels) * negative_samples) + words = [c[0] for c in couples] + random.shuffle(words) + + couples += [ + [words[i % len(words)], random.randint(1, vocabulary_size - 1)] + for i in range(num_negative_samples) + ] + if categorical: + labels += [[1, 0]] * num_negative_samples + else: + labels += [0] * num_negative_samples + + if shuffle: + if seed is None: + seed = random.randint(0, 10e6) + random.seed(seed) + random.shuffle(couples) + random.seed(seed) + random.shuffle(labels) + + return couples, labels diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/text.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/text.py new file mode 100644 index 0000000000000000000000000000000000000000..44dcdae166a5d4fc2deb6f26ac5d3963f26c91d9 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/preprocessing/text.py @@ -0,0 +1,336 @@ +"""Deprecated text preprocessing APIs from Keras 1.""" + +import collections +import hashlib +import json +import warnings + +import numpy as np + +from keras.src.api_export import keras_export + + +@keras_export("keras._legacy.preprocessing.text.text_to_word_sequence") +def text_to_word_sequence( + input_text, + filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', + lower=True, + split=" ", +): + """DEPRECATED.""" + if lower: + input_text = input_text.lower() + + translate_dict = {c: split 
for c in filters} + translate_map = str.maketrans(translate_dict) + input_text = input_text.translate(translate_map) + + seq = input_text.split(split) + return [i for i in seq if i] + + +@keras_export("keras._legacy.preprocessing.text.one_hot") +def one_hot( + input_text, + n, + filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', + lower=True, + split=" ", + analyzer=None, +): + """DEPRECATED.""" + return hashing_trick( + input_text, + n, + hash_function=hash, + filters=filters, + lower=lower, + split=split, + analyzer=analyzer, + ) + + +@keras_export("keras._legacy.preprocessing.text.hashing_trick") +def hashing_trick( + text, + n, + hash_function=None, + filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', + lower=True, + split=" ", + analyzer=None, +): + """DEPRECATED.""" + if hash_function is None: + hash_function = hash + elif hash_function == "md5": + + def hash_function(w): + return int(hashlib.md5(w.encode()).hexdigest(), 16) + + if analyzer is None: + seq = text_to_word_sequence( + text, filters=filters, lower=lower, split=split + ) + else: + seq = analyzer(text) + + return [(hash_function(w) % (n - 1) + 1) for w in seq] + + +@keras_export("keras._legacy.preprocessing.text.Tokenizer") +class Tokenizer: + """DEPRECATED.""" + + def __init__( + self, + num_words=None, + filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', + lower=True, + split=" ", + char_level=False, + oov_token=None, + analyzer=None, + **kwargs, + ): + # Legacy support + if "nb_words" in kwargs: + warnings.warn( + "The `nb_words` argument in `Tokenizer` " + "has been renamed `num_words`." 
+ ) + num_words = kwargs.pop("nb_words") + document_count = kwargs.pop("document_count", 0) + if kwargs: + raise TypeError("Unrecognized keyword arguments: " + str(kwargs)) + + self.word_counts = collections.OrderedDict() + self.word_docs = collections.defaultdict(int) + self.filters = filters + self.split = split + self.lower = lower + self.num_words = num_words + self.document_count = document_count + self.char_level = char_level + self.oov_token = oov_token + self.index_docs = collections.defaultdict(int) + self.word_index = {} + self.index_word = {} + self.analyzer = analyzer + + def fit_on_texts(self, texts): + for text in texts: + self.document_count += 1 + if self.char_level or isinstance(text, list): + if self.lower: + if isinstance(text, list): + text = [text_elem.lower() for text_elem in text] + else: + text = text.lower() + seq = text + else: + if self.analyzer is None: + seq = text_to_word_sequence( + text, + filters=self.filters, + lower=self.lower, + split=self.split, + ) + else: + seq = self.analyzer(text) + for w in seq: + if w in self.word_counts: + self.word_counts[w] += 1 + else: + self.word_counts[w] = 1 + for w in set(seq): + # In how many documents each word occurs + self.word_docs[w] += 1 + + wcounts = list(self.word_counts.items()) + wcounts.sort(key=lambda x: x[1], reverse=True) + # forcing the oov_token to index 1 if it exists + if self.oov_token is None: + sorted_voc = [] + else: + sorted_voc = [self.oov_token] + sorted_voc.extend(wc[0] for wc in wcounts) + + # note that index 0 is reserved, never assigned to an existing word + self.word_index = dict( + zip(sorted_voc, list(range(1, len(sorted_voc) + 1))) + ) + + self.index_word = {c: w for w, c in self.word_index.items()} + + for w, c in list(self.word_docs.items()): + self.index_docs[self.word_index[w]] = c + + def fit_on_sequences(self, sequences): + self.document_count += len(sequences) + for seq in sequences: + seq = set(seq) + for i in seq: + self.index_docs[i] += 1 + + def 
texts_to_sequences(self, texts): + return list(self.texts_to_sequences_generator(texts)) + + def texts_to_sequences_generator(self, texts): + num_words = self.num_words + oov_token_index = self.word_index.get(self.oov_token) + for text in texts: + if self.char_level or isinstance(text, list): + if self.lower: + if isinstance(text, list): + text = [text_elem.lower() for text_elem in text] + else: + text = text.lower() + seq = text + else: + if self.analyzer is None: + seq = text_to_word_sequence( + text, + filters=self.filters, + lower=self.lower, + split=self.split, + ) + else: + seq = self.analyzer(text) + vect = [] + for w in seq: + i = self.word_index.get(w) + if i is not None: + if num_words and i >= num_words: + if oov_token_index is not None: + vect.append(oov_token_index) + else: + vect.append(i) + elif self.oov_token is not None: + vect.append(oov_token_index) + yield vect + + def sequences_to_texts(self, sequences): + return list(self.sequences_to_texts_generator(sequences)) + + def sequences_to_texts_generator(self, sequences): + num_words = self.num_words + oov_token_index = self.word_index.get(self.oov_token) + for seq in sequences: + vect = [] + for num in seq: + word = self.index_word.get(num) + if word is not None: + if num_words and num >= num_words: + if oov_token_index is not None: + vect.append(self.index_word[oov_token_index]) + else: + vect.append(word) + elif self.oov_token is not None: + vect.append(self.index_word[oov_token_index]) + vect = " ".join(vect) + yield vect + + def texts_to_matrix(self, texts, mode="binary"): + sequences = self.texts_to_sequences(texts) + return self.sequences_to_matrix(sequences, mode=mode) + + def sequences_to_matrix(self, sequences, mode="binary"): + if not self.num_words: + if self.word_index: + num_words = len(self.word_index) + 1 + else: + raise ValueError( + "Specify a dimension (`num_words` argument), " + "or fit on some text data first." 
+ ) + else: + num_words = self.num_words + + if mode == "tfidf" and not self.document_count: + raise ValueError( + "Fit the Tokenizer on some data before using tfidf mode." + ) + + x = np.zeros((len(sequences), num_words)) + for i, seq in enumerate(sequences): + if not seq: + continue + counts = collections.defaultdict(int) + for j in seq: + if j >= num_words: + continue + counts[j] += 1 + for j, c in list(counts.items()): + if mode == "count": + x[i][j] = c + elif mode == "freq": + x[i][j] = c / len(seq) + elif mode == "binary": + x[i][j] = 1 + elif mode == "tfidf": + # Use weighting scheme 2 in + # https://en.wikipedia.org/wiki/Tf%E2%80%93idf + tf = 1 + np.log(c) + idf = np.log( + 1 + + self.document_count / (1 + self.index_docs.get(j, 0)) + ) + x[i][j] = tf * idf + else: + raise ValueError("Unknown vectorization mode:", mode) + return x + + def get_config(self): + json_word_counts = json.dumps(self.word_counts) + json_word_docs = json.dumps(self.word_docs) + json_index_docs = json.dumps(self.index_docs) + json_word_index = json.dumps(self.word_index) + json_index_word = json.dumps(self.index_word) + + return { + "num_words": self.num_words, + "filters": self.filters, + "lower": self.lower, + "split": self.split, + "char_level": self.char_level, + "oov_token": self.oov_token, + "document_count": self.document_count, + "word_counts": json_word_counts, + "word_docs": json_word_docs, + "index_docs": json_index_docs, + "index_word": json_index_word, + "word_index": json_word_index, + } + + def to_json(self, **kwargs): + config = self.get_config() + tokenizer_config = { + "class_name": self.__class__.__name__, + "config": config, + } + return json.dumps(tokenizer_config, **kwargs) + + +@keras_export("keras._legacy.preprocessing.text.tokenizer_from_json") +def tokenizer_from_json(json_string): + """DEPRECATED.""" + tokenizer_config = json.loads(json_string) + config = tokenizer_config.get("config") + + word_counts = json.loads(config.pop("word_counts")) + word_docs = 
json.loads(config.pop("word_docs")) + index_docs = json.loads(config.pop("index_docs")) + # Integer indexing gets converted to strings with json.dumps() + index_docs = {int(k): v for k, v in index_docs.items()} + index_word = json.loads(config.pop("index_word")) + index_word = {int(k): v for k, v in index_word.items()} + word_index = json.loads(config.pop("word_index")) + + tokenizer = Tokenizer(**config) + tokenizer.word_counts = word_counts + tokenizer.word_docs = word_docs + tokenizer.index_docs = index_docs + tokenizer.word_index = word_index + tokenizer.index_word = index_word + return tokenizer diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b3af750083bba8da3a728f73bf1f7bd2b9152b5 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/json_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/json_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aaaa776bdb2906011648baa16212ea5538098fb7 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/json_utils.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/legacy_h5_format.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/legacy_h5_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec16a98a2580e105d3663da8ca424964f08fcc73 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/legacy_h5_format.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/saving_options.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/saving_options.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f782fb86c4f46ec1ab5de5759783c675f9f29ad Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/saving_options.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/saving_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/saving_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86da522eab953ac617cabcc78e8cef8eeb6fccb4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/saving_utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/serialization.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/serialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37bb37cc25088f01bb97acfb0ffae45e372036f7 Binary 
files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/__pycache__/serialization.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/json_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/json_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0dbc578d25abb54e8ddd31a882ffb7e888283dd2 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/json_utils.py @@ -0,0 +1,220 @@ +"""JSON utilities for legacy saving formats (h5 and SavedModel)""" + +import collections +import enum +import functools +import json + +import numpy as np + +from keras.src.legacy.saving import serialization +from keras.src.saving import serialization_lib +from keras.src.utils.module_utils import tensorflow as tf + +_EXTENSION_TYPE_SPEC = "_EXTENSION_TYPE_SPEC" + + +class Encoder(json.JSONEncoder): + """JSON encoder and decoder that handles TensorShapes and tuples.""" + + def default(self, obj): + """Encodes objects for types that aren't handled by the default + encoder.""" + if tf.available and isinstance(obj, tf.TensorShape): + items = obj.as_list() if obj.rank is not None else None + return {"class_name": "TensorShape", "items": items} + return get_json_type(obj) + + def encode(self, obj): + return super().encode(_encode_tuple(obj)) + + +def _encode_tuple(x): + if isinstance(x, tuple): + return { + "class_name": "__tuple__", + "items": tuple(_encode_tuple(i) for i in x), + } + elif isinstance(x, list): + return [_encode_tuple(i) for i in x] + elif isinstance(x, dict): + return {key: _encode_tuple(value) for key, value in x.items()} + else: + return x + + +def decode(json_string): + return json.loads(json_string, object_hook=_decode_helper) + + +def decode_and_deserialize( + json_string, module_objects=None, custom_objects=None +): + """Decodes the JSON and 
deserializes any Keras objects found in the dict.""" + return json.loads( + json_string, + object_hook=functools.partial( + _decode_helper, + deserialize=True, + module_objects=module_objects, + custom_objects=custom_objects, + ), + ) + + +def _decode_helper( + obj, deserialize=False, module_objects=None, custom_objects=None +): + """A decoding helper that is TF-object aware. + + Args: + obj: A decoded dictionary that may represent an object. + deserialize: Boolean. When True, deserializes any Keras + objects found in `obj`. Defaults to `False`. + module_objects: A dictionary of built-in objects to look the name up in. + Generally, `module_objects` is provided by midlevel library + implementers. + custom_objects: A dictionary of custom objects to look the name up in. + Generally, `custom_objects` is provided by the end user. + + Returns: + The decoded object. + """ + if isinstance(obj, dict) and "class_name" in obj: + if tf.available: + if obj["class_name"] == "TensorShape": + return tf.TensorShape(obj["items"]) + elif obj["class_name"] == "TypeSpec": + from tensorflow.python.framework import type_spec_registry + + return type_spec_registry.lookup(obj["type_spec"])._deserialize( + _decode_helper(obj["serialized"]) + ) + elif obj["class_name"] == "CompositeTensor": + spec = obj["spec"] + tensors = [] + for dtype, tensor in obj["tensors"]: + tensors.append( + tf.constant(tensor, dtype=tf.dtypes.as_dtype(dtype)) + ) + return tf.nest.pack_sequence_as( + _decode_helper(spec), tensors, expand_composites=True + ) + + if obj["class_name"] == "__tuple__": + return tuple(_decode_helper(i) for i in obj["items"]) + elif obj["class_name"] == "__ellipsis__": + return Ellipsis + elif deserialize and "__passive_serialization__" in obj: + # __passive_serialization__ is added by the JSON encoder when + # encoding an object that has a `get_config()` method. 
+ try: + if ( + "module" not in obj + ): # TODO(nkovela): Add TF SavedModel scope + return serialization.deserialize_keras_object( + obj, + module_objects=module_objects, + custom_objects=custom_objects, + ) + else: + return serialization_lib.deserialize_keras_object( + obj, + module_objects=module_objects, + custom_objects=custom_objects, + ) + except ValueError: + pass + elif obj["class_name"] == "__bytes__": + return obj["value"].encode("utf-8") + return obj + + +def get_json_type(obj): + """Serializes any object to a JSON-serializable structure. + + Args: + obj: the object to serialize + + Returns: + JSON-serializable structure representing `obj`. + + Raises: + TypeError: if `obj` cannot be serialized. + """ + # if obj is a serializable Keras class instance + # e.g. optimizer, layer + if hasattr(obj, "get_config"): + # TODO(nkovela): Replace with legacy serialization + serialized = serialization.serialize_keras_object(obj) + serialized["__passive_serialization__"] = True + return serialized + + # if obj is any numpy type + if type(obj).__module__ == np.__name__: + if isinstance(obj, np.ndarray): + return obj.tolist() + else: + return obj.item() + + # misc functions (e.g. 
loss function) + if callable(obj): + return obj.__name__ + + # if obj is a python 'type' + if type(obj).__name__ == type.__name__: + return obj.__name__ + + if tf.available and isinstance(obj, tf.compat.v1.Dimension): + return obj.value + + if tf.available and isinstance(obj, tf.TensorShape): + return obj.as_list() + + if tf.available and isinstance(obj, tf.DType): + return obj.name + + if isinstance(obj, collections.abc.Mapping): + return dict(obj) + + if obj is Ellipsis: + return {"class_name": "__ellipsis__"} + + # if isinstance(obj, wrapt.ObjectProxy): + # return obj.__wrapped__ + + if tf.available and isinstance(obj, tf.TypeSpec): + from tensorflow.python.framework import type_spec_registry + + try: + type_spec_name = type_spec_registry.get_name(type(obj)) + return { + "class_name": "TypeSpec", + "type_spec": type_spec_name, + "serialized": obj._serialize(), + } + except ValueError: + raise ValueError( + f"Unable to serialize {obj} to JSON, because the TypeSpec " + f"class {type(obj)} has not been registered." + ) + if tf.available and isinstance(obj, tf.__internal__.CompositeTensor): + spec = tf.type_spec_from_value(obj) + tensors = [] + for tensor in tf.nest.flatten(obj, expand_composites=True): + tensors.append((tensor.dtype.name, tensor.numpy().tolist())) + return { + "class_name": "CompositeTensor", + "spec": get_json_type(spec), + "tensors": tensors, + } + + if isinstance(obj, enum.Enum): + return obj.value + + if isinstance(obj, bytes): + return {"class_name": "__bytes__", "value": obj.decode("utf-8")} + + raise TypeError( + f"Unable to serialize {obj} to JSON. Unrecognized type {type(obj)}." 
+ ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/legacy_h5_format.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/legacy_h5_format.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f3c3eb7ded9ba889f0f2656f41f7ffeaf22fd4 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/legacy_h5_format.py @@ -0,0 +1,640 @@ +import json +import os +import warnings + +import numpy as np +from absl import logging + +from keras.src import backend +from keras.src import optimizers +from keras.src.backend.common import global_state +from keras.src.legacy.saving import json_utils +from keras.src.legacy.saving import saving_options +from keras.src.legacy.saving import saving_utils +from keras.src.saving import object_registration +from keras.src.utils import io_utils + +try: + import h5py +except ImportError: + h5py = None + + +HDF5_OBJECT_HEADER_LIMIT = 64512 + + +def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): + if h5py is None: + raise ImportError( + "`save_model()` using h5 format requires h5py. Could not " + "import h5py." + ) + + if not isinstance(filepath, h5py.File): + # If file exists and should not be overwritten. 
+ if not overwrite and os.path.isfile(filepath): + proceed = io_utils.ask_to_proceed_with_overwrite(filepath) + if not proceed: + return + + dirpath = os.path.dirname(filepath) + if dirpath and not os.path.exists(dirpath): + os.makedirs(dirpath, exist_ok=True) + + f = h5py.File(filepath, mode="w") + opened_new_file = True + else: + f = filepath + opened_new_file = False + try: + with saving_options.keras_option_scope(use_legacy_config=True): + model_metadata = saving_utils.model_metadata( + model, include_optimizer + ) + for k, v in model_metadata.items(): + if isinstance(v, (dict, list, tuple)): + f.attrs[k] = json.dumps( + v, default=json_utils.get_json_type + ).encode("utf8") + else: + f.attrs[k] = v + + model_weights_group = f.create_group("model_weights") + save_weights_to_hdf5_group(model_weights_group, model) + + # TODO(b/128683857): Add integration tests between tf.keras and + # external Keras, to avoid breaking TF.js users. + if include_optimizer and hasattr(model, "optimizer"): + save_optimizer_weights_to_hdf5_group(f, model.optimizer) + + f.flush() + finally: + if opened_new_file: + f.close() + + +def load_model_from_hdf5(filepath, custom_objects=None, compile=True): + """Loads a model saved via `save_model_to_hdf5`. + + Args: + filepath: One of the following: + - String, path to the saved model + - `h5py.File` object from which to load the model + custom_objects: Optional dictionary mapping names + (strings) to custom classes or functions to be + considered during deserialization. + compile: Boolean, whether to compile the model + after loading. + + Returns: + A Keras model instance. If an optimizer was found + as part of the saved model, the model is already + compiled. Otherwise, the model is uncompiled and + a warning will be displayed. When `compile` is set + to `False`, the compilation is omitted without any + warning. + + Raises: + ImportError: if h5py is not available. + ValueError: In case of an invalid savefile. 
+ """ + if h5py is None: + raise ImportError( + "`load_model()` using h5 format requires h5py. Could not " + "import h5py." + ) + + if not custom_objects: + custom_objects = {} + + gco = object_registration.GLOBAL_CUSTOM_OBJECTS + tlco = global_state.get_global_attribute("custom_objects_scope_dict", {}) + custom_objects = {**custom_objects, **gco, **tlco} + + opened_new_file = not isinstance(filepath, h5py.File) + if opened_new_file: + f = h5py.File(filepath, mode="r") + else: + f = filepath + + model = None + try: + # instantiate model + model_config = f.attrs.get("model_config") + if model_config is None: + raise ValueError( + f"No model config found in the file at {filepath}." + ) + if hasattr(model_config, "decode"): + model_config = model_config.decode("utf-8") + model_config = json_utils.decode(model_config) + + with saving_options.keras_option_scope(use_legacy_config=True): + model = saving_utils.model_from_config( + model_config, custom_objects=custom_objects + ) + + # set weights + load_weights_from_hdf5_group(f["model_weights"], model) + + if compile: + # instantiate optimizer + training_config = f.attrs.get("training_config") + if hasattr(training_config, "decode"): + training_config = training_config.decode("utf-8") + if training_config is None: + logging.warning( + "No training configuration found in the save file, so " + "the model was *not* compiled. Compile it manually." + ) + return model + training_config = json_utils.decode(training_config) + + # Compile model. + model.compile( + **saving_utils.compile_args_from_training_config( + training_config, custom_objects + ) + ) + saving_utils.try_build_compiled_arguments(model) + + # Set optimizer weights. 
+ if "optimizer_weights" in f: + try: + if isinstance(model.optimizer, optimizers.Optimizer): + model.optimizer.build(model._trainable_variables) + else: + model.optimizer._create_all_weights( + model._trainable_variables + ) + except (NotImplementedError, AttributeError): + logging.warning( + "Error when creating the weights of optimizer {}, " + "making it impossible to restore the saved optimizer " + "state. As a result, your model is starting with " + "a freshly initialized optimizer." + ) + + optimizer_weight_values = ( + load_optimizer_weights_from_hdf5_group(f) + ) + try: + model.optimizer.set_weights(optimizer_weight_values) + except ValueError: + logging.warning( + "Error in loading the saved optimizer " + "state. As a result, your model is " + "starting with a freshly initialized " + "optimizer." + ) + finally: + if opened_new_file: + f.close() + return model + + +def save_weights_to_hdf5_group(f, model): + """Saves the weights of a list of layers to a HDF5 group. + + Args: + f: HDF5 group. + model: Model instance. + """ + from keras.src import __version__ as keras_version + + save_attributes_to_hdf5_group( + f, "layer_names", [layer.name.encode("utf8") for layer in model.layers] + ) + f.attrs["backend"] = backend.backend().encode("utf8") + f.attrs["keras_version"] = str(keras_version).encode("utf8") + + # Sort model layers by layer name to ensure that group names are strictly + # growing to avoid prefix issues. + for layer in sorted(model.layers, key=lambda x: x.name): + g = f.create_group(layer.name) + weights = _legacy_weights(layer) + save_subset_weights_to_hdf5_group(g, weights) + weights = list( + v + for v in model._trainable_variables + model._non_trainable_variables + if v in model.weights + ) + g = f.create_group("top_level_model_weights") + save_subset_weights_to_hdf5_group(g, weights) + + +def save_subset_weights_to_hdf5_group(f, weights): + """Save top-level weights of a model to a HDF5 group. + + Args: + f: HDF5 group. 
+ weights: List of weight variables. + """ + weight_values = [backend.convert_to_numpy(w) for w in weights] + weight_names = [str(w.path).encode("utf8") for w in weights] + save_attributes_to_hdf5_group(f, "weight_names", weight_names) + for name, val in zip(weight_names, weight_values): + param_dset = f.create_dataset(name, val.shape, dtype=val.dtype) + if not val.shape: + # scalar + param_dset[()] = val + else: + param_dset[:] = val + + +def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer): + """Saves optimizer weights of a optimizer to a HDF5 group. + + Args: + hdf5_group: HDF5 group. + optimizer: optimizer instance. + """ + if isinstance(optimizer, optimizers.Optimizer): + symbolic_weights = optimizer.variables + else: + symbolic_weights = getattr(optimizer, "weights") + if symbolic_weights: + weights_group = hdf5_group.create_group("optimizer_weights") + weight_names = [str(w.path).encode("utf8") for w in symbolic_weights] + save_attributes_to_hdf5_group( + weights_group, "weight_names", weight_names + ) + weight_values = [backend.convert_to_numpy(w) for w in symbolic_weights] + for name, val in zip(weight_names, weight_values): + param_dset = weights_group.create_dataset( + name, val.shape, dtype=val.dtype + ) + if not val.shape: + # scalar + param_dset[()] = val + else: + param_dset[:] = val + + +def save_attributes_to_hdf5_group(group, name, data): + """Saves attributes (data) of the specified name into the HDF5 group. + + This method deals with an inherent problem of HDF5 file which is not + able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. + + Args: + group: A pointer to a HDF5 group. + name: A name of the attributes to save. + data: Attributes data to store. + + Raises: + RuntimeError: If any single attribute is too large to be saved. + """ + # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` + # because in that case even chunking the array would not make the saving + # possible. 
def save_attributes_to_hdf5_group(group, name, data):
    # NOTE(review): the original `def` line and docstring of this function sit
    # above this chunk; the signature is reconstructed from the visible body
    # (`group`, `name`, `data` are the only free names) -- confirm against the
    # full file.
    # Saves `data` under attribute `name`, splitting it into numbered chunks
    # ("name0", "name1", ...) when a single attribute would exceed the HDF5
    # object-header size limit.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true: a single item larger than the header
    # limit cannot be chunked at all.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_weights_from_hdf5_group(f, model):
    """Implements topological (order-based) weight loading.

    Layers are matched to saved groups purely by position: the k-th layer
    with weights in `model` receives the k-th saved layer group.

    Args:
        f: A pointer to a HDF5 group.
        model: Model instance.

    Raises:
        ValueError: in case of mismatch between provided layers
            and weights file.
    """
    # Parsed for provenance only; neither value is used below.
    if "keras_version" in f.attrs:
        original_keras_version = f.attrs["keras_version"]
        if hasattr(original_keras_version, "decode"):
            original_keras_version = original_keras_version.decode("utf8")
    else:
        original_keras_version = "1"
    if "backend" in f.attrs:
        original_backend = f.attrs["backend"]
        if hasattr(original_backend, "decode"):
            original_backend = original_backend.decode("utf8")
    else:
        original_backend = None

    # Only layers that actually own weights participate in the positional
    # matching, on both the model side and the file side.
    filtered_layers = []
    for layer in model.layers:
        weights = _legacy_weights(layer)
        if weights:
            filtered_layers.append(layer)

    layer_names = load_attributes_from_hdf5_group(f, "layer_names")
    filtered_layer_names = []
    for name in layer_names:
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        if weight_names:
            filtered_layer_names.append(name)
    layer_names = filtered_layer_names
    if len(layer_names) != len(filtered_layers):
        raise ValueError(
            "Layer count mismatch when loading weights from file. "
            f"Model expected {len(filtered_layers)} layers, found "
            f"{len(layer_names)} saved layers."
        )

    for k, name in enumerate(layer_names):
        g = f[name]
        layer = filtered_layers[k]
        symbolic_weights = _legacy_weights(layer)
        weight_values = load_subset_weights_from_hdf5_group(g)
        if len(weight_values) != len(symbolic_weights):
            raise ValueError(
                f"Weight count mismatch for layer #{k} (named {layer.name} in "
                f"the current model, {name} in the save file). "
                f"Layer expects {len(symbolic_weights)} weight(s). Received "
                f"{len(weight_values)} saved weight(s)"
            )
        _set_weights(
            layer,
            symbolic_weights,
            weight_values,
            name=f"layer #{k} (named {layer.name})",
        )

    if "top_level_model_weights" in f:
        # Equivalent to `model.weights` restricted to the legacy
        # trainable-then-non-trainable ordering.
        symbolic_weights = list(
            # model.weights
            v
            for v in model._trainable_variables + model._non_trainable_variables
            if v in model.weights
        )
        weight_values = load_subset_weights_from_hdf5_group(
            f["top_level_model_weights"]
        )
        if len(weight_values) != len(symbolic_weights):
            raise ValueError(
                "Weight count mismatch for top-level weights when loading "
                "weights from file. "
                f"Model expects {len(symbolic_weights)} top-level weight(s). "
                f"Received {len(weight_values)} saved top-level weight(s)"
            )
        _set_weights(
            model,
            symbolic_weights,
            weight_values,
            name="top-level model",
        )


def _set_weights(
    instance, symbolic_weights, weight_values, name, skip_mismatch=False
):
    """Safely set weights into a model or a layer.

    Args:
        instance: Model or layer instance,
        symbolic_weights: symbolic tensors representing
            the weights of the variables to load,
        weight_values: values of the weights to load,
        skip_mismatch: Boolean, whether to skip loading of weights
            where there is a mismatch in the shape of the weights,
        name: name used to identify the group.

    Raises:
        ValueError: in case of mismatch between provided
            model/layer and weights.
    """
    for i, weight_value in enumerate(weight_values):
        expected_shape = symbolic_weights[i].shape
        received_shape = weight_value.shape
        if expected_shape != received_shape:
            if skip_mismatch:
                # Fixed: trailing space added after {name}; the adjacent
                # f-strings previously concatenated into "...modeldue to...".
                warnings.warn(
                    f"Skipping loading weights for {name} "
                    f"due to mismatch in shape for "
                    f"weight {symbolic_weights[i].path}. "
                    f"Weight expects shape {expected_shape}. "
                    "Received saved weight "
                    f"with shape {received_shape}",
                    stacklevel=2,
                )
                continue
            # Fixed: trailing space added after {name} (same concatenation
            # bug as above).
            raise ValueError(
                f"Shape mismatch in {name} "
                f"for weight {symbolic_weights[i].path}. "
                f"Weight expects shape {expected_shape}. "
                "Received saved weight "
                f"with shape {received_shape}"
            )
        symbolic_weights[i].assign(weight_value)

    # Some stateful layers (e.g. metrics-like objects) need a final hook after
    # their variables are restored.
    if hasattr(instance, "finalize_state") and symbolic_weights:
        instance.finalize_state()


def load_weights_from_hdf5_group_by_name(f, model, skip_mismatch=False):
    """Implements name-based weight loading (instead of topological loading).

    Layers that have no matching name are skipped.

    Args:
        f: A pointer to a HDF5 group.
        model: Model instance.
        skip_mismatch: Boolean, whether to skip loading of layers
            where there is a mismatch in the number of weights,
            or a mismatch in the shape of the weights.

    Raises:
        ValueError: in case of mismatch between provided layers
            and weights file and skip_match=False.
    """
    # Parsed for provenance only; neither value is used below.
    if "keras_version" in f.attrs:
        original_keras_version = f.attrs["keras_version"]
        if hasattr(original_keras_version, "decode"):
            original_keras_version = original_keras_version.decode("utf8")
    else:
        original_keras_version = "1"
    if "backend" in f.attrs:
        original_backend = f.attrs["backend"]
        if hasattr(original_backend, "decode"):
            original_backend = original_backend.decode("utf8")
    else:
        original_backend = None

    # New file format.
    layer_names = load_attributes_from_hdf5_group(f, "layer_names")

    # Reverse index of layer name to list of layers with name.
    index = {}
    for layer in model.layers:
        if layer.name:
            index.setdefault(layer.name, []).append(layer)

    for k, name in enumerate(layer_names):
        g = f[name]
        weight_values = load_subset_weights_from_hdf5_group(g)
        for layer in index.get(name, []):
            symbolic_weights = _legacy_weights(layer)
            if len(weight_values) != len(symbolic_weights):
                if skip_mismatch:
                    warnings.warn(
                        f"Skipping loading of weights for layer #{k} (named "
                        f"{layer.name}) due to mismatch in number of weights. "
                        f"Layer expects {len(symbolic_weights)} weight(s). "
                        f"Received {len(weight_values)} saved weight(s)",
                        stacklevel=2,
                    )
                    continue
                raise ValueError(
                    f"Weight count mismatch for layer #{k} "
                    f"(named {layer.name}). "
                    f"Layer expects {len(symbolic_weights)} weight(s). "
                    f"Received {len(weight_values)} saved weight(s)"
                )
            # Set values.
            _set_weights(
                layer,
                symbolic_weights,
                weight_values,
                skip_mismatch=skip_mismatch,
                name=f"layer #{k} (named {layer.name})",
            )

    if "top_level_model_weights" in f:
        symbolic_weights = (
            model._trainable_variables + model._non_trainable_variables
        )
        weight_values = load_subset_weights_from_hdf5_group(
            f["top_level_model_weights"]
        )

        if len(weight_values) != len(symbolic_weights):
            if skip_mismatch:
                warnings.warn(
                    "Skipping loading top-level weights for model due to "
                    "mismatch in number of weights. "
                    f"Model expects {len(symbolic_weights)} "
                    "top-level weight(s). "
                    f"Received {len(weight_values)} saved top-level weight(s)",
                    stacklevel=2,
                )
            else:
                raise ValueError(
                    "Weight count mismatch for top-level weights of model. "
                    f"Model expects {len(symbolic_weights)} "
                    "top-level weight(s). "
                    f"Received {len(weight_values)} saved top-level weight(s)"
                )
        else:
            _set_weights(
                model,
                symbolic_weights,
                weight_values,
                skip_mismatch=skip_mismatch,
                name="top-level model",
            )


def load_subset_weights_from_hdf5_group(f):
    """Load layer weights of a model from hdf5.

    Args:
        f: A pointer to a HDF5 group.

    Returns:
        List of NumPy arrays of the weight values.

    Raises:
        ValueError: in case of mismatch between provided model
            and weights file.
    """
    weight_names = load_attributes_from_hdf5_group(f, "weight_names")
    return [np.asarray(f[weight_name]) for weight_name in weight_names]


def load_optimizer_weights_from_hdf5_group(hdf5_group):
    """Load optimizer weights from a HDF5 group.

    Args:
        hdf5_group: A pointer to a HDF5 group.

    Returns:
        List of optimizer weight values (HDF5 datasets), in the order given
        by the group's "weight_names" attribute. (Docstring fixed: the
        previous version claimed this returned weight *names*.)
    """
    weights_group = hdf5_group["optimizer_weights"]
    optimizer_weight_names = load_attributes_from_hdf5_group(
        weights_group, "weight_names"
    )
    return [
        weights_group[weight_name] for weight_name in optimizer_weight_names
    ]


def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group.

    This method deals with an inherent problem
    of HDF5 file which is not able to store
    data larger than HDF5_OBJECT_HEADER_LIMIT bytes.

    Args:
        group: A pointer to a HDF5 group.
        name: A name of the attributes to load.

    Returns:
        data: Attributes data.
    """
    if name in group.attrs:
        data = [
            n.decode("utf8") if hasattr(n, "decode") else n
            for n in group.attrs[name]
        ]
    else:
        # Chunked form written by `save_attributes_to_hdf5_group`:
        # "name0", "name1", ... are concatenated back in order.
        data = []
        chunk_id = 0
        while f"{name}{chunk_id}" in group.attrs:
            data.extend(
                [
                    n.decode("utf8") if hasattr(n, "decode") else n
                    for n in group.attrs[f"{name}{chunk_id}"]
                ]
            )
            chunk_id += 1
    return data


def _legacy_weights(layer):
    """Legacy weight order converter.

    For legacy reason, the layer.weights was in the order of
    [self.trainable_weights + self.non_trainable_weights], and this order was
    used for preserving the weights in h5 format. The new order of
    layer.weights are the same as layer.get_weights() which is more intuitive
    for user. To keep supporting the existing saved h5 file, this method
    should be used to save/load weights.

    Args:
        layer: a `Model` or `Layer` instance.

    Returns:
        A list of variables with the legacy weight order.
    """
    return layer.trainable_weights + layer.non_trainable_weights
+ """ + return layer.trainable_weights + layer.non_trainable_weights diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/saving_options.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/saving_options.py new file mode 100644 index 0000000000000000000000000000000000000000..6f270fb23290785800203d84e804fbf690839590 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/saving_options.py @@ -0,0 +1,17 @@ +import contextlib + +from keras.src.backend.common import global_state + + +@contextlib.contextmanager +def keras_option_scope(use_legacy_config=True): + use_legacy_config_prev_value = global_state.get_global_attribute( + "use_legacy_config", None + ) + global_state.set_global_attribute("use_legacy_config", use_legacy_config) + try: + yield + finally: + global_state.set_global_attribute( + "use_legacy_config", use_legacy_config_prev_value + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/saving_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/saving_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aec10780213831d40c50803470ed1006b5842b60 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/saving_utils.py @@ -0,0 +1,260 @@ +import json +import threading + +from absl import logging + +from keras.src import backend +from keras.src import layers +from keras.src import losses +from keras.src import metrics as metrics_module +from keras.src import models +from keras.src import optimizers +from keras.src import tree +from keras.src.legacy.saving import serialization +from keras.src.saving import object_registration + +MODULE_OBJECTS = threading.local() + +# Legacy lambda arguments not found in Keras 3 +LAMBDA_DEP_ARGS = ( + "module", + "function_type", + 
# NOTE(review): this tuple opens on the preceding (uncovered) collapsed line;
# the four entries below are reproduced from the two visible halves.
# Legacy lambda arguments not found in Keras 3.
LAMBDA_DEP_ARGS = (
    "module",
    "function_type",
    "output_shape_type",
    "output_shape_module",
)


def model_from_config(config, custom_objects=None):
    """Instantiates a Keras model from its config.

    Args:
        config: Configuration dictionary.
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.

    Returns:
        A Keras model instance (uncompiled).

    Raises:
        TypeError: if `config` is not a dictionary.
    """
    if isinstance(config, list):
        # Message grammar fixed: "Did you meant" -> "Did you mean".
        raise TypeError(
            "`model_from_config` expects a dictionary, not a list. "
            f"Received: config={config}. Did you mean to use "
            "`Sequential.from_config(config)`?"
        )

    global MODULE_OBJECTS

    # Lazily populate the per-thread registry of built-in deserializable
    # objects on first use.
    if not hasattr(MODULE_OBJECTS, "ALL_OBJECTS"):
        MODULE_OBJECTS.ALL_OBJECTS = layers.__dict__
        MODULE_OBJECTS.ALL_OBJECTS["InputLayer"] = layers.InputLayer
        MODULE_OBJECTS.ALL_OBJECTS["Functional"] = models.Functional
        MODULE_OBJECTS.ALL_OBJECTS["Model"] = models.Model
        MODULE_OBJECTS.ALL_OBJECTS["Sequential"] = models.Sequential

    # Keras 2 configs used `batch_input_shape`; map it to the Keras 3
    # equivalent argument.
    batch_input_shape = config["config"].pop("batch_input_shape", None)
    if batch_input_shape is not None:
        if config["class_name"] == "InputLayer":
            config["config"]["batch_shape"] = batch_input_shape
        else:
            config["config"]["input_shape"] = batch_input_shape

    # Single-element `axis` lists from legacy configs collapse to an int.
    axis = config["config"].pop("axis", None)
    if axis is not None and isinstance(axis, list) and len(axis) == 1:
        config["config"]["axis"] = int(axis[0])

    # Handle backwards compatibility for Keras lambdas.
    if config["class_name"] == "Lambda":
        for dep_arg in LAMBDA_DEP_ARGS:
            _ = config["config"].pop(dep_arg, None)
        function_config = config["config"]["function"]
        if isinstance(function_config, list):
            function_dict = {"class_name": "__lambda__", "config": {}}
            function_dict["config"]["code"] = function_config[0]
            function_dict["config"]["defaults"] = function_config[1]
            function_dict["config"]["closure"] = function_config[2]
            config["config"]["function"] = function_dict

    # TODO(nkovela): Swap find and replace args during Keras 3.0 release
    # Replace keras refs with keras. (Currently a deliberate no-op
    # placeholder: find and replace strings are identical.)
    config = _find_replace_nested_dict(config, "keras.", "keras.")

    return serialization.deserialize_keras_object(
        config,
        module_objects=MODULE_OBJECTS.ALL_OBJECTS,
        custom_objects=custom_objects,
        printable_module_name="layer",
    )


def model_metadata(model, include_optimizer=True, require_config=True):
    """Returns a dictionary containing the model metadata.

    Args:
        model: Model instance.
        include_optimizer: Whether to serialize the compile/optimizer state.
        require_config: If True, re-raise when the model has no `get_config`;
            otherwise the metadata simply omits the config body.
    """
    from keras.src import __version__ as keras_version

    model_config = {"class_name": model.__class__.__name__}
    try:
        model_config["config"] = model.get_config()
    except NotImplementedError as e:
        if require_config:
            raise e

    metadata = dict(
        keras_version=str(keras_version),
        backend=backend.backend(),
        model_config=model_config,
    )
    if getattr(model, "optimizer", False) and include_optimizer:
        if model.compiled:
            training_config = model._compile_config.config
            training_config.pop("optimizer", None)  # Handled separately.
            metadata["training_config"] = _serialize_nested_config(
                training_config
            )
            optimizer_config = {
                "class_name": object_registration.get_registered_name(
                    model.optimizer.__class__
                ),
                "config": model.optimizer.get_config(),
            }
            metadata["training_config"]["optimizer_config"] = optimizer_config
    return metadata


def compile_args_from_training_config(training_config, custom_objects=None):
    """Return model.compile arguments from training config."""
    if custom_objects is None:
        custom_objects = {}

    with object_registration.CustomObjectScope(custom_objects):
        optimizer_config = training_config["optimizer_config"]
        optimizer = optimizers.deserialize(optimizer_config)
        # Ensure backwards compatibility for optimizers in legacy H5 files.
        optimizer = _resolve_compile_arguments_compat(
            optimizer, optimizer_config, optimizers
        )

        # Recover losses.
        loss = None
        loss_config = training_config.get("loss", None)
        if loss_config is not None:
            loss = _deserialize_nested_config(losses.deserialize, loss_config)
            # Ensure backwards compatibility for losses in legacy H5 files.
            loss = _resolve_compile_arguments_compat(loss, loss_config, losses)

        # Recover metrics.
        metrics = None
        metrics_config = training_config.get("metrics", None)
        if metrics_config is not None:
            metrics = _deserialize_nested_config(
                _deserialize_metric, metrics_config
            )
            # Ensure backwards compatibility for metrics in legacy H5 files.
            metrics = _resolve_compile_arguments_compat(
                metrics, metrics_config, metrics_module
            )

        # Recover weighted metrics.
        weighted_metrics = None
        weighted_metrics_config = training_config.get("weighted_metrics", None)
        if weighted_metrics_config is not None:
            weighted_metrics = _deserialize_nested_config(
                _deserialize_metric, weighted_metrics_config
            )

        loss_weights = training_config["loss_weights"]

    return dict(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics,
        weighted_metrics=weighted_metrics,
        loss_weights=loss_weights,
    )


def _serialize_nested_config(config):
    """Serialized a nested structure of Keras objects."""

    def _serialize_fn(obj):
        if callable(obj):
            return serialization.serialize_keras_object(obj)
        return obj

    return tree.map_structure(_serialize_fn, config)


def _deserialize_nested_config(deserialize_fn, config):
    """Deserializes arbitrary Keras `config` using `deserialize_fn`."""

    def _is_single_object(obj):
        if isinstance(obj, dict) and "class_name" in obj:
            return True  # Serialized Keras object.
        if isinstance(obj, str):
            return True  # Serialized function or string.
        return False

    if config is None:
        return None
    if _is_single_object(config):
        return deserialize_fn(config)
    elif isinstance(config, dict):
        return {
            k: _deserialize_nested_config(deserialize_fn, v)
            for k, v in config.items()
        }
    elif isinstance(config, (tuple, list)):
        return [
            _deserialize_nested_config(deserialize_fn, obj) for obj in config
        ]

    raise ValueError(
        "Saved configuration not understood. Configuration should be a "
        f"dictionary, string, tuple or list. Received: config={config}."
    )


def _deserialize_metric(metric_config):
    """Deserialize metrics, leaving special strings untouched."""
    if metric_config in ["accuracy", "acc", "crossentropy", "ce"]:
        # Do not deserialize accuracy and cross-entropy strings as we have
        # special case handling for these in compile, based on model output
        # shape.
        return metric_config
    return metrics_module.deserialize(metric_config)


def _find_replace_nested_dict(config, find, replace):
    # Round-trips through JSON so the textual replace reaches every nested
    # string; `config` must therefore be JSON-serializable.
    dict_str = json.dumps(config)
    dict_str = dict_str.replace(find, replace)
    config = json.loads(dict_str)
    return config


def _resolve_compile_arguments_compat(obj, obj_config, module):
    """Resolves backwards compatibility issues with training config arguments.

    This helper function accepts built-in Keras modules such as optimizers,
    losses, and metrics to ensure an object being deserialized is compatible
    with Keras 3 built-ins. For legacy H5 files saved within Keras 3,
    this does nothing.
    """
    if isinstance(obj, str) and obj not in module.ALL_OBJECTS_DICT:
        obj = module.get(obj_config["config"]["name"])
    return obj


def try_build_compiled_arguments(model):
    """Best-effort build of the compiled loss/metrics containers.

    Failures are logged, not raised: a loaded model is still usable, its
    compiled metrics just stay empty until train/evaluate.
    """
    try:
        if not model.compiled_loss.built:
            model.compiled_loss.build(model.outputs)
        if not model.compiled_metrics.built:
            model.compiled_metrics.build(model.outputs, model.outputs)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        logging.warning(
            "Compiled the loaded model, but the compiled metrics have "
            "yet to be built. `model.compile_metrics` will be empty "
            "until you train or evaluate the model."
        )
`model.compile_metrics` will be empty " + "until you train or evaluate the model." + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/serialization.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..7fa7eb44c507ccb6dfe2a3efd5693001d91aff77 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/legacy/saving/serialization.py @@ -0,0 +1,574 @@ +"""Legacy serialization logic for Keras models.""" + +import contextlib +import inspect +import json +import threading +import weakref + +# isort: off +from keras.src.api_export import keras_export +from keras.src.saving import object_registration + +# Flag that determines whether to skip the NotImplementedError when calling +# get_config in custom models and layers. This is only enabled when saving to +# SavedModel, when the config isn't required. +_SKIP_FAILED_SERIALIZATION = False +# If a layer does not have a defined config, then the returned config will be a +# dictionary with the below key. +_LAYER_UNDEFINED_CONFIG_KEY = "layer was saved without config" + +# Store a unique, per-object ID for shared objects. +# +# We store a unique ID for each object so that we may, at loading time, +# re-create the network properly. Without this ID, we would have no way of +# determining whether a config is a description of a new object that +# should be created or is merely a reference to an already-created object. +SHARED_OBJECT_KEY = "shared_object_id" + +SHARED_OBJECT_DISABLED = threading.local() +SHARED_OBJECT_LOADING = threading.local() +SHARED_OBJECT_SAVING = threading.local() + + +# Attributes on the threadlocal variable must be set per-thread, thus we +# cannot initialize these globally. Instead, we have accessor functions with +# default values. 
def _shared_object_disabled():
    """Thread-safe read of the shared-object kill switch (default: off)."""
    return getattr(SHARED_OBJECT_DISABLED, "disabled", False)


def _shared_object_loading_scope():
    """Thread-safe read of the active loading scope (default: a no-op)."""
    return getattr(SHARED_OBJECT_LOADING, "scope", NoopLoadingScope())


def _shared_object_saving_scope():
    """Thread-safe read of the active saving scope (default: none)."""
    return getattr(SHARED_OBJECT_SAVING, "scope", None)


class DisableSharedObjectScope:
    """Context manager that turns off shared-object handling entirely.

    Disables shared-object tracking for both saving and loading. Used
    primarily by `clone_model`, whose surgery is incompatible with shared
    objects.
    """

    def __enter__(self):
        SHARED_OBJECT_DISABLED.disabled = True
        self._orig_loading_scope = _shared_object_loading_scope()
        self._orig_saving_scope = _shared_object_saving_scope()

    def __exit__(self, *args, **kwargs):
        # Restore whatever scopes were active before entry.
        SHARED_OBJECT_DISABLED.disabled = False
        SHARED_OBJECT_LOADING.scope = self._orig_loading_scope
        SHARED_OBJECT_SAVING.scope = self._orig_saving_scope


class NoopLoadingScope:
    """Default loading scope: remembers nothing, returns nothing.

    Lets serialization code that doesn't care about shared objects (e.g.
    single-object deserialization) call `get`/`set` unconditionally.
    """

    def get(self, unused_object_id):
        return None

    def set(self, object_id, obj):
        pass


class SharedObjectLoadingScope:
    """Context manager tracking objects already created during loading.

    When deserialization encounters an object shared across several layers,
    this scope hands back the previously created instance instead of
    cloning it, preserving the original network structure.
    """

    def __enter__(self):
        if _shared_object_disabled():
            return NoopLoadingScope()

        SHARED_OBJECT_LOADING.scope = self
        self._obj_ids_to_obj = {}
        return self

    def get(self, object_id):
        """Return the object previously stored under `object_id`, if any.

        Args:
            object_id: shared object ID to look up; `None` is accepted and
                simply yields `None`, keeping caller code clean.

        Returns:
            The previously seen object, or `None`.
        """
        if object_id is None:
            return None
        return self._obj_ids_to_obj.get(object_id)

    def set(self, object_id, obj):
        """Remember `obj` under `object_id` for future sharing lookups."""
        if object_id is not None:
            self._obj_ids_to_obj[object_id] = obj

    def __exit__(self, *args, **kwargs):
        SHARED_OBJECT_LOADING.scope = NoopLoadingScope()


class SharedObjectConfig(dict):
    """A config dict that counts references to itself.

    Once the same config is referenced more than once, it attaches its
    shared object ID under `SHARED_OBJECT_KEY`, so the relationship can be
    rebuilt at load time. Subclassing `dict` directly (rather than
    `collections.UserDict`/`Mapping`) is deliberate: Python's json encoder
    serializes `dict` subclasses but not arbitrary mappings, and we only
    add a method, never override core behavior.
    """

    def __init__(self, base_config, object_id, **kwargs):
        self.ref_count = 1
        self.object_id = object_id
        super().__init__(base_config, **kwargs)

    def increment_ref_count(self):
        # Attach the shared object ID only on the second sighting: configs
        # referenced once stay byte-identical to the legacy format, which
        # minimizes backwards-compatibility breakage.
        if self.ref_count == 1:
            self[SHARED_OBJECT_KEY] = self.object_id
        self.ref_count += 1


class SharedObjectSavingScope:
    """Tracks shared-object configs while serializing."""

    def __enter__(self):
        if _shared_object_disabled():
            return None

        # Saving can be triggered at several layers for several reasons, so
        # scopes may nest. Defer to the outermost active scope; nested,
        # distinct scopes have no use case (yet).
        outer = _shared_object_saving_scope()
        if outer is not None:
            self._passthrough = True
            return outer
        self._passthrough = False

        SHARED_OBJECT_SAVING.scope = self
        self._shared_objects_config = weakref.WeakKeyDictionary()
        self._next_id = 0
        return self

    def get_config(self, obj):
        """Return the already-seen `SharedObjectConfig` for `obj`, if any.

        Args:
            obj: The object whose config to look up.

        Returns:
            The `SharedObjectConfig` (with its ref count bumped), or `None`
            when `obj` has not been seen or is unhashable.
        """
        try:
            existing = self._shared_objects_config[obj]
        except (TypeError, KeyError):
            # Unhashable objects (e.g. an ABC subclass without `__hash__`)
            # raise TypeError; just proceed without shared-object support.
            return None
        existing.increment_ref_count()
        return existing

    def create_config(self, base_config, obj):
        """Create and register a new SharedObjectConfig for `obj`."""
        fresh = SharedObjectConfig(base_config, self._next_id)
        self._next_id += 1
        try:
            self._shared_objects_config[obj] = fresh
        except TypeError:
            # Unhashable object -- continue without shared-object support.
            pass
        return fresh

    def __exit__(self, *args, **kwargs):
        if not getattr(self, "_passthrough", False):
            SHARED_OBJECT_SAVING.scope = None


def serialize_keras_class_and_config(
    cls_name, cls_config, obj=None, shared_object_id=None
):
    """Returns the serialization of the class with the given config."""
    base_config = {"class_name": cls_name, "config": cls_config}

    # We call `serialize_keras_class_and_config` for some branches of the load
    # path. In that case, we may already have a shared object ID we'd like to
    # retain.
    if shared_object_id is not None:
        base_config[SHARED_OBJECT_KEY] = shared_object_id

    # If we have an active `SharedObjectSavingScope`, check whether we've
    # already serialized this config. If so, just use that config. This will
    # store an extra ID field in the config, allowing us to re-create the
    # shared object relationship at load time.
+ if _shared_object_saving_scope() is not None and obj is not None: + shared_object_config = _shared_object_saving_scope().get_config(obj) + if shared_object_config is None: + return _shared_object_saving_scope().create_config(base_config, obj) + return shared_object_config + + return base_config + + +@contextlib.contextmanager +def skip_failed_serialization(): + global _SKIP_FAILED_SERIALIZATION + prev = _SKIP_FAILED_SERIALIZATION + try: + _SKIP_FAILED_SERIALIZATION = True + yield + finally: + _SKIP_FAILED_SERIALIZATION = prev + + +@keras_export( + [ + "keras.legacy.saving.serialize_keras_object", + "keras.utils.legacy.serialize_keras_object", + ] +) +def serialize_keras_object(instance): + """Serialize a Keras object into a JSON-compatible representation. + + Calls to `serialize_keras_object` while underneath the + `SharedObjectSavingScope` context manager will cause any objects re-used + across multiple layers to be saved with a special shared object ID. This + allows the network to be re-created properly during deserialization. + + Args: + instance: The object to serialize. + + Returns: + A dict-like, JSON-compatible representation of the object's config. + """ + + # _, instance = tf.__internal__.decorator.unwrap(instance) + instance = inspect.unwrap(instance) + if instance is None: + return None + + if hasattr(instance, "get_config"): + name = object_registration.get_registered_name(instance.__class__) + try: + config = instance.get_config() + except NotImplementedError as e: + if _SKIP_FAILED_SERIALIZATION: + return serialize_keras_class_and_config( + name, {_LAYER_UNDEFINED_CONFIG_KEY: True} + ) + raise e + serialization_config = {} + for key, item in config.items(): + if isinstance(item, str): + serialization_config[key] = item + continue + + # Any object of a different type needs to be converted to string or + # dict for serialization (e.g. 
custom functions, custom classes) + try: + serialized_item = serialize_keras_object(item) + if isinstance(serialized_item, dict) and not isinstance( + item, dict + ): + serialized_item["__passive_serialization__"] = True + serialization_config[key] = serialized_item + except ValueError: + serialization_config[key] = item + + name = object_registration.get_registered_name(instance.__class__) + return serialize_keras_class_and_config( + name, serialization_config, instance + ) + if hasattr(instance, "__name__"): + return object_registration.get_registered_name(instance) + raise ValueError( + f"Cannot serialize {instance} because it doesn't implement " + "`get_config()`." + ) + + +def class_and_config_for_serialized_keras_object( + config, + module_objects=None, + custom_objects=None, + printable_module_name="object", +): + """Returns the class name and config for a serialized keras object.""" + + if ( + not isinstance(config, dict) + or "class_name" not in config + or "config" not in config + ): + raise ValueError( + f"Improper config format for {config}. " + "Expecting python dict contains `class_name` and `config` as keys" + ) + + class_name = config["class_name"] + cls = object_registration.get_registered_object( + class_name, custom_objects, module_objects + ) + if cls is None: + raise ValueError( + f"Unknown {printable_module_name}: '{class_name}'. " + "Please ensure you are using a `keras.utils.custom_object_scope` " + "and that this object is included in the scope. See " + "https://www.tensorflow.org/guide/keras/save_and_serialize" + "#registering_the_custom_object for details." + ) + + cls_config = config["config"] + # Check if `cls_config` is a list. If it is a list, return the class and the + # associated class configs for recursively deserialization. This case will + # happen on the old version of sequential model (e.g. 
`keras_version` == + # "2.0.6"), which is serialized in a different structure, for example + # "{'class_name': 'Sequential', + # 'config': [{'class_name': 'Embedding', 'config': ...}, {}, ...]}". + if isinstance(cls_config, list): + return (cls, cls_config) + + deserialized_objects = {} + for key, item in cls_config.items(): + if key == "name": + # Assume that the value of 'name' is a string that should not be + # deserialized as a function. This avoids the corner case where + # cls_config['name'] has an identical name to a custom function and + # gets converted into that function. + deserialized_objects[key] = item + elif isinstance(item, dict) and "__passive_serialization__" in item: + deserialized_objects[key] = deserialize_keras_object( + item, + module_objects=module_objects, + custom_objects=custom_objects, + printable_module_name="config_item", + ) + # TODO(momernick): Should this also have 'module_objects'? + elif isinstance(item, str) and inspect.isfunction( + object_registration.get_registered_object(item, custom_objects) + ): + # Handle custom functions here. When saving functions, we only save + # the function's name as a string. If we find a matching string in + # the custom objects during deserialization, we convert the string + # back to the original function. + # Note that a potential issue is that a string field could have a + # naming conflict with a custom function name, but this should be a + # rare case. This issue does not occur if a string field has a + # naming conflict with a custom object, since the config of an + # object will always be a dict. 
+ deserialized_objects[key] = ( + object_registration.get_registered_object(item, custom_objects) + ) + for key, item in deserialized_objects.items(): + cls_config[key] = deserialized_objects[key] + + return (cls, cls_config) + + +@keras_export( + [ + "keras.legacy.saving.deserialize_keras_object", + "keras.utils.legacy.deserialize_keras_object", + ] +) +def deserialize_keras_object( + identifier, + module_objects=None, + custom_objects=None, + printable_module_name="object", +): + """Turns the serialized form of a Keras object back into an actual object. + + This function is for mid-level library implementers rather than end users. + + Importantly, this utility requires you to provide the dict of + `module_objects` to use for looking up the object config; this is not + populated by default. If you need a deserialization utility that has + preexisting knowledge of built-in Keras objects, use e.g. + `keras.layers.deserialize(config)`, `keras.metrics.deserialize(config)`, + etc. + + Calling `deserialize_keras_object` while underneath the + `SharedObjectLoadingScope` context manager will cause any already-seen + shared objects to be returned as-is rather than creating a new object. + + Args: + identifier: the serialized form of the object. + module_objects: A dictionary of built-in objects to look the name up in. + Generally, `module_objects` is provided by midlevel library + implementers. + custom_objects: A dictionary of custom objects to look the name up in. + Generally, `custom_objects` is provided by the end user. + printable_module_name: A human-readable string representing the type of + the object. Printed in case of exception. + + Returns: + The deserialized object. 
+ + Example: + + A mid-level library implementer might want to implement a utility for + retrieving an object from its config, as such: + + ```python + def deserialize(config, custom_objects=None): + return deserialize_keras_object( + identifier, + module_objects=globals(), + custom_objects=custom_objects, + name="MyObjectType", + ) + ``` + + This is how e.g. `keras.layers.deserialize()` is implemented. + """ + + if identifier is None: + return None + + if isinstance(identifier, dict): + # In this case we are dealing with a Keras config dictionary. + config = identifier + (cls, cls_config) = class_and_config_for_serialized_keras_object( + config, module_objects, custom_objects, printable_module_name + ) + + # If this object has already been loaded (i.e. it's shared between + # multiple objects), return the already-loaded object. + shared_object_id = config.get(SHARED_OBJECT_KEY) + shared_object = _shared_object_loading_scope().get(shared_object_id) + if shared_object is not None: + return shared_object + + if hasattr(cls, "from_config"): + arg_spec = inspect.getfullargspec(cls.from_config) + custom_objects = custom_objects or {} + + # TODO(nkovela): Swap find and replace args during Keras 3.0 release + # Replace keras refs with keras + cls_config = _find_replace_nested_dict( + cls_config, "keras.", "keras." + ) + + if "custom_objects" in arg_spec.args: + deserialized_obj = cls.from_config( + cls_config, + custom_objects={ + **object_registration.GLOBAL_CUSTOM_OBJECTS, + **custom_objects, + }, + ) + else: + with object_registration.CustomObjectScope(custom_objects): + deserialized_obj = cls.from_config(cls_config) + else: + # Then `cls` may be a function returning a class. + # in this case by convention `config` holds + # the kwargs of the function. + custom_objects = custom_objects or {} + with object_registration.CustomObjectScope(custom_objects): + deserialized_obj = cls(**cls_config) + + # Add object to shared objects, in case we find it referenced again. 
+ _shared_object_loading_scope().set(shared_object_id, deserialized_obj) + + return deserialized_obj + + elif isinstance(identifier, str): + object_name = identifier + if custom_objects and object_name in custom_objects: + obj = custom_objects.get(object_name) + elif ( + object_name + in object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__ + ): + obj = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__[ + object_name + ] + elif object_name in object_registration._GLOBAL_CUSTOM_OBJECTS: + obj = object_registration._GLOBAL_CUSTOM_OBJECTS[object_name] + else: + obj = module_objects.get(object_name) + if obj is None: + raise ValueError( + f"Unknown {printable_module_name}: '{object_name}'. " + "Please ensure you are using a " + "`keras.utils.custom_object_scope` " + "and that this object is included in the scope. See " + "https://www.tensorflow.org/guide/keras/save_and_serialize" + "#registering_the_custom_object for details." + ) + + # Classes passed by name are instantiated with no args, functions are + # returned as-is. + if inspect.isclass(obj): + return obj() + return obj + elif inspect.isfunction(identifier): + # If a function has already been deserialized, return as is. 
+ return identifier + else: + raise ValueError( + "Could not interpret serialized " + f"{printable_module_name}: {identifier}" + ) + + +def validate_config(config): + """Determines whether config appears to be a valid layer config.""" + return ( + isinstance(config, dict) and _LAYER_UNDEFINED_CONFIG_KEY not in config + ) + + +def is_default(method): + """Check if a method is decorated with the `default` wrapper.""" + return getattr(method, "_is_default", False) + + +def _find_replace_nested_dict(config, find, replace): + dict_str = json.dumps(config) + dict_str = dict_str.replace(find, replace) + config = json.loads(dict_str) + return config diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7afeb55a01d17e3422538406e0e199c3f516dee6 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__init__.py @@ -0,0 +1,207 @@ +import inspect + +from keras.src.api_export import keras_export +from keras.src.losses.loss import Loss +from keras.src.losses.losses import CTC +from keras.src.losses.losses import BinaryCrossentropy +from keras.src.losses.losses import BinaryFocalCrossentropy +from keras.src.losses.losses import CategoricalCrossentropy +from keras.src.losses.losses import CategoricalFocalCrossentropy +from keras.src.losses.losses import CategoricalHinge +from keras.src.losses.losses import Circle +from keras.src.losses.losses import CosineSimilarity +from keras.src.losses.losses import Dice +from keras.src.losses.losses import Hinge +from keras.src.losses.losses import Huber +from keras.src.losses.losses import KLDivergence +from keras.src.losses.losses import LogCosh +from keras.src.losses.losses import LossFunctionWrapper +from keras.src.losses.losses import MeanAbsoluteError +from keras.src.losses.losses import 
MeanAbsolutePercentageError +from keras.src.losses.losses import MeanSquaredError +from keras.src.losses.losses import MeanSquaredLogarithmicError +from keras.src.losses.losses import Poisson +from keras.src.losses.losses import SparseCategoricalCrossentropy +from keras.src.losses.losses import SquaredHinge +from keras.src.losses.losses import Tversky +from keras.src.losses.losses import binary_crossentropy +from keras.src.losses.losses import binary_focal_crossentropy +from keras.src.losses.losses import categorical_crossentropy +from keras.src.losses.losses import categorical_focal_crossentropy +from keras.src.losses.losses import categorical_hinge +from keras.src.losses.losses import circle +from keras.src.losses.losses import cosine_similarity +from keras.src.losses.losses import ctc +from keras.src.losses.losses import dice +from keras.src.losses.losses import hinge +from keras.src.losses.losses import huber +from keras.src.losses.losses import kl_divergence +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.losses.losses import poisson +from keras.src.losses.losses import sparse_categorical_crossentropy +from keras.src.losses.losses import squared_hinge +from keras.src.losses.losses import tversky +from keras.src.saving import serialization_lib + +ALL_OBJECTS = { + # Base + Loss, + LossFunctionWrapper, + # Probabilistic + KLDivergence, + Poisson, + BinaryCrossentropy, + BinaryFocalCrossentropy, + CategoricalCrossentropy, + CategoricalFocalCrossentropy, + SparseCategoricalCrossentropy, + # Regression + MeanSquaredError, + MeanAbsoluteError, + MeanAbsolutePercentageError, + MeanSquaredLogarithmicError, + CosineSimilarity, + LogCosh, + Huber, + # Hinge + Hinge, + SquaredHinge, + CategoricalHinge, + # Image 
segmentation + Dice, + Tversky, + # Similarity + Circle, + # Sequence + CTC, + # Probabilistic + kl_divergence, + poisson, + binary_crossentropy, + binary_focal_crossentropy, + categorical_crossentropy, + categorical_focal_crossentropy, + sparse_categorical_crossentropy, + # Regression + mean_squared_error, + mean_absolute_error, + mean_absolute_percentage_error, + mean_squared_logarithmic_error, + cosine_similarity, + log_cosh, + huber, + # Hinge + hinge, + squared_hinge, + categorical_hinge, + # Image segmentation + dice, + tversky, + # Similarity + circle, + # Sequence + ctc, +} + +ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS} +ALL_OBJECTS_DICT.update( + { + "bce": binary_crossentropy, + "BCE": binary_crossentropy, + "kld": kl_divergence, + "KLD": kl_divergence, + "mae": mean_absolute_error, + "MAE": mean_absolute_error, + "mse": mean_squared_error, + "MSE": mean_squared_error, + "mape": mean_absolute_percentage_error, + "MAPE": mean_absolute_percentage_error, + "msle": mean_squared_logarithmic_error, + "MSLE": mean_squared_logarithmic_error, + } +) + + +@keras_export("keras.losses.serialize") +def serialize(loss): + """Serializes loss function or `Loss` instance. + + Args: + loss: A Keras `Loss` instance or a loss function. + + Returns: + Loss configuration dictionary. + """ + return serialization_lib.serialize_keras_object(loss) + + +@keras_export("keras.losses.deserialize") +def deserialize(name, custom_objects=None): + """Deserializes a serialized loss class/function instance. + + Args: + name: Loss configuration. + custom_objects: Optional dictionary mapping names (strings) to custom + objects (classes and functions) to be considered during + deserialization. + + Returns: + A Keras `Loss` instance or a loss function. 
+ """ + return serialization_lib.deserialize_keras_object( + name, + module_objects=ALL_OBJECTS_DICT, + custom_objects=custom_objects, + ) + + +@keras_export("keras.losses.get") +def get(identifier): + """Retrieves a Keras loss as a `function`/`Loss` class instance. + + The `identifier` may be the string name of a loss function or `Loss` class. + + >>> loss = losses.get("categorical_crossentropy") + >>> type(loss) + + >>> loss = losses.get("CategoricalCrossentropy") + >>> type(loss) + + + You can also specify `config` of the loss to this function by passing dict + containing `class_name` and `config` as an identifier. Also note that the + `class_name` must map to a `Loss` class + + >>> identifier = {"class_name": "CategoricalCrossentropy", + ... "config": {"from_logits": True}} + >>> loss = losses.get(identifier) + >>> type(loss) + + + Args: + identifier: A loss identifier. One of None or string name of a loss + function/class or loss configuration dictionary or a loss function + or a loss class instance. + + Returns: + A Keras loss as a `function`/ `Loss` class instance. 
+ """ + if identifier is None: + return None + if isinstance(identifier, dict): + obj = deserialize(identifier) + elif isinstance(identifier, str): + obj = ALL_OBJECTS_DICT.get(identifier, None) + else: + obj = identifier + + if callable(obj): + if inspect.isclass(obj): + obj = obj() + return obj + else: + raise ValueError(f"Could not interpret loss identifier: {identifier}") diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..301c3af6f94710718e05ac4575c291f4d2193c79 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/loss.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc05fdd0843879bde4605e1ef7e5b53b562effc7 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/loss.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/losses.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/losses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b6cf690ec11b9a10d9d2df67a66b82393c294d3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/__pycache__/losses.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/loss.py 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..6af73902d0fd08464e1e96d978f45e6898e595ac --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/loss.py @@ -0,0 +1,256 @@ +from keras.src import backend +from keras.src import dtype_policies +from keras.src import ops +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.saving.keras_saveable import KerasSaveable +from keras.src.utils.naming import auto_name + + +@keras_export(["keras.Loss", "keras.losses.Loss"]) +class Loss(KerasSaveable): + """Loss base class. + + This is the class to subclass in order to create new custom losses. + + Args: + reduction: Type of reduction to apply to the loss. In almost all cases + this should be `"sum_over_batch_size"`. Supported options are + `"sum"`, `"sum_over_batch_size"`, `"mean"`, + `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss, + `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the + sample size, and `"mean_with_sample_weight"` sums the loss and + divides by the sum of the sample weights. `"none"` and `None` + perform no aggregation. Defaults to `"sum_over_batch_size"`. + name: Optional name for the loss instance. + dtype: The dtype of the loss's computations. Defaults to `None`, which + means using `keras.backend.floatx()`. `keras.backend.floatx()` is a + `"float32"` unless set to different value + (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is + provided, then the `compute_dtype` will be utilized. + + To be implemented by subclasses: + + * `call()`: Contains the logic for loss calculation using `y_true`, + `y_pred`. 
+ + Example subclass implementation: + + ```python + class MeanSquaredError(Loss): + def call(self, y_true, y_pred): + return ops.mean(ops.square(y_pred - y_true), axis=-1) + ``` + """ + + def __init__(self, name=None, reduction="sum_over_batch_size", dtype=None): + self.name = name or auto_name(self.__class__.__name__) + self.reduction = standardize_reduction(reduction) + self._dtype_policy = dtype_policies.get(dtype or backend.floatx()) + self._dtype = self._dtype_policy.compute_dtype + + @property + def dtype(self): + return self._dtype + + def __call__(self, y_true, y_pred, sample_weight=None): + in_mask = backend.get_keras_mask(y_pred) + + with ops.name_scope(self.name): + y_pred = tree.map_structure( + lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_pred + ) + y_true = tree.map_structure( + lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_true + ) + + losses = self.call(y_true, y_pred) + out_mask = backend.get_keras_mask(losses) + + if in_mask is not None and out_mask is not None: + mask = in_mask & out_mask + elif in_mask is not None: + mask = in_mask + elif out_mask is not None: + mask = out_mask + else: + mask = None + + return reduce_weighted_values( + losses, + sample_weight=sample_weight, + mask=mask, + reduction=self.reduction, + dtype=self.dtype, + ) + + def call(self, y_true, y_pred): + raise NotImplementedError + + def get_config(self): + return {"name": self.name, "reduction": self.reduction} + + @classmethod + def from_config(cls, config): + return cls(**config) + + def _obj_type(self): + return "Loss" + + +def standardize_reduction(reduction): + allowed = { + "sum_over_batch_size", + "sum", + None, + "none", + "mean", + "mean_with_sample_weight", + } + if reduction not in allowed: + raise ValueError( + "Invalid value for argument `reduction`. " + f"Expected one of {allowed}. 
Received: " + f"reduction={reduction}" + ) + return reduction + + +def squeeze_or_expand_to_same_rank(x1, x2, expand_rank_1=True): + """Squeeze/expand last dim if ranks differ from expected by exactly 1.""" + x1_rank = len(x1.shape) + x2_rank = len(x2.shape) + if x1_rank == x2_rank: + return x1, x2 + if x1_rank == x2_rank + 1: + if x1.shape[-1] == 1: + if x2_rank == 1 and expand_rank_1: + x2 = ops.expand_dims(x2, axis=-1) + else: + x1 = ops.squeeze(x1, axis=-1) + if x2_rank == x1_rank + 1: + if x2.shape[-1] == 1: + if x1_rank == 1 and expand_rank_1: + x1 = ops.expand_dims(x1, axis=-1) + else: + x2 = ops.squeeze(x2, axis=-1) + return x1, x2 + + +def reduce_values(values, sample_weight=None, reduction="sum_over_batch_size"): + if ( + reduction is None + or reduction == "none" + or tuple(values.shape) == () + or tuple(values.shape) == (0,) + ): + return values + loss = ops.sum(values) + if reduction in ("sum_over_batch_size", "mean", "mean_with_sample_weight"): + if reduction == "mean_with_sample_weight" and sample_weight is not None: + divisor = ops.cast(ops.sum(sample_weight), loss.dtype) + else: + divisor = ops.cast( + ops.prod( + ops.convert_to_tensor(ops.shape(values), dtype="int32") + ), + loss.dtype, + ) + loss = ops.divide_no_nan(loss, divisor) + loss = scale_loss_for_distribution(loss) + return loss + + +def reduce_weighted_values( + values, + sample_weight=None, + mask=None, + reduction="sum_over_batch_size", + dtype=None, +): + reduction = standardize_reduction(reduction) + + values = ops.convert_to_tensor(values, dtype=dtype) + if sample_weight is not None: + sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype) + if mask is not None: + mask = ops.convert_to_tensor(mask, dtype=dtype) + + # Merge mask and sample weight into sample weight. 
+ sample_weight = apply_mask( + sample_weight, mask, dtype=values.dtype, reduction=reduction + ) + + if sample_weight is not None: + sample_weight = ops.cast(sample_weight, values.dtype) + # Update dimensions of `sample_weight` to match `losses`. + values, sample_weight = squeeze_or_expand_to_same_rank( + values, sample_weight + ) + values = values * sample_weight + + # Apply reduction function to the individual weighted losses. + loss = reduce_values(values, sample_weight, reduction) + return loss + + +def apply_mask(sample_weight, mask, dtype, reduction): + """Applies any mask on predictions to sample weights.""" + if mask is not None: + mask = ops.cast(mask, dtype=dtype) + if reduction in ("mean", "sum_over_batch_size"): + # Valid entries have weight `total/valid`, while invalid ones + # have 0. When summed over batch, they will be reduced to: + # + # mean(loss * sample_weight * total / valid) + # = sum(loss * sample_weight * total / valid) / total + # = sum(loss * sample_weight) / total * total / valid + # = sum(loss * sample_weight) / valid + total = ops.cast( + ops.prod(ops.convert_to_tensor(ops.shape(mask), dtype="int32")), + dtype, + ) + valid = ops.sum(mask) # May be 0! + mask *= total / (valid + backend.epsilon()) + + if sample_weight is not None: + sample_weight = ops.cast(sample_weight, dtype=dtype) + mask, sample_weight = squeeze_or_expand_to_same_rank( + mask, sample_weight + ) + sample_weight *= mask + else: + sample_weight = mask + return sample_weight + + +def scale_loss_for_distribution(value): + """Scales the given value by the number of replicas in the strategy. + + Currently, this function is only effective when using the tensorflow backend + and `tf.distribute`. 
+ """ + if backend.backend() == "tensorflow": + import tensorflow as tf + + num_replicas = tf.distribute.get_strategy().num_replicas_in_sync + if num_replicas > 1: + value = ops.multiply( + value, ops.cast(1.0 / num_replicas, value.dtype) + ) + return value + + +def unscale_loss_for_distribution(value): + """Unscales the given value by the number of replicas in the strategy. + + Currently, this function is only effective when using the tensorflow backend + and `tf.distribute`. + """ + if backend.backend() == "tensorflow": + import tensorflow as tf + + num_replicas = tf.distribute.get_strategy().num_replicas_in_sync + if num_replicas > 1: + value = ops.multiply(value, ops.cast(num_replicas, value.dtype)) + return value diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/losses.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..559bb4726560fd091e30c91f9ffcc3686b33bbef --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/losses/losses.py @@ -0,0 +1,2599 @@ +import warnings + +from keras.src import backend +from keras.src import ops +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.losses.loss import Loss +from keras.src.losses.loss import squeeze_or_expand_to_same_rank +from keras.src.saving import serialization_lib +from keras.src.utils.numerical_utils import build_pos_neg_masks +from keras.src.utils.numerical_utils import normalize + + +class LossFunctionWrapper(Loss): + def __init__( + self, + fn, + reduction="sum_over_batch_size", + name=None, + dtype=None, + **kwargs, + ): + super().__init__(name=name, reduction=reduction, dtype=dtype) + self.fn = fn + self._fn_kwargs = kwargs + + def call(self, y_true, y_pred): + y_true_y_pred = tree.map_structure( + squeeze_or_expand_to_same_rank, y_true, y_pred + ) + y_true = 
tree.map_structure_up_to(y_true, lambda x: x[0], y_true_y_pred) + y_pred = tree.map_structure_up_to(y_pred, lambda x: x[1], y_true_y_pred) + return self.fn(y_true, y_pred, **self._fn_kwargs) + + def get_config(self): + config = super().get_config() + config.update({"fn": serialization_lib.serialize_keras_object(self.fn)}) + config.update(serialization_lib.serialize_keras_object(self._fn_kwargs)) + return config + + @classmethod + def from_config(cls, config): + if "fn" in config: + config = serialization_lib.deserialize_keras_object(config) + return cls(**config) + + def __repr__(self): + return f"" + + +@keras_export("keras.losses.MeanSquaredError") +class MeanSquaredError(LossFunctionWrapper): + """Computes the mean of squares of errors between labels and predictions. + + Formula: + + ```python + loss = mean(square(y_true - y_pred)) + ``` + + Args: + reduction: Type of reduction to apply to the loss. In almost all cases + this should be `"sum_over_batch_size"`. Supported options are + `"sum"`, `"sum_over_batch_size"`, `"mean"`, + `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss, + `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the + sample size, and `"mean_with_sample_weight"` sums the loss and + divides by the sum of the sample weights. `"none"` and `None` + perform no aggregation. Defaults to `"sum_over_batch_size"`. + name: Optional name for the loss instance. + dtype: The dtype of the loss's computations. Defaults to `None`, which + means using `keras.backend.floatx()`. `keras.backend.floatx()` is a + `"float32"` unless set to different value + (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is + provided, then the `compute_dtype` will be utilized. 
+ """ + + def __init__( + self, + reduction="sum_over_batch_size", + name="mean_squared_error", + dtype=None, + ): + super().__init__( + mean_squared_error, name=name, reduction=reduction, dtype=dtype + ) + + def get_config(self): + return Loss.get_config(self) + + +@keras_export("keras.losses.MeanAbsoluteError") +class MeanAbsoluteError(LossFunctionWrapper): + """Computes the mean of absolute difference between labels and predictions. + + Formula: + + ```python + loss = mean(abs(y_true - y_pred)) + ``` + + Args: + reduction: Type of reduction to apply to the loss. In almost all cases + this should be `"sum_over_batch_size"`. Supported options are + `"sum"`, `"sum_over_batch_size"`, `"mean"`, + `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss, + `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the + sample size, and `"mean_with_sample_weight"` sums the loss and + divides by the sum of the sample weights. `"none"` and `None` + perform no aggregation. Defaults to `"sum_over_batch_size"`. + name: Optional name for the loss instance. + dtype: The dtype of the loss's computations. Defaults to `None`, which + means using `keras.backend.floatx()`. `keras.backend.floatx()` is a + `"float32"` unless set to different value + (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is + provided, then the `compute_dtype` will be utilized. + """ + + def __init__( + self, + reduction="sum_over_batch_size", + name="mean_absolute_error", + dtype=None, + ): + super().__init__( + mean_absolute_error, name=name, reduction=reduction, dtype=dtype + ) + + def get_config(self): + return Loss.get_config(self) + + +@keras_export("keras.losses.MeanAbsolutePercentageError") +class MeanAbsolutePercentageError(LossFunctionWrapper): + """Computes the mean absolute percentage error between `y_true` & `y_pred`. + + Formula: + + ```python + loss = 100 * mean(abs((y_true - y_pred) / y_true)) + ``` + + Args: + reduction: Type of reduction to apply to the loss. 
In almost all cases + this should be `"sum_over_batch_size"`. Supported options are + `"sum"`, `"sum_over_batch_size"`, `"mean"`, + `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss, + `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the + sample size, and `"mean_with_sample_weight"` sums the loss and + divides by the sum of the sample weights. `"none"` and `None` + perform no aggregation. Defaults to `"sum_over_batch_size"`. + name: Optional name for the loss instance. + dtype: The dtype of the loss's computations. Defaults to `None`, which + means using `keras.backend.floatx()`. `keras.backend.floatx()` is a + `"float32"` unless set to different value + (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is + provided, then the `compute_dtype` will be utilized. + """ + + def __init__( + self, + reduction="sum_over_batch_size", + name="mean_absolute_percentage_error", + dtype=None, + ): + super().__init__( + mean_absolute_percentage_error, + name=name, + reduction=reduction, + dtype=dtype, + ) + + def get_config(self): + return Loss.get_config(self) + + +@keras_export("keras.losses.MeanSquaredLogarithmicError") +class MeanSquaredLogarithmicError(LossFunctionWrapper): + """Computes the mean squared logarithmic error between `y_true` & `y_pred`. + + Formula: + + ```python + loss = mean(square(log(y_true + 1) - log(y_pred + 1))) + ``` + + Args: + reduction: Type of reduction to apply to the loss. In almost all cases + this should be `"sum_over_batch_size"`. Supported options are + `"sum"`, `"sum_over_batch_size"`, `"mean"`, + `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss, + `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the + sample size, and `"mean_with_sample_weight"` sums the loss and + divides by the sum of the sample weights. `"none"` and `None` + perform no aggregation. Defaults to `"sum_over_batch_size"`. + name: Optional name for the loss instance. 
+ dtype: The dtype of the loss's computations. Defaults to `None`, which + means using `keras.backend.floatx()`. `keras.backend.floatx()` is a + `"float32"` unless set to different value + (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is + provided, then the `compute_dtype` will be utilized. + """ + + def __init__( + self, + reduction="sum_over_batch_size", + name="mean_squared_logarithmic_error", + dtype=None, + ): + super().__init__( + mean_squared_logarithmic_error, + name=name, + reduction=reduction, + dtype=dtype, + ) + + def get_config(self): + return Loss.get_config(self) + + +@keras_export("keras.losses.CosineSimilarity") +class CosineSimilarity(LossFunctionWrapper): + """Computes the cosine similarity between `y_true` & `y_pred`. + + Note that it is a number between -1 and 1. When it is a negative number + between -1 and 0, 0 indicates orthogonality and values closer to -1 + indicate greater similarity. This makes it usable as a loss function in a + setting where you try to maximize the proximity between predictions and + targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity + will be 0 regardless of the proximity between predictions and targets. + + Formula: + + ```python + loss = -sum(l2_norm(y_true) * l2_norm(y_pred)) + ``` + + Args: + axis: The axis along which the cosine similarity is computed + (the features axis). Defaults to `-1`. + reduction: Type of reduction to apply to the loss. In almost all cases + this should be `"sum_over_batch_size"`. Supported options are + `"sum"`, `"sum_over_batch_size"`, `"mean"`, + `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss, + `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the + sample size, and `"mean_with_sample_weight"` sums the loss and + divides by the sum of the sample weights. `"none"` and `None` + perform no aggregation. Defaults to `"sum_over_batch_size"`. + name: Optional name for the loss instance. 
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        axis=-1,
        reduction="sum_over_batch_size",
        name="cosine_similarity",
        dtype=None,
    ):
        super().__init__(
            cosine_similarity,
            name=name,
            reduction=reduction,
            dtype=dtype,
            axis=axis,
        )

    def get_config(self):
        # Serialize only the base `Loss` fields (name/reduction/dtype),
        # deliberately bypassing `LossFunctionWrapper.get_config`.
        return Loss.get_config(self)


@keras_export("keras.losses.Huber")
class Huber(LossFunctionWrapper):
    """Computes the Huber loss between `y_true` & `y_pred`.

    Formula:

    ```python
    for x in error:
        if abs(x) <= delta:
            loss.append(0.5 * x^2)
        elif abs(x) > delta:
            loss.append(delta * abs(x) - 0.5 * delta^2)

    loss = mean(loss, axis=-1)
    ```
    See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).

    Args:
        delta: A float, the point where the Huber loss function changes from a
            quadratic to linear.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        delta=1.0,
        reduction="sum_over_batch_size",
        name="huber_loss",
        dtype=None,
    ):
        super().__init__(
            huber,
            name=name,
            reduction=reduction,
            dtype=dtype,
            delta=delta,
        )

    def get_config(self):
        # NOTE(review): `delta` is not serialized here — bypasses
        # `LossFunctionWrapper.get_config` and returns only base fields.
        return Loss.get_config(self)


@keras_export("keras.losses.LogCosh")
class LogCosh(LossFunctionWrapper):
    """Computes the logarithm of the hyperbolic cosine of the prediction error.

    Formula:

    ```python
    error = y_pred - y_true
    logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1)
    ```
    where x is the error `y_pred - y_true`.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        reduction="sum_over_batch_size",
        name="log_cosh",
        dtype=None,
    ):
        super().__init__(log_cosh, name=name, reduction=reduction, dtype=dtype)

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.Hinge")
class Hinge(LossFunctionWrapper):
    """Computes the hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = maximum(1 - y_true * y_pred, 0)
    ```

    `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
    provided we will convert them to -1 or 1.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        reduction="sum_over_batch_size",
        name="hinge",
        dtype=None,
    ):
        super().__init__(hinge, name=name, reduction=reduction, dtype=dtype)

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.SquaredHinge")
class SquaredHinge(LossFunctionWrapper):
    """Computes the squared hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = square(maximum(1 - y_true * y_pred, 0))
    ```

    `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
    provided we will convert them to -1 or 1.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self, reduction="sum_over_batch_size", name="squared_hinge", dtype=None
    ):
        super().__init__(
            squared_hinge, name=name, reduction=reduction, dtype=dtype
        )

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.CategoricalHinge")
class CategoricalHinge(LossFunctionWrapper):
    """Computes the categorical hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = maximum(neg - pos + 1, 0)
    ```

    where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        reduction="sum_over_batch_size",
        name="categorical_hinge",
        dtype=None,
    ):
        super().__init__(
            categorical_hinge, name=name, reduction=reduction, dtype=dtype
        )

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.KLDivergence")
class KLDivergence(LossFunctionWrapper):
    """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = y_true * log(y_true / y_pred)
    ```

    `y_true` and `y_pred` are expected to be probability
    distributions, with values between 0 and 1. They will get
    clipped to the `[0, 1]` range.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self, reduction="sum_over_batch_size", name="kl_divergence", dtype=None
    ):
        super().__init__(
            kl_divergence, name=name, reduction=reduction, dtype=dtype
        )

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.Poisson")
class Poisson(LossFunctionWrapper):
    """Computes the Poisson loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = y_pred - y_true * log(y_pred)
    ```

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self, reduction="sum_over_batch_size", name="poisson", dtype=None
    ):
        super().__init__(poisson, name=name, reduction=reduction, dtype=dtype)

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.BinaryCrossentropy")
class BinaryCrossentropy(LossFunctionWrapper):
    """Computes the cross-entropy loss between true labels and predicted labels.

    Use this cross-entropy loss for binary (0 or 1) classification applications.
    The loss function requires the following inputs:

    - `y_true` (true label): This is either 0 or 1.
    - `y_pred` (predicted value): This is the model's prediction, i.e, a single
        floating-point value which either represents a
        [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
        when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
        `from_logits=False`).

    Args:
        from_logits: Whether to interpret `y_pred` as a tensor of
            [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
            assume that `y_pred` is probabilities (i.e., values in [0, 1]).
        label_smoothing: Float in range [0, 1]. When 0, no smoothing occurs.
            When > 0, we compute the loss between the predicted labels
            and a smoothed version of the true labels, where the smoothing
            squeezes the labels towards 0.5. Larger values of
            `label_smoothing` correspond to heavier smoothing.
        axis: The axis along which to compute crossentropy (the features axis).
            Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    **Recommended Usage:** (set `from_logits=True`)

    With `compile()` API:

    ```python
    model.compile(
        loss=keras.losses.BinaryCrossentropy(from_logits=True),
        ...
    )
    ```

    As a standalone function:

    >>> # Example 1: (batch_size = 1, number of samples = 4)
    >>> y_true = np.array([0, 1, 0, 0])
    >>> y_pred = np.array([-18.6, 0.51, 2.94, -12.8])
    >>> bce = keras.losses.BinaryCrossentropy(from_logits=True)
    >>> bce(y_true, y_pred)
    0.8654

    >>> # Example 2: (batch_size = 2, number of samples = 4)
    >>> y_true = np.array([[0, 1], [0, 0]])
    >>> y_pred = np.array([[-18.6, 0.51], [2.94, -12.8]])
    >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
    >>> bce = keras.losses.BinaryCrossentropy(from_logits=True)
    >>> bce(y_true, y_pred)
    0.8654
    >>> # Using 'sample_weight' attribute
    >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2])
    0.243
    >>> # Using 'sum' reduction` type.
    >>> bce = keras.losses.BinaryCrossentropy(from_logits=True,
    ...     reduction="sum")
    >>> bce(y_true, y_pred)
    1.730
    >>> # Using 'none' reduction type.
    >>> bce = keras.losses.BinaryCrossentropy(from_logits=True,
    ...     reduction=None)
    >>> bce(y_true, y_pred)
    array([0.235, 1.496], dtype=float32)

    **Default Usage:** (set `from_logits=False`)

    >>> # Make the following updates to the above "Recommended Usage" section
    >>> # 1. Set `from_logits=False`
    >>> keras.losses.BinaryCrossentropy() # OR ...('from_logits=False')
    >>> # 2. Update `y_pred` to use probabilities instead of logits
    >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
    """

    def __init__(
        self,
        from_logits=False,
        label_smoothing=0.0,
        axis=-1,
        reduction="sum_over_batch_size",
        name="binary_crossentropy",
        dtype=None,
    ):
        super().__init__(
            binary_crossentropy,
            name=name,
            reduction=reduction,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )
        # Stored so `get_config` can serialize the extra constructor args.
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing
        self.axis = axis

    def get_config(self):
        config = Loss.get_config(self)
        config.update(
            {
                "from_logits": self.from_logits,
                "label_smoothing": self.label_smoothing,
                "axis": self.axis,
            }
        )
        return config


@keras_export("keras.losses.BinaryFocalCrossentropy")
class BinaryFocalCrossentropy(LossFunctionWrapper):
    """Computes focal cross-entropy loss between true labels and predictions.

    Binary cross-entropy loss is often used for binary (0 or 1) classification
    tasks. The loss function requires the following inputs:

    - `y_true` (true label): This is either 0 or 1.
    - `y_pred` (predicted value): This is the model's prediction, i.e, a single
        floating-point value which either represents a
        [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
        when `from_logits=True`) or a probability (i.e, value in `[0., 1.]` when
        `from_logits=False`).

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a "focal factor" to down-weight easy examples and focus more
    on hard examples. By default, the focal tensor is computed as follows:

    `focal_factor = (1 - output) ** gamma` for class 1
    `focal_factor = output ** gamma` for class 0
    where `gamma` is a focusing parameter. When `gamma=0`, this function is
    equivalent to the binary crossentropy loss.

    Args:
        apply_class_balancing: A bool, whether to apply weight balancing on the
            binary classes 0 and 1.
        alpha: A weight balancing factor for class 1, default is `0.25` as
            mentioned in reference [Lin et al., 2018](
            https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is
            `1.0 - alpha`.
        gamma: A focusing parameter used to compute the focal factor, default is
            `2.0` as mentioned in the reference
            [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
        from_logits: Whether to interpret `y_pred` as a tensor of
            [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
            assume that `y_pred` are probabilities (i.e., values in `[0, 1]`).
        label_smoothing: Float in `[0, 1]`. When `0`, no smoothing occurs.
            When > `0`, we compute the loss between the predicted labels
            and a smoothed version of the true labels, where the smoothing
            squeezes the labels towards `0.5`.
            Larger values of `label_smoothing` correspond to heavier smoothing.
        axis: The axis along which to compute crossentropy (the features axis).
            Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    With the `compile()` API:

    ```python
    model.compile(
        loss=keras.losses.BinaryFocalCrossentropy(
            gamma=2.0, from_logits=True),
        ...
    )
    ```

    As a standalone function:

    >>> # Example 1: (batch_size = 1, number of samples = 4)
    >>> y_true = np.array([0, 1, 0, 0])
    >>> y_pred = np.array([-18.6, 0.51, 2.94, -12.8])
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...    gamma=2, from_logits=True)
    >>> loss(y_true, y_pred)
    0.691

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=2, from_logits=True)
    >>> loss(y_true, y_pred)
    0.51

    >>> # Example 2: (batch_size = 2, number of samples = 4)
    >>> y_true = np.array([[0, 1], [0, 0]])
    >>> y_pred = np.array([[-18.6, 0.51], [2.94, -12.8]])
    >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     gamma=3, from_logits=True)
    >>> loss(y_true, y_pred)
    0.647

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=3, from_logits=True)
    >>> loss(y_true, y_pred)
    0.482

    >>> # Using 'sample_weight' attribute with focal effect
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     gamma=3, from_logits=True)
    >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
    0.133

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=3, from_logits=True)
    >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
    0.097

    >>> # Using 'sum' reduction` type.
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     gamma=4, from_logits=True,
    ...     reduction="sum")
    >>> loss(y_true, y_pred)
    1.222

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=4, from_logits=True,
    ...     reduction="sum")
    >>> loss(y_true, y_pred)
    0.914

    >>> # Using 'none' reduction type.
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     gamma=5, from_logits=True,
    ...     reduction=None)
    >>> loss(y_true, y_pred)
    array([0.0017 1.1561], dtype=float32)

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=5, from_logits=True,
    ...     reduction=None)
    >>> loss(y_true, y_pred)
    array([0.0004 0.8670], dtype=float32)
    """

    def __init__(
        self,
        apply_class_balancing=False,
        alpha=0.25,
        gamma=2.0,
        from_logits=False,
        label_smoothing=0.0,
        axis=-1,
        reduction="sum_over_batch_size",
        name="binary_focal_crossentropy",
        dtype=None,
    ):
        super().__init__(
            binary_focal_crossentropy,
            name=name,
            reduction=reduction,
            dtype=dtype,
            apply_class_balancing=apply_class_balancing,
            alpha=alpha,
            gamma=gamma,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )
        # Stored so `get_config` can serialize the extra constructor args.
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing
        self.axis = axis
        self.apply_class_balancing = apply_class_balancing
        self.alpha = alpha
        self.gamma = gamma

    def get_config(self):
        config = Loss.get_config(self)
        config.update(
            {
                "from_logits": self.from_logits,
                "label_smoothing": self.label_smoothing,
                "axis": self.axis,
                "apply_class_balancing": self.apply_class_balancing,
                "alpha": self.alpha,
                "gamma": self.gamma,
            }
        )
        return config


@keras_export("keras.losses.CategoricalCrossentropy")
class CategoricalCrossentropy(LossFunctionWrapper):
    """Computes the crossentropy loss between the labels and predictions.

    Use this crossentropy loss function when there are two or more label
    classes. We expect labels to be provided in a `one_hot` representation. If
    you want to provide labels as integers, please use
    `SparseCategoricalCrossentropy` loss. There should be `num_classes` floating
    point values per feature, i.e., the shape of both `y_pred` and `y_true` are
    `[batch_size, num_classes]`.

    Args:
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
            meaning the confidence on label values are relaxed. For example, if
            `0.1`, use `0.1 / num_classes` for non-target labels and
            `0.9 + 0.1 / num_classes` for target labels.
        axis: The axis along which to compute crossentropy (the features
            axis). Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    Standalone usage:

    >>> y_true = np.array([[0, 1, 0], [0, 0, 1]])
    >>> y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> cce = keras.losses.CategoricalCrossentropy()
    >>> cce(y_true, y_pred)
    1.177

    >>> # Calling with 'sample_weight'.
    >>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
    0.814

    >>> # Using 'sum' reduction type.
    >>> cce = keras.losses.CategoricalCrossentropy(
    ...     reduction="sum")
    >>> cce(y_true, y_pred)
    2.354

    >>> # Using 'none' reduction type.
    >>> cce = keras.losses.CategoricalCrossentropy(
    ...     reduction=None)
    >>> cce(y_true, y_pred)
    array([0.0513, 2.303], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=keras.losses.CategoricalCrossentropy())
    ```
    """

    def __init__(
        self,
        from_logits=False,
        label_smoothing=0.0,
        axis=-1,
        reduction="sum_over_batch_size",
        name="categorical_crossentropy",
        dtype=None,
    ):
        super().__init__(
            categorical_crossentropy,
            name=name,
            reduction=reduction,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )
        # Stored so `get_config` can serialize the extra constructor args.
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing
        self.axis = axis

    def get_config(self):
        config = Loss.get_config(self)
        config.update(
            {
                "from_logits": self.from_logits,
                "label_smoothing": self.label_smoothing,
                "axis": self.axis,
            }
        )
        return config


@keras_export("keras.losses.CategoricalFocalCrossentropy")
class CategoricalFocalCrossentropy(LossFunctionWrapper):
    """Computes the alpha balanced focal crossentropy loss.

    Use this crossentropy loss function when there are two or more label
    classes and if you want to handle class imbalance without using
    `class_weights`. We expect labels to be provided in a `one_hot`
    representation.

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a focal factor to down-weight easy examples and focus more on
    hard examples. The general formula for the focal loss (FL)
    is as follows:

    `FL(p_t) = (1 - p_t) ** gamma * log(p_t)`

    where `p_t` is defined as follows:
    `p_t = output if y_true == 1, else 1 - output`

    `(1 - p_t) ** gamma` is the `modulating_factor`, where `gamma` is a focusing
    parameter. When `gamma` = 0, there is no focal effect on the cross entropy.
    `gamma` reduces the importance given to simple examples in a smooth manner.

    The authors use alpha-balanced variant of focal loss (FL) in the paper:
    `FL(p_t) = -alpha * (1 - p_t) ** gamma * log(p_t)`

    where `alpha` is the weight factor for the classes. If `alpha` = 1, the
    loss won't be able to handle class imbalance properly as all
    classes will have the same weight. This can be a constant or a list of
    constants. If alpha is a list, it must have the same length as the number
    of classes.

    The formula above can be generalized to:
    `FL(p_t) = alpha * (1 - p_t) ** gamma * CrossEntropy(y_true, y_pred)`

    where minus comes from `CrossEntropy(y_true, y_pred)` (CE).

    Extending this to multi-class case is straightforward:
    `FL(p_t) = alpha * (1 - p_t) ** gamma * CategoricalCE(y_true, y_pred)`

    In the snippet below, there is `num_classes` floating pointing values per
    example. The shape of both `y_pred` and `y_true` are
    `(batch_size, num_classes)`.

    Args:
        alpha: A weight balancing factor for all classes, default is `0.25` as
            mentioned in the reference. It can be a list of floats or a scalar.
            In the multi-class case, alpha may be set by inverse class
            frequency by using `compute_class_weight` from `sklearn.utils`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference. It helps to gradually reduce the importance given to
            simple (easy) examples in a smooth manner.
        from_logits: Whether `output` is expected to be a logits tensor. By
            default, we consider that `output` encodes a probability
            distribution.
        label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
            meaning the confidence on label values are relaxed. For example, if
            `0.1`, use `0.1 / num_classes` for non-target labels and
            `0.9 + 0.1 / num_classes` for target labels.
        axis: The axis along which to compute crossentropy (the features
            axis). Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    Standalone usage:

    >>> y_true = [[0., 1., 0.], [0., 0., 1.]]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> cce = keras.losses.CategoricalFocalCrossentropy()
    >>> cce(y_true, y_pred)
    0.23315276

    >>> # Calling with 'sample_weight'.
    >>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
    0.1632

    >>> # Using 'sum' reduction type.
    >>> cce = keras.losses.CategoricalFocalCrossentropy(
    ...     reduction="sum")
    >>> cce(y_true, y_pred)
    0.46631

    >>> # Using 'none' reduction type.
    >>> cce = keras.losses.CategoricalFocalCrossentropy(
    ...     reduction=None)
    >>> cce(y_true, y_pred)
    array([3.2058331e-05, 4.6627346e-01], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='adam',
                  loss=keras.losses.CategoricalFocalCrossentropy())
    ```
    """

    def __init__(
        self,
        alpha=0.25,
        gamma=2.0,
        from_logits=False,
        label_smoothing=0.0,
        axis=-1,
        reduction="sum_over_batch_size",
        name="categorical_focal_crossentropy",
        dtype=None,
    ):
        """Initializes `CategoricalFocalCrossentropy` instance."""
        super().__init__(
            categorical_focal_crossentropy,
            name=name,
            reduction=reduction,
            dtype=dtype,
            alpha=alpha,
            gamma=gamma,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )
        # Stored so `get_config` can serialize the extra constructor args.
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing
        self.axis = axis
        self.alpha = alpha
        self.gamma = gamma

    def get_config(self):
        config = Loss.get_config(self)
        config.update(
            {
                "from_logits": self.from_logits,
                "label_smoothing": self.label_smoothing,
                "axis": self.axis,
                "alpha": self.alpha,
                "gamma": self.gamma,
            }
        )
        return config


@keras_export("keras.losses.SparseCategoricalCrossentropy")
class SparseCategoricalCrossentropy(LossFunctionWrapper):
    """Computes the crossentropy loss between the labels and predictions.

    Use this crossentropy loss function when there are two or more label
    classes. We expect labels to be provided as integers. If you want to
    provide labels using `one-hot` representation, please use
    `CategoricalCrossentropy` loss. There should be `# classes` floating point
    values per feature for `y_pred` and a single floating point value per
    feature for `y_true`.

    In the snippet below, there is a single floating point value per example for
    `y_true` and `num_classes` floating pointing values per example for
    `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of `y_pred`
    is `[batch_size, num_classes]`.

    Args:
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        ignore_class: Optional class ID to exclude from the loss computation
            (forwarded to `sparse_categorical_crossentropy`). Defaults to
            `None`, meaning no class is ignored.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    >>> y_true = [1, 2]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> scce = keras.losses.SparseCategoricalCrossentropy()
    >>> scce(y_true, y_pred)
    1.177

    >>> # Calling with 'sample_weight'.
    >>> scce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
    0.814

    >>> # Using 'sum' reduction type.
    >>> scce = keras.losses.SparseCategoricalCrossentropy(
    ...     reduction="sum")
    >>> scce(y_true, y_pred)
    2.354

    >>> # Using 'none' reduction type.
    >>> scce = keras.losses.SparseCategoricalCrossentropy(
    ...     reduction=None)
    >>> scce(y_true, y_pred)
    array([0.0513, 2.303], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=keras.losses.SparseCategoricalCrossentropy())
    ```
    """

    def __init__(
        self,
        from_logits=False,
        ignore_class=None,
        reduction="sum_over_batch_size",
        name="sparse_categorical_crossentropy",
        dtype=None,
    ):
        super().__init__(
            sparse_categorical_crossentropy,
            name=name,
            reduction=reduction,
            dtype=dtype,
            from_logits=from_logits,
            ignore_class=ignore_class,
        )
        # Stored so `get_config` can serialize the extra constructor args.
        self.from_logits = from_logits
        self.ignore_class = ignore_class

    def get_config(self):
        config = Loss.get_config(self)
        config.update(
            {
                "from_logits": self.from_logits,
                "ignore_class": self.ignore_class,
            }
        )
        return config


@keras_export("keras.losses.CTC")
class CTC(LossFunctionWrapper):
    """CTC (Connectionist Temporal Classification) loss.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(self, reduction="sum_over_batch_size", name="ctc", dtype=None):
        super().__init__(ctc, name=name, reduction=reduction, dtype=dtype)

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.Dice")
class Dice(LossFunctionWrapper):
    """Computes the Dice loss value between `y_true` and `y_pred`.

    Formula:
    ```python
    loss = 1 - (2 * sum(y_true * y_pred)) / (sum(y_true) + sum(y_pred))
    ```

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        axis: Tuple for which dimensions the loss is calculated. Defaults to
            `None`.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Returns:
        Dice loss value.

    Example:

    >>> y_true = [[[[1.0], [1.0]], [[0.0], [0.0]]],
    ...           [[[1.0], [1.0]], [[0.0], [0.0]]]]
    >>> y_pred = [[[[0.0], [1.0]], [[0.0], [1.0]]],
    ...           [[[0.4], [0.0]], [[0.0], [0.9]]]]
    >>> axis = (1, 2, 3)
    >>> loss = keras.losses.dice(y_true, y_pred, axis=axis)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.5, 0.75757575], shape=(2,), dtype=float32)

    >>> loss = keras.losses.dice(y_true, y_pred)
    >>> assert loss.shape == ()
    >>> loss
    array(0.6164384, shape=(), dtype=float32)

    >>> y_true = np.array(y_true)
    >>> y_pred = np.array(y_pred)
    >>> loss = keras.losses.Dice(axis=axis, reduction=None)(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.5, 0.75757575], shape=(2,), dtype=float32)

    """

    def __init__(
        self,
        reduction="sum_over_batch_size",
        name="dice",
        axis=None,
        dtype=None,
    ):
        super().__init__(
            dice, name=name, reduction=reduction, dtype=dtype, axis=axis
        )
        # Stored so `get_config` can serialize the extra constructor arg.
        self.axis = axis

    def get_config(self):
        config = Loss.get_config(self)
        config.update({"axis": self.axis})
        return config


@keras_export("keras.losses.Tversky")
class Tversky(LossFunctionWrapper):
    """Computes the Tversky loss value between `y_true` and `y_pred`.

    This loss function is weighted by the alpha and beta coefficients
    that penalize false positives and false negatives.

    With `alpha=0.5` and `beta=0.5`, the loss value becomes equivalent to
    Dice Loss.

    Args:
        alpha: The coefficient controlling incidence of false positives.
            Defaults to `0.5`.
        beta: The coefficient controlling incidence of false negatives.
            Defaults to `0.5`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        axis: Tuple for which dimensions the loss is calculated
            (forwarded to `tversky`). Defaults to `None`.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Returns:
        Tversky loss value.

    Reference:

    - [Salehi et al., 2017](https://arxiv.org/abs/1706.05721)
    """

    def __init__(
        self,
        alpha=0.5,
        beta=0.5,
        reduction="sum_over_batch_size",
        name="tversky",
        axis=None,
        dtype=None,
    ):
        super().__init__(
            tversky,
            name=name,
            reduction=reduction,
            dtype=dtype,
            alpha=alpha,
            beta=beta,
            axis=axis,
        )
        # Stored so `get_config` can serialize the extra constructor args.
        self.alpha = alpha
        self.beta = beta
        self.axis = axis

    def get_config(self):
        config = Loss.get_config(self)
        config.update(
            {"alpha": self.alpha, "beta": self.beta, "axis": self.axis}
        )
        return config


@keras_export("keras.losses.Circle")
class Circle(LossFunctionWrapper):
    """Computes Circle Loss between integer labels and L2-normalized embeddings.

    This is a metric learning loss designed to minimize within-class distance
    and maximize between-class distance in a flexible manner by dynamically
    adjusting the penalty strength based on optimization status of each
    similarity score.

    To use Circle Loss effectively, the model should output embeddings without
    an activation function (such as a `Dense` layer with `activation=None`)
    followed by UnitNormalization layer to ensure unit-norm embeddings.

    Args:
        gamma: Scaling factor that determines the largest scale of each
            similarity score. Defaults to `80`.
        margin: The relaxation factor, below this distance, negatives are
            up weighted and positives are down weighted. Similarly, above this
            distance negatives are down weighted and positive are up weighted.
            Defaults to `0.4`.
+ remove_diagonal: Boolean, whether to remove self-similarities from the + positive mask. Defaults to `True`. + reduction: Type of reduction to apply to the loss. In almost all cases + this should be `"sum_over_batch_size"`. Supported options are + `"sum"`, `"sum_over_batch_size"`, `"mean"`, + `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss, + `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the + sample size, and `"mean_with_sample_weight"` sums the loss and + divides by the sum of the sample weights. `"none"` and `None` + perform no aggregation. Defaults to `"sum_over_batch_size"`. + name: Optional name for the loss instance. + dtype: The dtype of the loss's computations. Defaults to `None`, which + means using `keras.backend.floatx()`. `keras.backend.floatx()` is a + `"float32"` unless set to different value + (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is + provided, then the `compute_dtype` will be utilized. + + Examples: + + Usage with the `compile()` API: + + ```python + model = models.Sequential([ + keras.layers.Input(shape=(224, 224, 3)), + keras.layers.Conv2D(16, (3, 3), activation='relu'), + keras.layers.Flatten(), + keras.layers.Dense(64, activation=None), # No activation + keras.layers.UnitNormalization() # L2 normalization + ]) + + model.compile(optimizer="adam", loss=keras.losses.Circle()) + ``` + + Reference: + - [Yifan Sun et al., 2020](https://arxiv.org/abs/2002.10857) + + """ + + def __init__( + self, + gamma=80.0, + margin=0.4, + remove_diagonal=True, + reduction="sum_over_batch_size", + name="circle", + dtype=None, + ): + super().__init__( + circle, + name=name, + reduction=reduction, + dtype=dtype, + gamma=gamma, + margin=margin, + remove_diagonal=remove_diagonal, + ) + self.gamma = gamma + self.margin = margin + self.remove_diagonal = remove_diagonal + + def get_config(self): + config = Loss.get_config(self) + config.update( + { + "gamma": self.gamma, + "margin": self.margin, + "remove_diagonal": 
self.remove_diagonal, + } + ) + return config + + +def convert_binary_labels_to_hinge(y_true): + """Converts binary labels into -1/1 for hinge loss/metric calculation.""" + are_zeros = ops.equal(y_true, 0) + are_ones = ops.equal(y_true, 1) + is_binary = ops.all((ops.logical_or(are_zeros, are_ones))) + + def _convert_binary_labels(): + # Convert the binary labels to -1 or 1. + return 2.0 * y_true - 1.0 + + def _return_labels_unconverted(): + # Returns the labels unchanged if they are non-binary + return y_true + + updated_y_true = ops.cond( + is_binary, _convert_binary_labels, _return_labels_unconverted + ) + return updated_y_true + + +@keras_export( + [ + "keras.metrics.hinge", + "keras.losses.hinge", + ] +) +def hinge(y_true, y_pred): + """Computes the hinge loss between `y_true` & `y_pred`. + + Formula: + + ```python + loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1) + ``` + + Args: + y_true: The ground truth values. `y_true` values are expected to be -1 + or 1. If binary (0 or 1) labels are provided they will be converted + to -1 or 1 with shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. + + Returns: + Hinge loss values with shape = `[batch_size, d0, .. dN-1]`. + + Example: + + >>> y_true = np.random.choice([-1, 1], size=(2, 3)) + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.hinge(y_true, y_pred) + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, dtype=y_pred.dtype) + y_true = ops.convert_to_tensor(y_true) + y_true = convert_binary_labels_to_hinge(y_true) + return ops.mean(ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1) + + +@keras_export( + [ + "keras.metrics.squared_hinge", + "keras.losses.squared_hinge", + ] +) +def squared_hinge(y_true, y_pred): + """Computes the squared hinge loss between `y_true` & `y_pred`. 
+ + Formula: + + ```python + loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1) + ``` + + Args: + y_true: The ground truth values. `y_true` values are expected to be -1 + or 1. If binary (0 or 1) labels are provided we will convert them + to -1 or 1 with shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. + + Returns: + Squared hinge loss values with shape = `[batch_size, d0, .. dN-1]`. + + Example: + + >>> y_true = np.random.choice([-1, 1], size=(2, 3)) + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.squared_hinge(y_true, y_pred) + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + y_true = convert_binary_labels_to_hinge(y_true) + return ops.mean( + ops.square(ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1 + ) + + +@keras_export( + [ + "keras.metrics.categorical_hinge", + "keras.losses.categorical_hinge", + ] +) +def categorical_hinge(y_true, y_pred): + """Computes the categorical hinge loss between `y_true` & `y_pred`. + + Formula: + + ```python + loss = maximum(neg - pos + 1, 0) + ``` + + where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)` + + Args: + y_true: The ground truth values. `y_true` values are expected to be + either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor) with + shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. + + Returns: + Categorical hinge loss values with shape = `[batch_size, d0, .. dN-1]`. 
+ + Example: + + >>> y_true = np.random.randint(0, 3, size=(2,)) + >>> y_true = np.eye(np.max(y_true) + 1)[y_true] + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.categorical_hinge(y_true, y_pred) + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + pos = ops.sum(y_true * y_pred, axis=-1) + neg = ops.max((1.0 - y_true) * y_pred, axis=-1) + zero = ops.cast(0.0, y_pred.dtype) + return ops.maximum(neg - pos + 1.0, zero) + + +@keras_export( + [ + "keras.metrics.mean_squared_error", + "keras.losses.mean_squared_error", + # Legacy aliases + "keras._legacy.losses.mse", + "keras._legacy.losses.MSE", + "keras._legacy.metrics.mse", + "keras._legacy.metrics.MSE", + ] +) +def mean_squared_error(y_true, y_pred): + """Computes the mean squared error between labels and predictions. + + Formula: + + ```python + loss = mean(square(y_true - y_pred), axis=-1) + ``` + + Example: + + >>> y_true = np.random.randint(0, 2, size=(2, 3)) + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.mean_squared_error(y_true, y_pred) + + Args: + y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. + + Returns: + Mean squared error values with shape = `[batch_size, d0, .. dN-1]`. + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + return ops.mean(ops.square(y_true - y_pred), axis=-1) + + +@keras_export( + [ + "keras.metrics.mean_absolute_error", + "keras.losses.mean_absolute_error", + # Legacy aliases + "keras._legacy.losses.MAE", + "keras._legacy.losses.mae", + "keras._legacy.metrics.MAE", + "keras._legacy.metrics.mae", + ] +) +def mean_absolute_error(y_true, y_pred): + """Computes the mean absolute error between labels and predictions. 
+ + ```python + loss = mean(abs(y_true - y_pred), axis=-1) + ``` + + Args: + y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. + + Returns: + Mean absolute error values with shape = `[batch_size, d0, .. dN-1]`. + + Example: + + >>> y_true = np.random.randint(0, 2, size=(2, 3)) + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.mean_absolute_error(y_true, y_pred) + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + return ops.mean(ops.abs(y_true - y_pred), axis=-1) + + +@keras_export( + [ + "keras.metrics.mean_absolute_percentage_error", + "keras.losses.mean_absolute_percentage_error", + # Legacy aliases + "keras._legacy.losses.mape", + "keras._legacy.losses.MAPE", + "keras._legacy.metrics.mape", + "keras._legacy.metrics.MAPE", + ] +) +def mean_absolute_percentage_error(y_true, y_pred): + """Computes the mean absolute percentage error between `y_true` & `y_pred`. + + Formula: + + ```python + loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1) + ``` + + Division by zero is prevented by dividing by `maximum(y_true, epsilon)` + where `epsilon = keras.backend.epsilon()` + (default to `1e-7`). + + Args: + y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. + + Returns: + Mean absolute percentage error values with shape = `[batch_size, d0, .. + dN-1]`. 
+ + Example: + + >>> y_true = np.random.random(size=(2, 3)) + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.mean_absolute_percentage_error(y_true, y_pred) + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + epsilon = ops.convert_to_tensor(backend.epsilon(), dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + diff = ops.abs((y_true - y_pred) / ops.maximum(ops.abs(y_true), epsilon)) + return 100.0 * ops.mean(diff, axis=-1) + + +@keras_export( + [ + "keras.metrics.mean_squared_logarithmic_error", + "keras.losses.mean_squared_logarithmic_error", + # Legacy aliases + "keras._legacy.losses.msle", + "keras._legacy.losses.MSLE", + "keras._legacy.metrics.msle", + "keras._legacy.metrics.MSLE", + ] +) +def mean_squared_logarithmic_error(y_true, y_pred): + """Computes the mean squared logarithmic error between `y_true` & `y_pred`. + + Formula: + + ```python + loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1) + ``` + + Note that `y_pred` and `y_true` cannot be less or equal to 0. Negative + values and 0 values will be replaced with `keras.backend.epsilon()` + (default to `1e-7`). + + Args: + y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. + + Returns: + Mean squared logarithmic error values with shape = `[batch_size, d0, .. + dN-1]`. 
+ + Example: + + >>> y_true = np.random.randint(0, 2, size=(2, 3)) + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.mean_squared_logarithmic_error(y_true, y_pred) + """ + epsilon = ops.convert_to_tensor(backend.epsilon()) + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + first_log = ops.log(ops.maximum(y_pred, epsilon) + 1.0) + second_log = ops.log(ops.maximum(y_true, epsilon) + 1.0) + return ops.mean(ops.square(first_log - second_log), axis=-1) + + +@keras_export("keras.losses.cosine_similarity") +def cosine_similarity(y_true, y_pred, axis=-1): + """Computes the cosine similarity between labels and predictions. + + Formula: + ```python + loss = -sum(l2_norm(y_true) * l2_norm(y_pred)) + ``` + + Note that it is a number between -1 and 1. When it is a negative number + between -1 and 0, 0 indicates orthogonality and values closer to -1 + indicate greater similarity. This makes it usable as a loss function in a + setting where you try to maximize the proximity between predictions and + targets. If either `y_true` or `y_pred` is a zero vector, cosine + similarity will be 0 regardless of the proximity between predictions + and targets. + + Args: + y_true: Tensor of true targets. + y_pred: Tensor of predicted targets. + axis: Axis along which to determine similarity. Defaults to `-1`. + + Returns: + Cosine similarity tensor. 
+ + Example: + + >>> y_true = [[0., 1.], [1., 1.], [1., 1.]] + >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]] + >>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1) + [-0., -0.99999994, 0.99999994] + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + y_pred = normalize(y_pred, axis=axis) + y_true = normalize(y_true, axis=axis) + return -ops.sum(y_true * y_pred, axis=axis) + + +@keras_export(["keras.losses.huber", "keras.metrics.huber"]) +def huber(y_true, y_pred, delta=1.0): + """Computes Huber loss value. + + Formula: + ```python + for x in error: + if abs(x) <= delta: + loss.append(0.5 * x^2) + elif abs(x) > delta: + loss.append(delta * abs(x) - 0.5 * delta^2) + + loss = mean(loss, axis=-1) + ``` + See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss). + + Example: + + >>> y_true = [[0, 1], [0, 0]] + >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] + >>> loss = keras.losses.huber(y_true, y_pred) + 0.155 + + + Args: + y_true: tensor of true targets. + y_pred: tensor of predicted targets. + delta: A float, the point where the Huber loss function changes from a + quadratic to linear. Defaults to `1.0`. + + Returns: + Tensor with one scalar loss entry per sample. 
+ """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + delta = ops.convert_to_tensor(delta, dtype=y_pred.dtype) + error = ops.subtract(y_pred, y_true) + abs_error = ops.abs(error) + half = ops.convert_to_tensor(0.5, dtype=abs_error.dtype) + return ops.mean( + ops.where( + abs_error <= delta, + half * ops.square(error), + delta * abs_error - half * ops.square(delta), + ), + axis=-1, + ) + + +@keras_export( + [ + "keras.losses.log_cosh", + "keras.metrics.log_cosh", + # Legacy aliases + "keras._legacy.losses.logcosh", + "keras._legacy.metrics.logcosh", + ] +) +def log_cosh(y_true, y_pred): + """Logarithm of the hyperbolic cosine of the prediction error. + + Formula: + ```python + loss = mean(log(cosh(y_pred - y_true)), axis=-1) + ``` + + Note that `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small + `x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works + mostly like the mean squared error, but will not be so strongly affected by + the occasional wildly incorrect prediction. + + Example: + + >>> y_true = [[0., 1.], [0., 0.]] + >>> y_pred = [[1., 1.], [0., 0.]] + >>> loss = keras.losses.log_cosh(y_true, y_pred) + 0.108 + + Args: + y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`. + + Returns: + Logcosh error values with shape = `[batch_size, d0, .. dN-1]`. 
+ """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + log2 = ops.convert_to_tensor(ops.log(2.0), dtype=y_pred.dtype) + + def _logcosh(x): + return x + ops.softplus(x * -2.0) - log2 + + return ops.mean(_logcosh(y_pred - y_true), axis=-1) + + +@keras_export( + [ + "keras.metrics.kl_divergence", + "keras.losses.kl_divergence", + # Legacy aliases + "keras._legacy.losses.KLD", + "keras._legacy.losses.kld", + "keras._legacy.losses.kullback_leibler_divergence", + "keras._legacy.metrics.KLD", + "keras._legacy.metrics.kld", + "keras._legacy.metrics.kullback_leibler_divergence", + ] +) +def kl_divergence(y_true, y_pred): + """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`. + + Formula: + + ```python + loss = y_true * log(y_true / y_pred) + ``` + + `y_true` and `y_pred` are expected to be probability + distributions, with values between 0 and 1. They will get + clipped to the `[0, 1]` range. + + Args: + y_true: Tensor of true targets. + y_pred: Tensor of predicted targets. + + Returns: + KL Divergence loss values with shape = `[batch_size, d0, .. dN-1]`. + + Example: + + >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float32) + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.kl_divergence(y_true, y_pred) + >>> assert loss.shape == (2,) + >>> y_true = ops.clip(y_true, 1e-7, 1) + >>> y_pred = ops.clip(y_pred, 1e-7, 1) + >>> assert np.array_equal( + ... 
loss, np.sum(y_true * np.log(y_true / y_pred), axis=-1)) + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, y_pred.dtype) + y_true = ops.clip(y_true, backend.epsilon(), 1) + y_pred = ops.clip(y_pred, backend.epsilon(), 1) + return ops.sum(y_true * ops.log(y_true / y_pred), axis=-1) + + +@keras_export( + [ + "keras.metrics.poisson", + "keras.losses.poisson", + ] +) +def poisson(y_true, y_pred): + """Computes the Poisson loss between y_true and y_pred. + + Formula: + + ```python + loss = y_pred - y_true * log(y_pred) + ``` + + Args: + y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. + + Returns: + Poisson loss values with shape = `[batch_size, d0, .. dN-1]`. + + Example: + + >>> y_true = np.random.randint(0, 2, size=(2, 3)) + >>> y_pred = np.random.random(size=(2, 3)) + >>> loss = keras.losses.poisson(y_true, y_pred) + >>> assert loss.shape == (2,) + >>> y_pred = y_pred + 1e-7 + >>> assert np.allclose( + ... loss, np.mean(y_pred - y_true * np.log(y_pred), axis=-1), + ... atol=1e-5) + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + epsilon = ops.convert_to_tensor(backend.epsilon(), dtype=y_pred.dtype) + return ops.mean(y_pred - y_true * ops.log(y_pred + epsilon), axis=-1) + + +@keras_export( + [ + "keras.metrics.categorical_crossentropy", + "keras.losses.categorical_crossentropy", + ] +) +def categorical_crossentropy( + y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1 +): + """Computes the categorical crossentropy loss. + + Args: + y_true: Tensor of one-hot true targets. + y_pred: Tensor of predicted targets. + from_logits: Whether `y_pred` is expected to be a logits tensor. By + default, we assume that `y_pred` encodes a probability distribution. + label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. 
For + example, if `0.1`, use `0.1 / num_classes` for non-target labels + and `0.9 + 0.1 / num_classes` for target labels. + axis: Defaults to `-1`. The dimension along which the entropy is + computed. + + Returns: + Categorical crossentropy loss value. + + Example: + + >>> y_true = [[0, 1, 0], [0, 0, 1]] + >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] + >>> loss = keras.losses.categorical_crossentropy(y_true, y_pred) + >>> assert loss.shape == (2,) + >>> loss + array([0.0513, 2.303], dtype=float32) + """ + if isinstance(axis, bool): + raise ValueError( + "`axis` must be of type `int`. " + f"Received: axis={axis} of type {type(axis)}" + ) + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + + if y_pred.shape[-1] == 1: + warnings.warn( + "In loss categorical_crossentropy, expected " + "y_pred.shape to be (batch_size, num_classes) " + f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. " + "Consider using 'binary_crossentropy' if you only have 2 classes.", + SyntaxWarning, + stacklevel=2, + ) + + if label_smoothing: + num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype) + y_true = y_true * (1.0 - label_smoothing) + ( + label_smoothing / num_classes + ) + + return ops.categorical_crossentropy( + y_true, y_pred, from_logits=from_logits, axis=axis + ) + + +@keras_export( + [ + "keras.metrics.categorical_focal_crossentropy", + "keras.losses.categorical_focal_crossentropy", + ] +) +def categorical_focal_crossentropy( + y_true, + y_pred, + alpha=0.25, + gamma=2.0, + from_logits=False, + label_smoothing=0.0, + axis=-1, +): + """Computes the categorical focal crossentropy loss. + + Args: + y_true: Tensor of one-hot true targets. + y_pred: Tensor of predicted targets. + alpha: A weight balancing factor for all classes, default is `0.25` as + mentioned in the reference. It can be a list of floats or a scalar. 
+ In the multi-class case, alpha may be set by inverse class + frequency by using `compute_class_weight` from `sklearn.utils`. + gamma: A focusing parameter, default is `2.0` as mentioned in the + reference. It helps to gradually reduce the importance given to + simple examples in a smooth manner. When `gamma` = 0, there is + no focal effect on the categorical crossentropy. + from_logits: Whether `y_pred` is expected to be a logits tensor. By + default, we assume that `y_pred` encodes a probability + distribution. + label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For + example, if `0.1`, use `0.1 / num_classes` for non-target labels + and `0.9 + 0.1 / num_classes` for target labels. + axis: Defaults to `-1`. The dimension along which the entropy is + computed. + + Returns: + Categorical focal crossentropy loss value. + + Example: + + >>> y_true = [[0, 1, 0], [0, 0, 1]] + >>> y_pred = [[0.05, 0.9, 0.05], [0.1, 0.85, 0.05]] + >>> loss = keras.losses.categorical_focal_crossentropy(y_true, y_pred) + >>> assert loss.shape == (2,) + >>> loss + array([2.63401289e-04, 6.75912094e-01], dtype=float32) + """ + if isinstance(axis, bool): + raise ValueError( + "`axis` must be of type `int`. " + f"Received: axis={axis} of type {type(axis)}" + ) + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.cast(y_true, y_pred.dtype) + + if y_pred.shape[-1] == 1: + warnings.warn( + "In loss categorical_focal_crossentropy, expected " + "y_pred.shape to be (batch_size, num_classes) " + f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. 
" + "Consider using 'binary_crossentropy' if you only have 2 classes.", + SyntaxWarning, + stacklevel=2, + ) + + if label_smoothing: + num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype) + y_true = y_true * (1.0 - label_smoothing) + ( + label_smoothing / num_classes + ) + + if from_logits: + y_pred = ops.softmax(y_pred, axis=axis) + + # Adjust the predictions so that the probability of + # each class for every sample adds up to 1 + # This is needed to ensure that the cross entropy is + # computed correctly. + output = y_pred / ops.sum(y_pred, axis=axis, keepdims=True) + output = ops.clip(output, backend.epsilon(), 1.0 - backend.epsilon()) + + # Calculate cross entropy + cce = -y_true * ops.log(output) + + # Calculate factors + modulating_factor = ops.power(1.0 - output, gamma) + weighting_factor = ops.multiply(modulating_factor, alpha) + + # Apply weighting factor + focal_cce = ops.multiply(weighting_factor, cce) + focal_cce = ops.sum(focal_cce, axis=axis) + return focal_cce + + +@keras_export( + [ + "keras.metrics.sparse_categorical_crossentropy", + "keras.losses.sparse_categorical_crossentropy", + ] +) +def sparse_categorical_crossentropy( + y_true, y_pred, from_logits=False, ignore_class=None, axis=-1 +): + """Computes the sparse categorical crossentropy loss. + + Args: + y_true: Ground truth values. + y_pred: The predicted values. + from_logits: Whether `y_pred` is expected to be a logits tensor. By + default, we assume that `y_pred` encodes a probability distribution. + ignore_class: Optional integer. The ID of a class to be ignored during + loss computation. This is useful, for example, in segmentation + problems featuring a "void" class (commonly -1 or 255) in + segmentation maps. By default (`ignore_class=None`), all classes are + considered. + axis: Defaults to `-1`. The dimension along which the entropy is + computed. + + Returns: + Sparse categorical crossentropy loss value. 
+ + Examples: + + >>> y_true = [1, 2] + >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] + >>> loss = keras.losses.sparse_categorical_crossentropy(y_true, y_pred) + >>> assert loss.shape == (2,) + >>> loss + array([0.0513, 2.303], dtype=float32) + """ + + if len(y_true.shape) == len(y_pred.shape) and y_true.shape[-1] == 1: + y_true = ops.squeeze(y_true, axis=-1) + + if ignore_class is not None: + res_shape = ops.shape(y_pred)[:-1] + valid_mask = ops.not_equal(y_true, ops.cast(ignore_class, y_pred.dtype)) + y_true = y_true * ops.cast(valid_mask, y_true.dtype) + y_pred = y_pred * ops.cast( + ops.expand_dims(valid_mask, -1), y_pred.dtype + ) + + res = ops.sparse_categorical_crossentropy( + y_true, + y_pred, + from_logits=from_logits, + axis=axis, + ) + + if ignore_class is not None: + valid_mask = ops.reshape(valid_mask, res_shape) + res = ops.where(valid_mask, res, 0.0) + backend.set_keras_mask(res, mask=valid_mask) + + return res + + +@keras_export( + [ + "keras.metrics.binary_crossentropy", + "keras.losses.binary_crossentropy", + ] +) +def binary_crossentropy( + y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1 +): + """Computes the binary crossentropy loss. + + Args: + y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. + y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. + from_logits: Whether `y_pred` is expected to be a logits tensor. By + default, we assume that `y_pred` encodes a probability distribution. + label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by + squeezing them towards 0.5, that is, + using `1. - 0.5 * label_smoothing` for the target class + and `0.5 * label_smoothing` for the non-target class. + axis: The axis along which the mean is computed. Defaults to `-1`. + + Returns: + Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`. 
@keras_export(
    [
        "keras.metrics.binary_focal_crossentropy",
        "keras.losses.binary_focal_crossentropy",
    ]
)
def binary_focal_crossentropy(
    y_true,
    y_pred,
    apply_class_balancing=False,
    alpha=0.25,
    gamma=2.0,
    from_logits=False,
    label_smoothing=0.0,
    axis=-1,
):
    """Computes the binary focal crossentropy loss.

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a focal factor to down-weight easy examples and focus more
    on hard examples. By default, the focal tensor is computed as follows:

    `focal_factor = (1 - output) ** gamma` for class 1
    `focal_factor = output ** gamma` for class 0
    where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal
    effect on the binary crossentropy loss.

    If `apply_class_balancing == True`, this function also takes into account a
    weight balancing factor for the binary classes 0 and 1 as follows:

    `weight = alpha` for class 1 (`target == 1`)
    `weight = 1 - alpha` for class 0
    where `alpha` is a float in the range of `[0, 1]`.

    Args:
        y_true: Ground truth values, of shape `(batch_size, d0, .. dN)`.
        y_pred: The predicted values, of shape `(batch_size, d0, .. dN)`.
        apply_class_balancing: A bool, whether to apply weight balancing on the
            binary classes 0 and 1.
        alpha: A weight balancing factor for class 1, default is `0.25` as
            mentioned in the reference. The weight for class 0 is
            `1.0 - alpha`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability
            distribution.
        label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by
            squeezing them towards 0.5, that is,
            using `1. - 0.5 * label_smoothing` for the target class
            and `0.5 * label_smoothing` for the non-target class.
        axis: The axis along which the mean is computed. Defaults to `-1`.

    Returns:
        Binary focal crossentropy loss value
        with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> loss = keras.losses.binary_focal_crossentropy(
    ...     y_true, y_pred, gamma=2)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.330, 0.206], dtype=float32)
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, y_pred.dtype)

    if label_smoothing:
        # Shift hard 0/1 targets towards 0.5 by the smoothing factor.
        y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

    if from_logits:
        # Convert logits to probabilities up front so that both the BCE term
        # (computed with from_logits=False below) and the focal factor see
        # the same probability tensor.
        y_pred = ops.sigmoid(y_pred)

    bce = ops.binary_crossentropy(
        target=y_true,
        output=y_pred,
        from_logits=False,
    )

    # Calculate focal factor: p_t is the predicted probability of the true
    # class, so easy examples (p_t close to 1) are down-weighted by
    # (1 - p_t) ** gamma.
    p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred)
    focal_factor = ops.power(1.0 - p_t, gamma)

    focal_bce = focal_factor * bce

    if apply_class_balancing:
        # alpha weights class 1; (1 - alpha) weights class 0.
        weight = y_true * alpha + (1 - y_true) * (1 - alpha)
        focal_bce = weight * focal_bce

    return ops.mean(focal_bce, axis=axis)


@keras_export("keras.losses.ctc")
def ctc(y_true, y_pred):
    """CTC (Connectionist Temporal Classification) loss.

    Args:
        y_true: A tensor of shape `(batch_size, max_length)` containing
            the true labels in integer format. `0` always represents
            the blank/mask index and should not be used for classes.
        y_pred: A tensor of shape `(batch_size, max_length, num_classes)`
            containing logits (the output of your model).
            They should *not* be normalized via softmax.
    """
    if len(ops.shape(y_true)) != 2:
        raise ValueError(
            "Targets `y_true` are expected to be a tensor of shape "
            "`(batch_size, max_length)` in integer format. "
            f"Received: y_true.shape={ops.shape(y_true)}"
        )
    if len(ops.shape(y_pred)) != 3:
        raise ValueError(
            "Logits `y_pred` are expected to be a tensor of shape "
            "`(batch_size, max_length, num_classes)`. "
            f"Received: y_pred.shape={ops.shape(y_pred)}"
        )

    # Index 0 is reserved for the CTC blank/mask label (see docstring).
    mask_index = 0
    batch_length = ops.shape(y_pred)[0]
    input_length = ops.shape(y_pred)[1]
    # Every sample is treated as using the full time dimension of y_pred.
    input_length = input_length * ops.ones((batch_length,), dtype="int32")
    # Per-sample label length = count of non-blank entries in y_true.
    label_length = ops.cast(
        ops.sum(y_true != mask_index, axis=-1), dtype="int32"
    )

    return ops.ctc_loss(
        y_true, y_pred, label_length, input_length, mask_index=mask_index
    )


@keras_export("keras.losses.dice")
def dice(y_true, y_pred, axis=None):
    """Computes the Dice loss value between `y_true` and `y_pred`.

    Formula:
    ```python
    loss = 1 - (2 * sum(y_true * y_pred)) / (sum(y_true) + sum(y_pred))
    ```

    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        axis: tuple for which dimensions the loss is calculated

    Returns:
        Dice loss value.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, y_pred.dtype)

    inputs = y_true
    targets = y_pred

    intersection = ops.sum(inputs * targets, axis=axis)
    # epsilon() in the denominator guards against division by zero when both
    # tensors sum to zero along `axis`.
    dice = ops.divide(
        2.0 * intersection,
        ops.sum(y_true, axis=axis)
        + ops.sum(y_pred, axis=axis)
        + backend.epsilon(),
    )

    return 1 - dice


@keras_export("keras.losses.tversky")
def tversky(y_true, y_pred, alpha=0.5, beta=0.5, axis=None):
    """Computes the Tversky loss value between `y_true` and `y_pred`.

    This loss function is weighted by the alpha and beta coefficients
    that penalize false positives and false negatives.

    With `alpha=0.5` and `beta=0.5`, the loss value becomes equivalent to
    Dice Loss.

    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        alpha: coefficient controlling incidence of false positives.
        beta: coefficient controlling incidence of false negatives.
        axis: tuple for which dimensions the loss is calculated.

    Returns:
        Tversky loss value.

    Reference:

    - [Salehi et al., 2017](https://arxiv.org/abs/1706.05721)
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, y_pred.dtype)

    inputs = y_true
    targets = y_pred

    intersection = ops.sum(inputs * targets, axis=axis)
    # NOTE(review): with inputs == y_true and targets == y_pred, `fp` below
    # evaluates to sum((1 - y_pred) * y_true), which is the usual definition
    # of a false *negative*, and `fn` to sum(y_pred * (1 - y_true)), the usual
    # false *positive* — the names look swapped. With the symmetric default
    # alpha == beta == 0.5 the result is unaffected; confirm against upstream
    # keras before relying on asymmetric alpha/beta semantics.
    fp = ops.sum((1 - targets) * inputs, axis=axis)
    fn = ops.sum(targets * (1 - inputs), axis=axis)

    tversky = ops.divide(
        intersection,
        intersection + fp * alpha + fn * beta + backend.epsilon(),
    )

    return 1 - tversky


@keras_export("keras.losses.circle")
def circle(
    y_true,
    y_pred,
    ref_labels=None,
    ref_embeddings=None,
    remove_diagonal=True,
    gamma=80,
    margin=0.4,
):
    """Computes the Circle loss.

    It is designed to minimize within-class distances and maximize between-class
    distances in L2 normalized embedding space.

    Args:
        y_true: Tensor with ground truth labels in integer format.
        y_pred: Tensor with predicted L2 normalized embeddings.
        ref_labels: Optional integer tensor with labels for reference
            embeddings. If `None`, defaults to `y_true`.
        ref_embeddings: Optional tensor with L2 normalized reference embeddings.
            If `None`, defaults to `y_pred`.
        remove_diagonal: Boolean, whether to remove self-similarities from
            positive mask. Defaults to `True`.
        gamma: Float, scaling factor for the loss. Defaults to `80`.
        margin: Float, relaxation factor for the loss. Defaults to `0.4`.

    Returns:
        Circle loss value.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, "int32")
    # Fall back to in-batch labels/embeddings when no reference set is given.
    ref_embeddings = (
        y_pred
        if ref_embeddings is None
        else ops.convert_to_tensor(ref_embeddings)
    )
    ref_labels = y_true if ref_labels is None else ops.cast(ref_labels, "int32")

    # Margin-derived optima and decision thresholds for positive/negative
    # pairs (Sun et al., "Circle Loss", 2020).
    optim_pos = margin
    optim_neg = 1 + margin
    delta_pos = margin
    delta_neg = 1 - margin

    # Cosine distance; assumes both embedding sets are L2 normalized (see
    # docstring), so 1 - <x, y> lies in [0, 2].
    pairwise_cosine_distances = 1 - ops.matmul(
        y_pred, ops.transpose(ref_embeddings)
    )

    # Clamp tiny negative values produced by floating point error.
    pairwise_cosine_distances = ops.maximum(pairwise_cosine_distances, 0.0)
    positive_mask, negative_mask = build_pos_neg_masks(
        y_true,
        ref_labels,
        remove_diagonal=remove_diagonal,
    )
    positive_mask = ops.cast(
        positive_mask, dtype=pairwise_cosine_distances.dtype
    )
    negative_mask = ops.cast(
        negative_mask, dtype=pairwise_cosine_distances.dtype
    )

    # Self-paced weights: harder pairs (distant positives, close negatives)
    # receive larger weights, clamped at zero.
    pos_weights = optim_pos + pairwise_cosine_distances
    pos_weights = pos_weights * positive_mask
    pos_weights = ops.maximum(pos_weights, 0.0)
    neg_weights = optim_neg - pairwise_cosine_distances
    neg_weights = neg_weights * negative_mask
    neg_weights = ops.maximum(neg_weights, 0.0)

    pos_dists = delta_pos - pairwise_cosine_distances
    neg_dists = delta_neg - pairwise_cosine_distances

    pos_wdists = -1 * gamma * pos_weights * pos_dists
    neg_wdists = gamma * neg_weights * neg_dists

    # The -inf fill drops masked-out pairs from the logsumexp reductions.
    p_loss = ops.logsumexp(
        ops.where(positive_mask, pos_wdists, float("-inf")),
        axis=1,
    )
    n_loss = ops.logsumexp(
        ops.where(negative_mask, neg_wdists, float("-inf")),
        axis=1,
    )

    circle_loss = ops.softplus(p_loss + n_loss)
    # Attach a Keras mask so zero-loss samples are ignored downstream.
    backend.set_keras_mask(circle_loss, circle_loss > 0)
    return circle_loss
import inspect

from keras.src.api_export import keras_export
from keras.src.metrics.accuracy_metrics import Accuracy
from keras.src.metrics.accuracy_metrics import BinaryAccuracy
from keras.src.metrics.accuracy_metrics import CategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
from keras.src.metrics.confusion_metrics import AUC
from keras.src.metrics.confusion_metrics import FalseNegatives
from keras.src.metrics.confusion_metrics import FalsePositives
from keras.src.metrics.confusion_metrics import Precision
from keras.src.metrics.confusion_metrics import PrecisionAtRecall
from keras.src.metrics.confusion_metrics import Recall
from keras.src.metrics.confusion_metrics import RecallAtPrecision
from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity
from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity
from keras.src.metrics.confusion_metrics import TrueNegatives
from keras.src.metrics.confusion_metrics import TruePositives
from keras.src.metrics.correlation_metrics import ConcordanceCorrelation
from keras.src.metrics.correlation_metrics import PearsonCorrelation
from keras.src.metrics.f_score_metrics import F1Score
from keras.src.metrics.f_score_metrics import FBetaScore
from keras.src.metrics.hinge_metrics import CategoricalHinge
from keras.src.metrics.hinge_metrics import Hinge
from keras.src.metrics.hinge_metrics import SquaredHinge
from keras.src.metrics.iou_metrics import BinaryIoU
from keras.src.metrics.iou_metrics import IoU
from keras.src.metrics.iou_metrics import MeanIoU
from keras.src.metrics.iou_metrics import OneHotIoU
from keras.src.metrics.iou_metrics import OneHotMeanIoU
from keras.src.metrics.metric import Metric
from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy
from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy
from keras.src.metrics.probabilistic_metrics import KLDivergence
from keras.src.metrics.probabilistic_metrics import Poisson
from keras.src.metrics.probabilistic_metrics import (
    SparseCategoricalCrossentropy,
)
from keras.src.metrics.reduction_metrics import Mean
from keras.src.metrics.reduction_metrics import MeanMetricWrapper
from keras.src.metrics.reduction_metrics import Sum
from keras.src.metrics.regression_metrics import CosineSimilarity
from keras.src.metrics.regression_metrics import LogCoshError
from keras.src.metrics.regression_metrics import MeanAbsoluteError
from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError
from keras.src.metrics.regression_metrics import MeanSquaredError
from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError
from keras.src.metrics.regression_metrics import R2Score
from keras.src.metrics.regression_metrics import RootMeanSquaredError
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case

# Every built-in metric class, grouped by family. Used to build the lookup
# table for `get()`/`deserialize()` below.
ALL_OBJECTS = {
    # Base
    Metric,
    Mean,
    Sum,
    MeanMetricWrapper,
    # Regression
    MeanSquaredError,
    RootMeanSquaredError,
    MeanAbsoluteError,
    MeanAbsolutePercentageError,
    MeanSquaredLogarithmicError,
    CosineSimilarity,
    LogCoshError,
    R2Score,
    # Classification
    AUC,
    FalseNegatives,
    FalsePositives,
    Precision,
    PrecisionAtRecall,
    Recall,
    RecallAtPrecision,
    SensitivityAtSpecificity,
    SpecificityAtSensitivity,
    TrueNegatives,
    TruePositives,
    # Correlation
    ConcordanceCorrelation,
    PearsonCorrelation,
    # Hinge
    Hinge,
    SquaredHinge,
    CategoricalHinge,
    # Probabilistic
    KLDivergence,
    Poisson,
    BinaryCrossentropy,
    CategoricalCrossentropy,
    SparseCategoricalCrossentropy,
    # Accuracy
    Accuracy,
    BinaryAccuracy,
    CategoricalAccuracy,
    SparseCategoricalAccuracy,
    TopKCategoricalAccuracy,
    SparseTopKCategoricalAccuracy,
    # F-Score
    F1Score,
    FBetaScore,
    # IoU
    IoU,
    BinaryIoU,
    MeanIoU,
    OneHotIoU,
    OneHotMeanIoU,
}
# Each class is reachable both by its class name ("MeanSquaredError") and its
# snake_case form ("mean_squared_error").
ALL_OBJECTS_DICT = {
    name: cls
    for cls in ALL_OBJECTS
    for name in (cls.__name__, to_snake_case(cls.__name__))
}
# TODO: Align with `tf.keras` and set the name attribute of metrics
# with the key name. Currently it uses default name of class definitions.
# Common abbreviations, accepted in both cases.
ALL_OBJECTS_DICT.update(
    {
        "bce": BinaryCrossentropy,
        "BCE": BinaryCrossentropy,
        "mse": MeanSquaredError,
        "MSE": MeanSquaredError,
        "mae": MeanAbsoluteError,
        "MAE": MeanAbsoluteError,
        "mape": MeanAbsolutePercentageError,
        "MAPE": MeanAbsolutePercentageError,
        "msle": MeanSquaredLogarithmicError,
        "MSLE": MeanSquaredLogarithmicError,
    }
)


@keras_export("keras.metrics.serialize")
def serialize(metric):
    """Serializes a metric function or `Metric` instance to a config dict.

    Args:
        metric: A Keras `Metric` instance or a metric function.

    Returns:
        Metric configuration dictionary.
    """
    return serialization_lib.serialize_keras_object(metric)


@keras_export("keras.metrics.deserialize")
def deserialize(config, custom_objects=None):
    """Reconstructs a metric class/function instance from its config.

    Args:
        config: Metric configuration (as returned by `serialize()`).
        custom_objects: Optional dictionary mapping names (strings)
            to custom objects (classes and functions) to be
            considered during deserialization.

    Returns:
        A Keras `Metric` instance or a metric function.
    """
    return serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )


@keras_export("keras.metrics.get")
def get(identifier):
    """Retrieves a Keras metric as a `function`/`Metric` class instance.

    The `identifier` may be `None` (returns `None`), the string name of a
    metric function or class (e.g. `"categorical_crossentropy"` or
    `"CategoricalCrossentropy"`), a configuration dictionary with
    `class_name` and `config` keys (where `class_name` must map to a
    `Metric` class), a metric function, or a `Metric` instance (the latter
    two are returned as-is, with classes instantiated).

    Args:
        identifier: A metric identifier. One of None or string name of a metric
            function/class or metric configuration dictionary or a metric
            function or a metric class instance

    Returns:
        A Keras metric as a `function`/ `Metric` class instance.

    Raises:
        ValueError: If `identifier` cannot be interpreted.
    """
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        obj = deserialize(identifier)
    elif isinstance(identifier, str):
        obj = ALL_OBJECTS_DICT.get(identifier)
    else:
        obj = identifier
    if not callable(obj):
        raise ValueError(f"Could not interpret metric identifier: {identifier}")
    # A bare class is instantiated with defaults; functions/instances pass
    # through unchanged.
    return obj() if inspect.isclass(obj) else obj
0000000000000000000000000000000000000000..0f8fece6739bb636579e58e7aac02f906a108281 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/accuracy_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/confusion_metrics.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/confusion_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07233527e186c629a210aab565e95444a96632a0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/confusion_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/correlation_metrics.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/correlation_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dbe0854164c2a1fabb007957f0556e1532a7851 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/correlation_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/f_score_metrics.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/f_score_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff3b9c4cf648cdd30317a30fad19c4c87cd8932c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/f_score_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/hinge_metrics.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/hinge_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bc1b4ca541785bfb0a89a97306ad09de49813e5 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/hinge_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/iou_metrics.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/iou_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdf5498fc71b7bc39c9a102a41518d2b358f6fb8 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/iou_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/metric.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/metric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a637923aa1924962733a82a0a03fdb8a92ba5c3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/metric.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/metrics_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/metrics_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbbebf4070bc97f183a958a43da59140d7b6f26d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/metrics_utils.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/probabilistic_metrics.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/probabilistic_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b72ef7f4d8033167f32c09c4148d12a244f38cf Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/probabilistic_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/reduction_metrics.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/reduction_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f513d29db56e7321d39072819147932b3b966a3b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/reduction_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/regression_metrics.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/regression_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4141912ccfeea5116fafa6015904b7e5c690694d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/__pycache__/regression_metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/accuracy_metrics.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/accuracy_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..50dd5dc8cf7282060ac4460875181d7c4fdcd330 --- /dev/null +++ 
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.metrics import reduction_metrics


def accuracy(y_true, y_pred):
    # Element-wise exact-match indicator: 1.0 where y_true == y_pred after
    # aligning ranks, 0.0 elsewhere.
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    return ops.cast(ops.equal(y_true, y_pred), dtype=backend.floatx())


@keras_export("keras.metrics.Accuracy")
class Accuracy(reduction_metrics.MeanMetricWrapper):
    """Calculates how often predictions equal labels.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `binary accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.Accuracy()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
    >>> m.result()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
    ...                sample_weight=[1, 1, 0, 0])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.Accuracy()])
    ```
    """

    def __init__(self, name="accuracy", dtype=None):
        super().__init__(fn=accuracy, name=name, dtype=dtype)
        # Metric should be maximized during optimization.
        self._direction = "up"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.binary_accuracy")
def binary_accuracy(y_true, y_pred, threshold=0.5):
    # Binarize y_pred at `threshold`, then compare element-wise with y_true.
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    threshold = ops.cast(threshold, y_pred.dtype)
    y_pred = ops.cast(y_pred > threshold, y_true.dtype)
    return ops.cast(ops.equal(y_true, y_pred), dtype=backend.floatx())


@keras_export("keras.metrics.BinaryAccuracy")
class BinaryAccuracy(reduction_metrics.MeanMetricWrapper):
    """Calculates how often predictions match binary labels.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `binary accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        threshold: (Optional) Float representing the threshold for deciding
            whether prediction values are 1 or 0.

    Example:

    >>> m = keras.metrics.BinaryAccuracy()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
    >>> m.result()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.BinaryAccuracy()])
    ```
    """

    def __init__(self, name="binary_accuracy", dtype=None, threshold=0.5):
        super().__init__(
            fn=binary_accuracy, name=name, dtype=dtype, threshold=threshold
        )
        self.threshold = threshold
        # Metric should be maximized during optimization.
        self._direction = "up"

    def get_config(self):
        return {
            "name": self.name,
            "dtype": self.dtype,
            "threshold": self.threshold,
        }


@keras_export("keras.metrics.categorical_accuracy")
def categorical_accuracy(y_true, y_pred):
    # y_true arrives one-hot; reduce it to class indices first.
    y_true = ops.argmax(y_true, axis=-1)

    reshape_matches = False
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)

    y_true_org_shape = ops.shape(y_true)
    y_pred_rank = len(y_pred.shape)
    y_true_rank = len(y_true.shape)

    # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
    # NOTE(review): unlike sparse_categorical_accuracy below, this condition
    # only compares ranks and does not additionally check that the last
    # dimension of y_true is 1 before squeezing — confirm against upstream
    # keras that this is intended.
    if (
        (y_true_rank is not None)
        and (y_pred_rank is not None)
        and (len(y_true.shape) == len(y_pred.shape))
    ):
        y_true = ops.squeeze(y_true, -1)
        reshape_matches = True
    y_pred = ops.argmax(y_pred, axis=-1)

    # If the predicted output and actual output types don't match, force cast
    # them to match. (`is not` compares dtype object identity — assumes dtype
    # objects are canonical/interned; TODO confirm.)
    if y_pred.dtype is not y_true.dtype:
        y_pred = ops.cast(y_pred, dtype=y_true.dtype)
    matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx())
    if reshape_matches:
        matches = ops.reshape(matches, y_true_org_shape)
    return matches


@keras_export("keras.metrics.CategoricalAccuracy")
class CategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
    """Calculates how often predictions match one-hot labels.

    You can provide logits of classes as `y_pred`, since argmax of
    logits and probabilities are same.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `categorical accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    `y_pred` and `y_true` should be passed in as vectors of probabilities,
    rather than as labels. If necessary, use `ops.one_hot` to expand `y_true`
    as a vector.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.CategoricalAccuracy()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=[keras.metrics.CategoricalAccuracy()])
    ```
    """

    def __init__(self, name="categorical_accuracy", dtype=None):
        super().__init__(fn=categorical_accuracy, name=name, dtype=dtype)
        # Metric should be maximized during optimization.
        self._direction = "up"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.sparse_categorical_accuracy")
def sparse_categorical_accuracy(y_true, y_pred):
    # y_true holds integer class indices; y_pred holds per-class scores.
    reshape_matches = False
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true_org_shape = ops.shape(y_true)
    y_pred_rank = len(y_pred.shape)
    y_true_rank = len(y_true.shape)

    # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
    if (
        (y_true_rank is not None)
        and (y_pred_rank is not None)
        and (len(y_true.shape) == len(y_pred.shape))
        and ops.shape(y_true)[-1] == 1
    ):
        y_true = ops.squeeze(y_true, -1)
        reshape_matches = True
    y_pred = ops.argmax(y_pred, axis=-1)

    # If the predicted output and actual output types don't match, force cast
    # them to match. (`is not` compares dtype object identity — assumes dtype
    # objects are canonical/interned; TODO confirm.)
    if y_pred.dtype is not y_true.dtype:
        y_pred = ops.cast(y_pred, y_true.dtype)
    matches = ops.cast(ops.equal(y_true, y_pred), backend.floatx())
    if reshape_matches:
        matches = ops.reshape(matches, y_true_org_shape)
    # if shape is (num_samples, 1) squeeze
    if len(matches.shape) > 1 and matches.shape[-1] == 1:
        matches = ops.squeeze(matches, -1)
    return matches


@keras_export("keras.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
    """Calculates how often predictions match integer labels.

    ```python
    acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1))
    ```

    You can provide logits of classes as `y_pred`, since argmax of
    logits and probabilities are same.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `sparse categorical accuracy`: an
    idempotent operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.SparseCategoricalAccuracy()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='sparse_categorical_crossentropy',
                  metrics=[keras.metrics.SparseCategoricalAccuracy()])
    ```
    """

    def __init__(self, name="sparse_categorical_accuracy", dtype=None):
        super().__init__(fn=sparse_categorical_accuracy, name=name, dtype=dtype)
        # Metric should be maximized during optimization.
        self._direction = "up"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.top_k_categorical_accuracy")
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    # One-hot y_true is reduced to class indices; a sample counts as correct
    # when its true class is among the k highest-scoring predictions.
    reshape_matches = False
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true = ops.argmax(y_true, axis=-1)
    y_true_rank = len(y_true.shape)
    y_pred_rank = len(y_pred.shape)
    y_true_org_shape = ops.shape(y_true)

    # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
    if (y_true_rank is not None) and (y_pred_rank is not None):
        if y_pred_rank > 2:
            y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
        if y_true_rank > 1:
            reshape_matches = True
            y_true = ops.reshape(y_true, [-1])

    matches = ops.cast(
        ops.in_top_k(ops.cast(y_true, "int32"), y_pred, k=k),
        dtype=backend.floatx(),
    )

    # returned matches is expected to have same shape as y_true input
    if reshape_matches:
        matches = ops.reshape(matches, y_true_org_shape)

    return matches


@keras_export("keras.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
    """Computes how often targets are in the top `K` predictions.

    Args:
        k: (Optional) Number of top elements to look at for computing accuracy.
            Defaults to `5`.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.TopKCategoricalAccuracy(k=1)
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=[keras.metrics.TopKCategoricalAccuracy()])
    ```
    """

    def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
        super().__init__(
            fn=top_k_categorical_accuracy,
            name=name,
            dtype=dtype,
            k=k,
        )
        self.k = k
        # Metric should be maximized during optimization.
        self._direction = "up"

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype, "k": self.k}


@keras_export("keras.metrics.sparse_top_k_categorical_accuracy")
def sparse_top_k_categorical_accuracy(
    y_true, y_pred, k=5, from_sorted_ids=False
):
    """Computes how often integer targets are in the top `K` predictions.

    Args:
        y_true: A tensor of shape `(batch_size)` representing indices or IDs of
            true categories.
        y_pred: If `from_sorted_ids=False`, a tensor of shape
            `(batch_size, num_categories)` containing the scores for each
            sample for all possible categories. If `from_sorted_ids=True`, a
            tensor of shape `(batch_size, N)` containing indices or IDs of the
            top `N` categories in order from highest score to lowest score.
        k: (Optional) Number of top elements to look at for computing accuracy.
            Defaults to `5`.
        from_sorted_ids: (Optional) Whether `y_pred` is sorted category IDs or
            scores for all categories (the default).

    Returns:
        A tensor with the same shape as `y_true` containing ones where `y_true`
        is in the top `k` and zeros elsewhere.
    """
    reshape_matches = False
    y_pred = ops.convert_to_tensor(y_pred)
    # With sorted IDs, y_true must be comparable against the ID values in
    # y_pred; otherwise it is an index passed to in_top_k, hence int32.
    y_true_dtype = y_pred.dtype if from_sorted_ids else "int32"
    y_true = ops.convert_to_tensor(y_true, dtype=y_true_dtype)
    y_true_rank = len(y_true.shape)
    y_pred_rank = len(y_pred.shape)
    y_true_org_shape = ops.shape(y_true)

    # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
    if (y_true_rank is not None) and (y_pred_rank is not None):
        if y_pred_rank > 2:
            y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])
        if y_true_rank > 1:
            reshape_matches = True
            y_true = ops.reshape(y_true, [-1])

    if from_sorted_ids:
        # By slicing the first k items, we assume they are sorted by score.
        # Reduce with `any` to count multiple matches only once.
        matches = ops.any(
            ops.equal(ops.expand_dims(y_true, axis=1), y_pred[:, :k]), axis=1
        )
    else:
        matches = ops.in_top_k(y_true, y_pred, k=k)

    matches = ops.cast(matches, dtype=backend.floatx())

    # returned matches is expected to have same shape as y_true input
    if reshape_matches:
        matches = ops.reshape(matches, y_true_org_shape)

    return matches


@keras_export("keras.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(reduction_metrics.MeanMetricWrapper):
    """Computes how often integer targets are in the top `K` predictions.

    By default, the arguments expected by `update_state()` are:
    - `y_true`: a tensor of shape `(batch_size)` representing indices of true
      categories.
    - `y_pred`: a tensor of shape `(batch_size, num_categories)` containing
      the scores for each sample for all possible categories.

    With `from_sorted_ids=True`, the arguments expected by `update_state` are:
    - `y_true`: a tensor of shape `(batch_size)` representing indices or IDs
      of true categories.
    - `y_pred`: a tensor of shape `(batch_size, N)` containing the indices or
      IDs of the top `N` categories sorted in order from highest score to
      lowest score. `N` must be greater or equal to `k`.

    The `from_sorted_ids=True` option can be more efficient when the set of
    categories is very large and the model has an optimized way to retrieve
    the top ones either without scoring or without maintaining the scores for
    all the possible categories.

    Args:
        k: (Optional) Number of top elements to look at for computing accuracy.
            Defaults to `5`.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        from_sorted_ids: (Optional) When `False`, the default, the tensor
            passed in `y_pred` contains the unsorted scores of all possible
            categories. When `True`, `y_pred` contains a the indices or IDs
            for the top categories.

    Example:

    >>> m = keras.metrics.SparseTopKCategoricalAccuracy(k=1)
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result()
    0.3

    >>> m = keras.metrics.SparseTopKCategoricalAccuracy(k=1,
    ...                                                 from_sorted_ids=True)
    >>> m.update_state([2, 1], [[1, 0, 3], [1, 2, 3]])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='sparse_categorical_crossentropy',
                  metrics=[keras.metrics.SparseTopKCategoricalAccuracy()])
    ```
    """

    def __init__(
        self,
        k=5,
        name="sparse_top_k_categorical_accuracy",
        dtype=None,
        from_sorted_ids=False,
    ):
        super().__init__(
            fn=sparse_top_k_categorical_accuracy,
            name=name,
            dtype=dtype,
            k=k,
            from_sorted_ids=from_sorted_ids,
        )
        self.k = k
        self.from_sorted_ids = from_sorted_ids
        # Metric should be maximized during optimization.
        self._direction = "up"

    def get_config(self):
        config = {"name": self.name, "dtype": self.dtype, "k": self.k}
        # Only emitted when True — presumably to keep configs saved by older
        # versions (which lacked this option) byte-compatible; verify.
        if self.from_sorted_ids:
            config["from_sorted_ids"] = True
        return config
A threshold is + compared with prediction values to determine the truth value of + predictions (i.e., above the threshold is `True`, below is `False`). + One metric value is generated for each threshold value. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + """ + + def __init__( + self, confusion_matrix_cond, thresholds=None, name=None, dtype=None + ): + super().__init__(name=name, dtype=dtype) + self._confusion_matrix_cond = confusion_matrix_cond + self.init_thresholds = thresholds + self.thresholds = metrics_utils.parse_init_thresholds( + thresholds, default_threshold=0.5 + ) + self._thresholds_distributed_evenly = ( + metrics_utils.is_evenly_distributed_thresholds(self.thresholds) + ) + self.accumulator = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="accumulator", + ) + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates the metric statistics. + + Args: + y_true: The ground truth values. + y_pred: The predicted values. + sample_weight: Optional weighting of each example. Defaults to `1`. + Can be a tensor whose rank is either 0, or the same rank as + `y_true`, and must be broadcastable to `y_true`. + """ + return metrics_utils.update_confusion_matrix_variables( + {self._confusion_matrix_cond: self.accumulator}, + y_true, + y_pred, + thresholds=self.thresholds, + thresholds_distributed_evenly=self._thresholds_distributed_evenly, + sample_weight=sample_weight, + ) + + def result(self): + if len(self.thresholds) == 1: + result = self.accumulator[0] + else: + result = self.accumulator + return backend.convert_to_tensor(result) + + def get_config(self): + config = {"thresholds": self.init_thresholds} + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras.metrics.FalsePositives") +class FalsePositives(_ConfusionMatrixConditionCount): + """Calculates the number of false positives. 
+ + If `sample_weight` is given, calculates the sum of the weights of + false positives. This metric creates one local variable, `accumulator` + that is used to keep track of the number of false positives. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + Args: + thresholds: (Optional) Defaults to `0.5`. A float value, or a Python + list/tuple of float threshold values in `[0, 1]`. A threshold is + compared with prediction values to determine the truth value of + predictions (i.e., above the threshold is `True`, below is `False`). + If used with a loss function that sets `from_logits=True` (i.e. no + sigmoid applied to predictions), `thresholds` should be set to 0. + One metric value is generated for each threshold value. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + + >>> m = keras.metrics.FalsePositives() + >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1]) + >>> m.result() + 2.0 + + >>> m.reset_state() + >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0]) + >>> m.result() + 1.0 + """ + + def __init__(self, thresholds=None, name=None, dtype=None): + super().__init__( + confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES, + thresholds=thresholds, + name=name, + dtype=dtype, + ) + + +@keras_export("keras.metrics.FalseNegatives") +class FalseNegatives(_ConfusionMatrixConditionCount): + """Calculates the number of false negatives. + + If `sample_weight` is given, calculates the sum of the weights of + false negatives. This metric creates one local variable, `accumulator` + that is used to keep track of the number of false negatives. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + Args: + thresholds: (Optional) Defaults to `0.5`. A float value, or a Python + list/tuple of float threshold values in `[0, 1]`. 
A threshold is + compared with prediction values to determine the truth value of + predictions (i.e., above the threshold is `True`, below is `False`). + If used with a loss function that sets `from_logits=True` (i.e. no + sigmoid applied to predictions), `thresholds` should be set to 0. + One metric value is generated for each threshold value. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = keras.metrics.FalseNegatives() + >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0]) + >>> m.result() + 2.0 + + >>> m.reset_state() + >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0]) + >>> m.result() + 1.0 + """ + + def __init__(self, thresholds=None, name=None, dtype=None): + super().__init__( + confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES, + thresholds=thresholds, + name=name, + dtype=dtype, + ) + + +@keras_export("keras.metrics.TrueNegatives") +class TrueNegatives(_ConfusionMatrixConditionCount): + """Calculates the number of true negatives. + + If `sample_weight` is given, calculates the sum of the weights of + true negatives. This metric creates one local variable, `accumulator` + that is used to keep track of the number of true negatives. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + Args: + thresholds: (Optional) Defaults to `0.5`. A float value, or a Python + list/tuple of float threshold values in `[0, 1]`. A threshold is + compared with prediction values to determine the truth value of + predictions (i.e., above the threshold is `True`, below is `False`). + If used with a loss function that sets `from_logits=True` (i.e. no + sigmoid applied to predictions), `thresholds` should be set to 0. + One metric value is generated for each threshold value. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. 
+ + Example: + + >>> m = keras.metrics.TrueNegatives() + >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0]) + >>> m.result() + 2.0 + + >>> m.reset_state() + >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0]) + >>> m.result() + 1.0 + """ + + def __init__(self, thresholds=None, name=None, dtype=None): + super().__init__( + confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES, + thresholds=thresholds, + name=name, + dtype=dtype, + ) + + +@keras_export("keras.metrics.TruePositives") +class TruePositives(_ConfusionMatrixConditionCount): + """Calculates the number of true positives. + + If `sample_weight` is given, calculates the sum of the weights of + true positives. This metric creates one local variable, `true_positives` + that is used to keep track of the number of true positives. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + Args: + thresholds: (Optional) Defaults to `0.5`. A float value, or a Python + list/tuple of float threshold values in `[0, 1]`. A threshold is + compared with prediction values to determine the truth value of + predictions (i.e., above the threshold is `True`, below is `False`). + If used with a loss function that sets `from_logits=True` (i.e. no + sigmoid applied to predictions), `thresholds` should be set to 0. + One metric value is generated for each threshold value. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. 
+ + Example: + + >>> m = keras.metrics.TruePositives() + >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) + >>> m.result() + 2.0 + + >>> m.reset_state() + >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0]) + >>> m.result() + 1.0 + """ + + def __init__(self, thresholds=None, name=None, dtype=None): + super().__init__( + confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES, + thresholds=thresholds, + name=name, + dtype=dtype, + ) + + +@keras_export("keras.metrics.Precision") +class Precision(Metric): + """Computes the precision of the predictions with respect to the labels. + + The metric creates two local variables, `true_positives` and + `false_positives` that are used to compute the precision. This value is + ultimately returned as `precision`, an idempotent operation that simply + divides `true_positives` by the sum of `true_positives` and + `false_positives`. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + If `top_k` is set, we'll calculate precision as how often on average a class + among the top-k classes with the highest predicted values of a batch entry + is correct and can be found in the label for that entry. + + If `class_id` is specified, we calculate precision by considering only the + entries in the batch for which `class_id` is above the threshold and/or in + the top-k highest predictions, and computing the fraction of them for which + `class_id` is indeed a correct label. + + Args: + thresholds: (Optional) A float value, or a Python list/tuple of float + threshold values in `[0, 1]`. A threshold is compared with + prediction values to determine the truth value of predictions (i.e., + above the threshold is `True`, below is `False`). If used with a + loss function that sets `from_logits=True` (i.e. no sigmoid applied + to predictions), `thresholds` should be set to 0. One metric value + is generated for each threshold value. 
If neither `thresholds` nor + `top_k` are set, the default is to calculate precision with + `thresholds=0.5`. + top_k: (Optional) Unset by default. An int value specifying the top-k + predictions to consider when calculating precision. + class_id: (Optional) Integer class ID for which we want binary metrics. + This must be in the half-open interval `[0, num_classes)`, where + `num_classes` is the last dimension of predictions. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = keras.metrics.Precision() + >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) + >>> m.result() + 0.6666667 + + >>> m.reset_state() + >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0]) + >>> m.result() + 1.0 + + >>> # With top_k=2, it will calculate precision over y_true[:2] + >>> # and y_pred[:2] + >>> m = keras.metrics.Precision(top_k=2) + >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1]) + >>> m.result() + 0.0 + + >>> # With top_k=4, it will calculate precision over y_true[:4] + >>> # and y_pred[:4] + >>> m = keras.metrics.Precision(top_k=4) + >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1]) + >>> m.result() + 0.5 + + Usage with `compile()` API: + + ```python + model.compile(optimizer='sgd', + loss='binary_crossentropy', + metrics=[keras.metrics.Precision()]) + ``` + + Usage with a loss with `from_logits=True`: + + ```python + model.compile(optimizer='adam', + loss=keras.losses.BinaryCrossentropy(from_logits=True), + metrics=[keras.metrics.Precision(thresholds=0)]) + ``` + """ + + def __init__( + self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None + ): + super().__init__(name=name, dtype=dtype) + # Metric should be maximized during optimization. 
+ self._direction = "up" + + self.init_thresholds = thresholds + self.top_k = top_k + self.class_id = class_id + + default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF + self.thresholds = metrics_utils.parse_init_thresholds( + thresholds, default_threshold=default_threshold + ) + self._thresholds_distributed_evenly = ( + metrics_utils.is_evenly_distributed_thresholds(self.thresholds) + ) + self.true_positives = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="true_positives", + ) + self.false_positives = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="false_positives", + ) + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates true positive and false positive statistics. + + Args: + y_true: The ground truth values, with the same dimensions as + `y_pred`. Will be cast to `bool`. + y_pred: The predicted values. Each element must be in the range + `[0, 1]`. + sample_weight: Optional weighting of each example. Defaults to `1`. + Can be a tensor whose rank is either 0, or the same rank as + `y_true`, and must be broadcastable to `y_true`. 
+ """ + metrics_utils.update_confusion_matrix_variables( + { + metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501 + metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501 + }, + y_true, + y_pred, + thresholds=self.thresholds, + thresholds_distributed_evenly=self._thresholds_distributed_evenly, + top_k=self.top_k, + class_id=self.class_id, + sample_weight=sample_weight, + ) + + def result(self): + result = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_positives), + ) + return result[0] if len(self.thresholds) == 1 else result + + def reset_state(self): + num_thresholds = len(to_list(self.thresholds)) + self.true_positives.assign(ops.zeros((num_thresholds,))) + self.false_positives.assign(ops.zeros((num_thresholds,))) + + def get_config(self): + config = { + "thresholds": self.init_thresholds, + "top_k": self.top_k, + "class_id": self.class_id, + } + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras.metrics.Recall") +class Recall(Metric): + """Computes the recall of the predictions with respect to the labels. + + This metric creates two local variables, `true_positives` and + `false_negatives`, that are used to compute the recall. This value is + ultimately returned as `recall`, an idempotent operation that simply divides + `true_positives` by the sum of `true_positives` and `false_negatives`. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + If `top_k` is set, recall will be computed as how often on average a class + among the labels of a batch entry is in the top-k predictions. + + If `class_id` is specified, we calculate recall by considering only the + entries in the batch for which `class_id` is in the label, and computing the + fraction of them for which `class_id` is above the threshold and/or in the + top-k predictions. 
+ + Args: + thresholds: (Optional) A float value, or a Python list/tuple of float + threshold values in `[0, 1]`. A threshold is compared with + prediction values to determine the truth value of predictions (i.e., + above the threshold is `True`, below is `False`). If used with a + loss function that sets `from_logits=True` (i.e. no sigmoid + applied to predictions), `thresholds` should be set to 0. + One metric value is generated for each threshold value. + If neither `thresholds` nor `top_k` are set, + the default is to calculate recall with `thresholds=0.5`. + top_k: (Optional) Unset by default. An int value specifying the top-k + predictions to consider when calculating recall. + class_id: (Optional) Integer class ID for which we want binary metrics. + This must be in the half-open interval `[0, num_classes)`, where + `num_classes` is the last dimension of predictions. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = keras.metrics.Recall() + >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) + >>> m.result() + 0.6666667 + + >>> m.reset_state() + >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0]) + >>> m.result() + 1.0 + + Usage with `compile()` API: + + ```python + model.compile(optimizer='sgd', + loss='binary_crossentropy', + metrics=[keras.metrics.Recall()]) + ``` + + Usage with a loss with `from_logits=True`: + + ```python + model.compile(optimizer='adam', + loss=keras.losses.BinaryCrossentropy(from_logits=True), + metrics=[keras.metrics.Recall(thresholds=0)]) + ``` + """ + + def __init__( + self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None + ): + super().__init__(name=name, dtype=dtype) + # Metric should be maximized during optimization. 
+ self._direction = "up" + + self.init_thresholds = thresholds + self.top_k = top_k + self.class_id = class_id + + default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF + self.thresholds = metrics_utils.parse_init_thresholds( + thresholds, default_threshold=default_threshold + ) + self._thresholds_distributed_evenly = ( + metrics_utils.is_evenly_distributed_thresholds(self.thresholds) + ) + self.true_positives = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="true_positives", + ) + self.false_negatives = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="false_negatives", + ) + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates true positive and false negative statistics. + + Args: + y_true: The ground truth values, with the same dimensions as + `y_pred`. Will be cast to `bool`. + y_pred: The predicted values. Each element must be in the range + `[0, 1]`. + sample_weight: Optional weighting of each example. Defaults to `1`. + Can be a tensor whose rank is either 0, or the same rank as + `y_true`, and must be broadcastable to `y_true`. 
+ """ + metrics_utils.update_confusion_matrix_variables( + { + metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501 + metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501 + }, + y_true, + y_pred, + thresholds=self.thresholds, + thresholds_distributed_evenly=self._thresholds_distributed_evenly, + top_k=self.top_k, + class_id=self.class_id, + sample_weight=sample_weight, + ) + + def result(self): + result = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_negatives), + ) + return result[0] if len(self.thresholds) == 1 else result + + def reset_state(self): + num_thresholds = len(to_list(self.thresholds)) + self.true_positives.assign(ops.zeros((num_thresholds,))) + self.false_negatives.assign(ops.zeros((num_thresholds,))) + + def get_config(self): + config = { + "thresholds": self.init_thresholds, + "top_k": self.top_k, + "class_id": self.class_id, + } + base_config = super().get_config() + return {**base_config, **config} + + +class SensitivitySpecificityBase(Metric): + """Abstract base class for computing sensitivity and specificity. + + For additional information about specificity and sensitivity, see + [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity). + """ + + def __init__( + self, value, num_thresholds=200, class_id=None, name=None, dtype=None + ): + super().__init__(name=name, dtype=dtype) + # Metric should be maximized during optimization. + self._direction = "up" + + if num_thresholds <= 0: + raise ValueError( + "Argument `num_thresholds` must be an integer > 0. 
" + f"Received: num_thresholds={num_thresholds}" + ) + self.value = value + self.class_id = class_id + + # Compute `num_thresholds` thresholds in [0, 1] + if num_thresholds == 1: + self.thresholds = [0.5] + self._thresholds_distributed_evenly = False + else: + thresholds = [ + (i + 1) * 1.0 / (num_thresholds - 1) + for i in range(num_thresholds - 2) + ] + self.thresholds = [0.0] + thresholds + [1.0] + self._thresholds_distributed_evenly = True + + self.true_positives = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="true_positives", + ) + self.false_positives = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="false_positives", + ) + self.true_negatives = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="true_negatives", + ) + self.false_negatives = self.add_variable( + shape=(len(self.thresholds),), + initializer=initializers.Zeros(), + name="false_negatives", + ) + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates confusion matrix statistics. + + Args: + y_true: The ground truth values. + y_pred: The predicted values. + sample_weight: Optional weighting of each example. Defaults to `1`. + Can be a tensor whose rank is either 0, or the same rank as + `y_true`, and must be broadcastable to `y_true`. 
+ """ + metrics_utils.update_confusion_matrix_variables( + { + metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501 + metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, # noqa: E501 + metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501 + metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501 + }, + y_true, + y_pred, + thresholds=self.thresholds, + thresholds_distributed_evenly=self._thresholds_distributed_evenly, + class_id=self.class_id, + sample_weight=sample_weight, + ) + + def reset_state(self): + num_thresholds = len(self.thresholds) + self.true_positives.assign(ops.zeros((num_thresholds,))) + self.false_positives.assign(ops.zeros((num_thresholds,))) + self.true_negatives.assign(ops.zeros((num_thresholds,))) + self.false_negatives.assign(ops.zeros((num_thresholds,))) + + def get_config(self): + config = {"class_id": self.class_id} + base_config = super().get_config() + return {**base_config, **config} + + def _find_max_under_constraint(self, constrained, dependent, predicate): + """Returns the maximum of dependent_statistic that satisfies the + constraint. + + Args: + constrained: Over these values the constraint is specified. A rank-1 + tensor. + dependent: From these values the maximum that satiesfies the + constraint is selected. Values in this tensor and in + `constrained` are linked by having the same threshold at each + position, hence this tensor must have the same shape. + predicate: A binary boolean functor to be applied to arguments + `constrained` and `self.value`, e.g. `ops.greater`. + + Returns: + maximal dependent value, if no value satisfies the constraint 0.0. 
+ """ + feasible = ops.nonzero(predicate(constrained, self.value)) + feasible_exists = ops.greater(ops.size(feasible), 0) + max_dependent = ops.max(ops.take(dependent, feasible), initial=0) + + return ops.where(feasible_exists, max_dependent, 0.0) + + +@keras_export("keras.metrics.SensitivityAtSpecificity") +class SensitivityAtSpecificity(SensitivitySpecificityBase): + """Computes best sensitivity where specificity is >= specified value. + + `Sensitivity` measures the proportion of actual positives that are correctly + identified as such `(tp / (tp + fn))`. + `Specificity` measures the proportion of actual negatives that are correctly + identified as such `(tn / (tn + fp))`. + + This metric creates four local variables, `true_positives`, + `true_negatives`, `false_positives` and `false_negatives` that are used to + compute the sensitivity at the given specificity. The threshold for the + given specificity value is computed and used to evaluate the corresponding + sensitivity. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + If `class_id` is specified, we calculate precision by considering only the + entries in the batch for which `class_id` is above the threshold + predictions, and computing the fraction of them for which `class_id` is + indeed a correct label. + + For additional information about specificity and sensitivity, see + [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity). + + Args: + specificity: A scalar value in range `[0, 1]`. + num_thresholds: (Optional) Defaults to 200. The number of thresholds to + use for matching the given specificity. + class_id: (Optional) Integer class ID for which we want binary metrics. + This must be in the half-open interval `[0, num_classes)`, where + `num_classes` is the last dimension of predictions. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. 
+ + Example: + + >>> m = keras.metrics.SensitivityAtSpecificity(0.5) + >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) + >>> m.result() + 0.5 + + >>> m.reset_state() + >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8], + ... sample_weight=[1, 1, 2, 2, 1]) + >>> m.result() + 0.333333 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='binary_crossentropy', + metrics=[keras.metrics.SensitivityAtSpecificity()]) + ``` + """ + + def __init__( + self, + specificity, + num_thresholds=200, + class_id=None, + name=None, + dtype=None, + ): + if specificity < 0 or specificity > 1: + raise ValueError( + "Argument `specificity` must be in the range [0, 1]. " + f"Received: specificity={specificity}" + ) + self.specificity = specificity + self.num_thresholds = num_thresholds + super().__init__( + specificity, + num_thresholds=num_thresholds, + class_id=class_id, + name=name, + dtype=dtype, + ) + + def result(self): + sensitivities = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_negatives), + ) + specificities = ops.divide_no_nan( + self.true_negatives, + ops.add(self.true_negatives, self.false_positives), + ) + return self._find_max_under_constraint( + specificities, sensitivities, ops.greater_equal + ) + + def get_config(self): + config = { + "num_thresholds": self.num_thresholds, + "specificity": self.specificity, + } + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras.metrics.SpecificityAtSensitivity") +class SpecificityAtSensitivity(SensitivitySpecificityBase): + """Computes best specificity where sensitivity is >= specified value. + + `Sensitivity` measures the proportion of actual positives that are correctly + identified as such `(tp / (tp + fn))`. + `Specificity` measures the proportion of actual negatives that are correctly + identified as such `(tn / (tn + fp))`. 
+ + This metric creates four local variables, `true_positives`, + `true_negatives`, `false_positives` and `false_negatives` that are used to + compute the specificity at the given sensitivity. The threshold for the + given sensitivity value is computed and used to evaluate the corresponding + specificity. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + If `class_id` is specified, we calculate precision by considering only the + entries in the batch for which `class_id` is above the threshold + predictions, and computing the fraction of them for which `class_id` is + indeed a correct label. + + For additional information about specificity and sensitivity, see + [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity). + + Args: + sensitivity: A scalar value in range `[0, 1]`. + num_thresholds: (Optional) Defaults to 200. The number of thresholds to + use for matching the given sensitivity. + class_id: (Optional) Integer class ID for which we want binary metrics. + This must be in the half-open interval `[0, num_classes)`, where + `num_classes` is the last dimension of predictions. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = keras.metrics.SpecificityAtSensitivity(0.5) + >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) + >>> m.result() + 0.66666667 + + >>> m.reset_state() + >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8], + ... sample_weight=[1, 1, 2, 2, 2]) + >>> m.result() + 0.5 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='binary_crossentropy', + metrics=[keras.metrics.SpecificityAtSensitivity()]) + ``` + """ + + def __init__( + self, + sensitivity, + num_thresholds=200, + class_id=None, + name=None, + dtype=None, + ): + if sensitivity < 0 or sensitivity > 1: + raise ValueError( + "Argument `sensitivity` must be in the range [0, 1]. 
" + f"Received: sensitivity={sensitivity}" + ) + self.sensitivity = sensitivity + self.num_thresholds = num_thresholds + super().__init__( + sensitivity, + num_thresholds=num_thresholds, + class_id=class_id, + name=name, + dtype=dtype, + ) + + def result(self): + sensitivities = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_negatives), + ) + specificities = ops.divide_no_nan( + self.true_negatives, + ops.add(self.true_negatives, self.false_positives), + ) + return self._find_max_under_constraint( + sensitivities, specificities, ops.greater_equal + ) + + def get_config(self): + config = { + "num_thresholds": self.num_thresholds, + "sensitivity": self.sensitivity, + } + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras.metrics.PrecisionAtRecall") +class PrecisionAtRecall(SensitivitySpecificityBase): + """Computes best precision where recall is >= specified value. + + This metric creates four local variables, `true_positives`, + `true_negatives`, `false_positives` and `false_negatives` that are used to + compute the precision at the given recall. The threshold for the given + recall value is computed and used to evaluate the corresponding precision. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + If `class_id` is specified, we calculate precision by considering only the + entries in the batch for which `class_id` is above the threshold + predictions, and computing the fraction of them for which `class_id` is + indeed a correct label. + + Args: + recall: A scalar value in range `[0, 1]`. + num_thresholds: (Optional) Defaults to 200. The number of thresholds to + use for matching the given recall. + class_id: (Optional) Integer class ID for which we want binary metrics. + This must be in the half-open interval `[0, num_classes)`, where + `num_classes` is the last dimension of predictions. 
+ name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = keras.metrics.PrecisionAtRecall(0.5) + >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) + >>> m.result() + 0.5 + + >>> m.reset_state() + >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8], + ... sample_weight=[2, 2, 2, 1, 1]) + >>> m.result() + 0.33333333 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='binary_crossentropy', + metrics=[keras.metrics.PrecisionAtRecall(recall=0.8)]) + ``` + """ + + def __init__( + self, recall, num_thresholds=200, class_id=None, name=None, dtype=None + ): + if recall < 0 or recall > 1: + raise ValueError( + "Argument `recall` must be in the range [0, 1]. " + f"Received: recall={recall}" + ) + self.recall = recall + self.num_thresholds = num_thresholds + super().__init__( + value=recall, + num_thresholds=num_thresholds, + class_id=class_id, + name=name, + dtype=dtype, + ) + + def result(self): + recalls = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_negatives), + ) + precisions = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_positives), + ) + return self._find_max_under_constraint( + recalls, precisions, ops.greater_equal + ) + + def get_config(self): + config = {"num_thresholds": self.num_thresholds, "recall": self.recall} + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras.metrics.RecallAtPrecision") +class RecallAtPrecision(SensitivitySpecificityBase): + """Computes best recall where precision is >= specified value. + + For a given score-label-distribution the required precision might not + be achievable, in this case 0.0 is returned as recall. 
+ + This metric creates four local variables, `true_positives`, + `true_negatives`, `false_positives` and `false_negatives` that are used to + compute the recall at the given precision. The threshold for the given + precision value is computed and used to evaluate the corresponding recall. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + If `class_id` is specified, we calculate precision by considering only the + entries in the batch for which `class_id` is above the threshold + predictions, and computing the fraction of them for which `class_id` is + indeed a correct label. + + Args: + precision: A scalar value in range `[0, 1]`. + num_thresholds: (Optional) Defaults to 200. The number of thresholds + to use for matching the given precision. + class_id: (Optional) Integer class ID for which we want binary metrics. + This must be in the half-open interval `[0, num_classes)`, where + `num_classes` is the last dimension of predictions. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = keras.metrics.RecallAtPrecision(0.8) + >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) + >>> m.result() + 0.5 + + >>> m.reset_state() + >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9], + ... sample_weight=[1, 0, 0, 1]) + >>> m.result() + 1.0 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='binary_crossentropy', + metrics=[keras.metrics.RecallAtPrecision(precision=0.8)]) + ``` + """ + + def __init__( + self, + precision, + num_thresholds=200, + class_id=None, + name=None, + dtype=None, + ): + if precision < 0 or precision > 1: + raise ValueError( + "Argument `precision` must be in the range [0, 1]. 
" + f"Received: precision={precision}" + ) + self.precision = precision + self.num_thresholds = num_thresholds + super().__init__( + value=precision, + num_thresholds=num_thresholds, + class_id=class_id, + name=name, + dtype=dtype, + ) + + def result(self): + recalls = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_negatives), + ) + precisions = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_positives), + ) + return self._find_max_under_constraint( + precisions, recalls, ops.greater_equal + ) + + def get_config(self): + config = { + "num_thresholds": self.num_thresholds, + "precision": self.precision, + } + base_config = super().get_config() + return {**base_config, **config} + + +@keras_export("keras.metrics.AUC") +class AUC(Metric): + """Approximates the AUC (Area under the curve) of the ROC or PR curves. + + The AUC (Area under the curve) of the ROC (Receiver operating + characteristic; default) or PR (Precision Recall) curves are quality + measures of binary classifiers. Unlike the accuracy, and like cross-entropy + losses, ROC-AUC and PR-AUC evaluate all the operational points of a model. + + This class approximates AUCs using a Riemann sum. During the metric + accumulation phrase, predictions are accumulated within predefined buckets + by value. The AUC is then computed by interpolating per-bucket averages. + These buckets define the evaluated operational points. + + This metric creates four local variables, `true_positives`, + `true_negatives`, `false_positives` and `false_negatives` that are used to + compute the AUC. To discretize the AUC curve, a linearly spaced set of + thresholds is used to compute pairs of recall and precision values. The area + under the ROC-curve is therefore computed using the height of the recall + values by the false positive rate, while the area under the PR-curve is the + computed using the height of the precision values by the recall. 
+ + This value is ultimately returned as `auc`, an idempotent operation that + computes the area under a discretized curve of precision versus recall + values (computed using the aforementioned variables). The `num_thresholds` + variable controls the degree of discretization with larger numbers of + thresholds more closely approximating the true AUC. The quality of the + approximation may vary dramatically depending on `num_thresholds`. The + `thresholds` parameter can be used to manually specify thresholds which + split the predictions more evenly. + + For a best approximation of the real AUC, `predictions` should be + distributed approximately uniformly in the range `[0, 1]` (if + `from_logits=False`). The quality of the AUC approximation may be poor if + this is not the case. Setting `summation_method` to 'minoring' or 'majoring' + can help quantify the error in the approximation by providing lower or upper + bound estimate of the AUC. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + Args: + num_thresholds: (Optional) The number of thresholds to + use when discretizing the roc curve. Values must be > 1. + Defaults to `200`. + curve: (Optional) Specifies the name of the curve to be computed, + `'ROC'` (default) or `'PR'` for the Precision-Recall-curve. + summation_method: (Optional) Specifies the [Riemann summation method]( + https://en.wikipedia.org/wiki/Riemann_sum) used. + 'interpolation' (default) applies mid-point summation scheme for + `ROC`. For PR-AUC, interpolates (true/false) positives but not + the ratio that is precision (see Davis & Goadrich 2006 for + details); 'minoring' applies left summation for increasing + intervals and right summation for decreasing intervals; 'majoring' + does the opposite. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. 
+ thresholds: (Optional) A list of floating point values to use as the + thresholds for discretizing the curve. If set, the `num_thresholds` + parameter is ignored. Values should be in `[0, 1]`. Endpoint + thresholds equal to {`-epsilon`, `1+epsilon`} for a small positive + epsilon value will be automatically included with these to correctly + handle predictions equal to exactly 0 or 1. + multi_label: boolean indicating whether multilabel data should be + treated as such, wherein AUC is computed separately for each label + and then averaged across labels, or (when `False`) if the data + should be flattened into a single label before AUC computation. In + the latter case, when multilabel data is passed to AUC, each + label-prediction pair is treated as an individual data point. Should + be set to `False` for multi-class data. + num_labels: (Optional) The number of labels, used when `multi_label` is + True. If `num_labels` is not specified, then state variables get + created on the first call to `update_state`. + label_weights: (Optional) list, array, or tensor of non-negative weights + used to compute AUCs for multilabel data. When `multi_label` is + True, the weights are applied to the individual label AUCs when they + are averaged to produce the multi-label AUC. When it's False, they + are used to weight the individual label predictions in computing the + confusion matrix on the flattened data. Note that this is unlike + `class_weights` in that `class_weights` weights the example + depending on the value of its label, whereas `label_weights` depends + only on the index of that label before flattening; therefore + `label_weights` should not be used for multi-class data. + from_logits: boolean indicating whether the predictions (`y_pred` in + `update_state`) are probabilities or sigmoid logits. As a rule of thumb, + when using a keras loss, the `from_logits` constructor argument of the + loss should match the AUC `from_logits` constructor argument. 
+ + Example: + + >>> m = keras.metrics.AUC(num_thresholds=3) + >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) + >>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7] + >>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2] + >>> # tp_rate = recall = [1, 0.5, 0], fp_rate = [1, 0, 0] + >>> # auc = ((((1 + 0.5) / 2) * (1 - 0)) + (((0.5 + 0) / 2) * (0 - 0))) + >>> # = 0.75 + >>> m.result() + 0.75 + + >>> m.reset_state() + >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9], + ... sample_weight=[1, 0, 0, 1]) + >>> m.result() + 1.0 + + Usage with `compile()` API: + + ```python + # Reports the AUC of a model outputting a probability. + model.compile(optimizer='sgd', + loss=keras.losses.BinaryCrossentropy(), + metrics=[keras.metrics.AUC()]) + + # Reports the AUC of a model outputting a logit. + model.compile(optimizer='sgd', + loss=keras.losses.BinaryCrossentropy(from_logits=True), + metrics=[keras.metrics.AUC(from_logits=True)]) + ``` + """ + + def __init__( + self, + num_thresholds=200, + curve="ROC", + summation_method="interpolation", + name=None, + dtype=None, + thresholds=None, + multi_label=False, + num_labels=None, + label_weights=None, + from_logits=False, + ): + # Metric should be maximized during optimization. + self._direction = "up" + + # Validate configurations. + if isinstance(curve, metrics_utils.AUCCurve) and curve not in list( + metrics_utils.AUCCurve + ): + raise ValueError( + f'Invalid `curve` argument value "{curve}". ' + f"Expected one of: {list(metrics_utils.AUCCurve)}" + ) + if isinstance( + summation_method, metrics_utils.AUCSummationMethod + ) and summation_method not in list(metrics_utils.AUCSummationMethod): + raise ValueError( + "Invalid `summation_method` " + f'argument value "{summation_method}". ' + f"Expected one of: {list(metrics_utils.AUCSummationMethod)}" + ) + + # Update properties. + self._init_from_thresholds = thresholds is not None + if thresholds is not None: + # If specified, use the supplied thresholds. 
+ self.num_thresholds = len(thresholds) + 2 + thresholds = sorted(thresholds) + self._thresholds_distributed_evenly = ( + metrics_utils.is_evenly_distributed_thresholds( + np.array([0.0] + thresholds + [1.0]) + ) + ) + else: + if num_thresholds <= 1: + raise ValueError( + "Argument `num_thresholds` must be an integer > 1. " + f"Received: num_thresholds={num_thresholds}" + ) + + # Otherwise, linearly interpolate (num_thresholds - 2) thresholds in + # (0, 1). + self.num_thresholds = num_thresholds + thresholds = [ + (i + 1) * 1.0 / (num_thresholds - 1) + for i in range(num_thresholds - 2) + ] + self._thresholds_distributed_evenly = True + + # Add an endpoint "threshold" below zero and above one for either + # threshold method to account for floating point imprecisions. + self._thresholds = np.array( + [0.0 - backend.epsilon()] + thresholds + [1.0 + backend.epsilon()] + ) + + if isinstance(curve, metrics_utils.AUCCurve): + self.curve = curve + else: + self.curve = metrics_utils.AUCCurve.from_str(curve) + if isinstance(summation_method, metrics_utils.AUCSummationMethod): + self.summation_method = summation_method + else: + self.summation_method = metrics_utils.AUCSummationMethod.from_str( + summation_method + ) + super().__init__(name=name, dtype=dtype) + + # Handle multilabel arguments. + self.multi_label = multi_label + self.num_labels = num_labels + if label_weights is not None: + label_weights = ops.array(label_weights, dtype=self.dtype) + self.label_weights = label_weights + + else: + self.label_weights = None + + self._from_logits = from_logits + + self._built = False + if self.multi_label: + if num_labels: + shape = [None, num_labels] + self._build(shape) + else: + if num_labels: + raise ValueError( + "`num_labels` is needed only when `multi_label` is True." 
+ ) + self._build(None) + + @property + def thresholds(self): + """The thresholds used for evaluating AUC.""" + return list(self._thresholds) + + def _build(self, shape): + """Initialize TP, FP, TN, and FN tensors, given the shape of the + data.""" + if self.multi_label: + if len(shape) != 2: + raise ValueError( + "`y_pred` must have rank 2 when `multi_label=True`. " + f"Found rank {len(shape)}. " + f"Full shape received for `y_pred`: {shape}" + ) + self._num_labels = shape[1] + variable_shape = [self.num_thresholds, self._num_labels] + else: + variable_shape = [self.num_thresholds] + + self._build_input_shape = shape + # Create metric variables + self.true_positives = self.add_variable( + shape=variable_shape, + initializer=initializers.Zeros(), + name="true_positives", + ) + self.false_positives = self.add_variable( + shape=variable_shape, + initializer=initializers.Zeros(), + name="false_positives", + ) + self.true_negatives = self.add_variable( + shape=variable_shape, + initializer=initializers.Zeros(), + name="true_negatives", + ) + self.false_negatives = self.add_variable( + shape=variable_shape, + initializer=initializers.Zeros(), + name="false_negatives", + ) + + self._built = True + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates confusion matrix statistics. + + Args: + y_true: The ground truth values. + y_pred: The predicted values. + sample_weight: Optional weighting of each example. Can + be a tensor whose rank is either 0, or the same rank as + `y_true`, and must be broadcastable to `y_true`. Defaults to + `1`. + """ + if not self._built: + self._build(y_pred.shape) + + if self.multi_label or (self.label_weights is not None): + # y_true should have shape (number of examples, number of labels). + shapes = [(y_true, ("N", "L"))] + if self.multi_label: + # TP, TN, FP, and FN should all have shape + # (number of thresholds, number of labels). 
+ shapes.extend( + [ + (self.true_positives, ("T", "L")), + (self.true_negatives, ("T", "L")), + (self.false_positives, ("T", "L")), + (self.false_negatives, ("T", "L")), + ] + ) + if self.label_weights is not None: + # label_weights should be of length equal to the number of + # labels. + shapes.append((self.label_weights, ("L",))) + + # Only forward label_weights to update_confusion_matrix_variables when + # multi_label is False. Otherwise the averaging of individual label AUCs + # is handled in AUC.result + label_weights = None if self.multi_label else self.label_weights + + if self._from_logits: + y_pred = activations.sigmoid(y_pred) + + metrics_utils.update_confusion_matrix_variables( + { + metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501 + metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, # noqa: E501 + metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501 + metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501 + }, + y_true, + y_pred, + self._thresholds, + thresholds_distributed_evenly=self._thresholds_distributed_evenly, + sample_weight=sample_weight, + multi_label=self.multi_label, + label_weights=label_weights, + ) + + def interpolate_pr_auc(self): + """Interpolation formula inspired by section 4 of Davis & Goadrich 2006. 
+ + https://www.biostat.wisc.edu/~page/rocpr.pdf + + Note here we derive & use a closed formula not present in the paper + as follows: + + Precision = TP / (TP + FP) = TP / P + + Modeling all of TP (true positive), FP (false positive) and their sum + P = TP + FP (predicted positive) as varying linearly within each + interval [A, B] between successive thresholds, we get + + Precision slope = dTP / dP + = (TP_B - TP_A) / (P_B - P_A) + = (TP - TP_A) / (P - P_A) + Precision = (TP_A + slope * (P - P_A)) / P + + The area within the interval is (slope / total_pos_weight) times + + int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P} + int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P} + + where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in + + int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A) + + Bringing back the factor (slope / total_pos_weight) we'd put aside, we + get + + slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight + + where dTP == TP_B - TP_A. + + Note that when P_A == 0 the above calculation simplifies into + + int_A^B{Precision.dTP} = int_A^B{slope * dTP} + = slope * (TP_B - TP_A) + + which is really equivalent to imputing constant precision throughout the + first bucket having >0 true positives. + + Returns: + pr_auc: an approximation of the area under the P-R curve. 
+ """ + + dtp = ops.subtract( + self.true_positives[: self.num_thresholds - 1], + self.true_positives[1:], + ) + p = ops.add(self.true_positives, self.false_positives) + dp = ops.subtract(p[: self.num_thresholds - 1], p[1:]) + prec_slope = ops.divide_no_nan(dtp, ops.maximum(dp, 0)) + intercept = ops.subtract( + self.true_positives[1:], ops.multiply(prec_slope, p[1:]) + ) + + safe_p_ratio = ops.where( + ops.logical_and(p[: self.num_thresholds - 1] > 0, p[1:] > 0), + ops.divide_no_nan( + p[: self.num_thresholds - 1], ops.maximum(p[1:], 0) + ), + ops.ones_like(p[1:]), + ) + + pr_auc_increment = ops.divide_no_nan( + ops.multiply( + prec_slope, + (ops.add(dtp, ops.multiply(intercept, ops.log(safe_p_ratio)))), + ), + ops.maximum( + ops.add(self.true_positives[1:], self.false_negatives[1:]), 0 + ), + ) + + if self.multi_label: + by_label_auc = ops.sum(pr_auc_increment, axis=0) + if self.label_weights is None: + # Evenly weighted average of the label AUCs. + return ops.mean(by_label_auc) + else: + # Weighted average of the label AUCs. + return ops.divide_no_nan( + ops.sum(ops.multiply(by_label_auc, self.label_weights)), + ops.sum(self.label_weights), + ) + else: + return ops.sum(pr_auc_increment) + + def result(self): + if ( + self.curve == metrics_utils.AUCCurve.PR + and self.summation_method + == metrics_utils.AUCSummationMethod.INTERPOLATION + ): + # This use case is different and is handled separately. + return self.interpolate_pr_auc() + + # Set `x` and `y` values for the curves based on `curve` config. + recall = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_negatives), + ) + if self.curve == metrics_utils.AUCCurve.ROC: + fp_rate = ops.divide_no_nan( + self.false_positives, + ops.add(self.false_positives, self.true_negatives), + ) + x = fp_rate + y = recall + else: # curve == 'PR'. 
+ precision = ops.divide_no_nan( + self.true_positives, + ops.add(self.true_positives, self.false_positives), + ) + x = recall + y = precision + + # Find the rectangle heights based on `summation_method`. + if ( + self.summation_method + == metrics_utils.AUCSummationMethod.INTERPOLATION + ): + # Note: the case ('PR', 'interpolation') has been handled above. + heights = ops.divide( + ops.add(y[: self.num_thresholds - 1], y[1:]), 2.0 + ) + elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING: + heights = ops.minimum(y[: self.num_thresholds - 1], y[1:]) + # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING: + else: + heights = ops.maximum(y[: self.num_thresholds - 1], y[1:]) + + # Sum up the areas of all the rectangles. + riemann_terms = ops.multiply( + ops.subtract(x[: self.num_thresholds - 1], x[1:]), heights + ) + if self.multi_label: + by_label_auc = ops.sum(riemann_terms, axis=0) + + if self.label_weights is None: + # Unweighted average of the label AUCs. + return ops.mean(by_label_auc) + else: + # Weighted average of the label AUCs. 
+ return ops.divide_no_nan( + ops.sum(ops.multiply(by_label_auc, self.label_weights)), + ops.sum(self.label_weights), + ) + else: + return ops.sum(riemann_terms) + + def reset_state(self): + if self._built: + if self.multi_label: + variable_shape = (self.num_thresholds, self._num_labels) + else: + variable_shape = (self.num_thresholds,) + + self.true_positives.assign(ops.zeros(variable_shape)) + self.false_positives.assign(ops.zeros(variable_shape)) + self.true_negatives.assign(ops.zeros(variable_shape)) + self.false_negatives.assign(ops.zeros(variable_shape)) + + def get_config(self): + label_weights = self.label_weights + config = { + "num_thresholds": self.num_thresholds, + "curve": self.curve.value, + "summation_method": self.summation_method.value, + "multi_label": self.multi_label, + "num_labels": self.num_labels, + "label_weights": label_weights, + "from_logits": self._from_logits, + } + # optimization to avoid serializing a large number of generated + # thresholds + if self._init_from_thresholds: + # We remove the endpoint thresholds as an inverse of how the + # thresholds were initialized. This ensures that a metric + # initialized from this config has the same thresholds. 
+ config["thresholds"] = self.thresholds[1:-1] + base_config = super().get_config() + return {**base_config, **config} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/correlation_metrics.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/correlation_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..1d2c8efea6c747e88e139ef98b0f34cad59f927b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/correlation_metrics.py @@ -0,0 +1,215 @@ +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.losses.loss import squeeze_or_expand_to_same_rank +from keras.src.metrics import reduction_metrics + + +@keras_export("keras.metrics.pearson_correlation") +def pearson_correlation(y_true, y_pred, axis=-1): + """Computes the Pearson coefficient between labels and predictions. + + Formula: + + ```python + loss = mean(l2norm(y_true - mean(y_true) * l2norm(y_pred - mean(y_pred))) + ``` + + Args: + y_true: Tensor of true targets. + y_pred: Tensor of predicted targets. + axis: Axis along which to determine similarity. Defaults to `-1`. + + Returns: + Pearson Correlation Coefficient tensor. + + Example: + + >>> y_true = [[0, 1, 0.5], [1, 1, 0.2]] + >>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]] + >>> loss = keras.losses.concordance_correlation( + ... y_true, y_pred, axis=-1 + ... ).numpy() + [1. 
0.99339927] + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + + y_true_norm = y_true - ops.mean(y_true, axis=axis, keepdims=True) + y_pred_norm = y_pred - ops.mean(y_pred, axis=axis, keepdims=True) + + y_true_norm = y_true_norm / ops.std(y_true_norm, axis=axis, keepdims=True) + y_pred_norm = y_pred_norm / ops.std(y_pred_norm, axis=axis, keepdims=True) + + return ops.mean(y_true_norm * y_pred_norm, axis=axis) + + +@keras_export("keras.metrics.concordance_correlation") +def concordance_correlation(y_true, y_pred, axis=-1): + """Computes the Concordance coefficient between labels and predictions. + + Formula: + + ```python + loss = mean( + 2 * (y_true - mean(y_true) * (y_pred - mean(y_pred)) / ( + var(y_true) + var(y_pred) + square(mean(y_true) - mean(y_pred)) + ) + ) + ``` + + Args: + y_true: Tensor of true targets. + y_pred: Tensor of predicted targets. + axis: Axis along which to determine similarity. Defaults to `-1`. + + Returns: + Concordance Correlation Coefficient tensor. + + Example: + + >>> y_true = [[0, 1, 0.5], [1, 1, 0.2]] + >>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]] + >>> loss = keras.losses.concordance_correlation( + ... y_true, y_pred, axis=-1 + ... 
).numpy() + [0.97560976 0.98765432] + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + + y_true_mean = ops.mean(y_true, axis=axis, keepdims=True) + y_pred_mean = ops.mean(y_pred, axis=axis, keepdims=True) + + y_true_var = ops.var(y_true - y_true_mean, axis=axis, keepdims=True) + y_pred_var = ops.var(y_pred - y_pred_mean, axis=axis, keepdims=True) + + covar = (y_true - y_pred_mean) * (y_pred - y_pred_mean) + norm = y_true_var + y_pred_var + ops.square(y_true_mean - y_pred_mean) + + return ops.mean(2 * covar / (norm + backend.epsilon()), axis=axis) + + +@keras_export("keras.metrics.PearsonCorrelation") +class PearsonCorrelation(reduction_metrics.MeanMetricWrapper): + """Calculates the Pearson Correlation Coefficient (PCC). + + PCC measures the linear relationship between the true values (`y_true`) and + the predicted values (`y_pred`). The coefficient ranges from -1 to 1, where + a value of 1 implies a perfect positive linear correlation, 0 indicates no + linear correlation, and -1 indicates a perfect negative linear correlation. + + This metric is widely used in regression tasks where the strength of the + linear relationship between predictions and true labels is an + important evaluation criterion. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + axis: (Optional) integer or tuple of integers of the axis/axes along + which to compute the metric. Defaults to `-1`. 
+ + Example: + + >>> pcc = keras.metrics.PearsonCorrelation(axis=-1) + >>> y_true = [[0, 1, 0.5], [1, 1, 0.2]] + >>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]] + >>> pcc.update_state(y_true, y_pred) + >>> pcc.result() + 0.9966996338993913 + + Usage with `compile()` API: + + ```python + model.compile(optimizer='sgd', + loss='mean_squared_error', + metrics=[keras.metrics.PearsonCorrelation()]) + ``` + """ + + def __init__( + self, + name="pearson_correlation", + dtype=None, + axis=-1, + ): + super().__init__( + fn=pearson_correlation, + name=name, + dtype=dtype, + axis=axis, + ) + self.axis = axis + # Metric should be maximized during optimization. + self._direction = "up" + + def get_config(self): + return { + "name": self.name, + "dtype": self.dtype, + "axis": self.axis, + } + + +@keras_export("keras.metrics.ConcordanceCorrelation") +class ConcordanceCorrelation(reduction_metrics.MeanMetricWrapper): + """Calculates the Concordance Correlation Coefficient (CCC). + + CCC evaluates the agreement between true values (`y_true`) and predicted + values (`y_pred`) by considering both precision and accuracy. The + coefficient ranges from -1 to 1, where a value of 1 indicates perfect + agreement. + + This metric is useful in regression tasks where it is important to assess + how well the predictions match the true values, taking into account both + their correlation and proximity to the 45-degree line of perfect + concordance. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + axis: (Optional) integer or tuple of integers of the axis/axes along + which to compute the metric. Defaults to `-1`. 
+ + Example: + + >>> ccc = keras.metrics.ConcordanceCorrelation(axis=-1) + >>> y_true = [[0, 1, 0.5], [1, 1, 0.2]] + >>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]] + >>> ccc.update_state(y_true, y_pred) + >>> ccc.result() + 0.9816320385426076 + + Usage with `compile()` API: + + ```python + model.compile(optimizer='sgd', + loss='mean_squared_error', + metrics=[keras.metrics.ConcordanceCorrelation()]) + ``` + """ + + def __init__( + self, + name="concordance_correlation", + dtype=None, + axis=-1, + ): + super().__init__( + fn=concordance_correlation, + name=name, + dtype=dtype, + axis=axis, + ) + self.axis = axis + # Metric should be maximized during optimization. + self._direction = "up" + + def get_config(self): + return { + "name": self.name, + "dtype": self.dtype, + "axis": self.axis, + } diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/f_score_metrics.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/f_score_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..a51119cb48e4845b30d61bd2f0185ee1d5537685 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/f_score_metrics.py @@ -0,0 +1,320 @@ +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric + + +@keras_export("keras.metrics.FBetaScore") +class FBetaScore(Metric): + """Computes F-Beta score. + + Formula: + + ```python + b2 = beta ** 2 + f_beta_score = (1 + b2) * (precision * recall) / (precision * b2 + recall) + ``` + This is the weighted harmonic mean of precision and recall. + Its output range is `[0, 1]`. It works for both multi-class + and multi-label classification. + + Args: + average: Type of averaging to be performed across per-class results + in the multi-class case. 
+ Acceptable values are `None`, `"micro"`, `"macro"` and + `"weighted"`. Defaults to `None`. + If `None`, no averaging is performed and `result()` will return + the score for each class. + If `"micro"`, compute metrics globally by counting the total + true positives, false negatives and false positives. + If `"macro"`, compute metrics for each label, + and return their unweighted mean. + This does not take label imbalance into account. + If `"weighted"`, compute metrics for each label, + and return their average weighted by support + (the number of true instances for each label). + This alters `"macro"` to account for label imbalance. + It can result in an score that is not between precision and recall. + beta: Determines the weight of given to recall + in the harmonic mean between precision and recall (see pseudocode + equation above). Defaults to `1`. + threshold: Elements of `y_pred` greater than `threshold` are + converted to be 1, and the rest 0. If `threshold` is + `None`, the argmax of `y_pred` is converted to 1, and the rest to 0. + name: Optional. String name of the metric instance. + dtype: Optional. Data type of the metric result. + + Returns: + F-Beta Score: float. + + Example: + + >>> metric = keras.metrics.FBetaScore(beta=2.0, threshold=0.5) + >>> y_true = np.array([[1, 1, 1], + ... [1, 0, 0], + ... [1, 1, 0]], np.int32) + >>> y_pred = np.array([[0.2, 0.6, 0.7], + ... [0.2, 0.6, 0.6], + ... [0.6, 0.8, 0.0]], np.float32) + >>> metric.update_state(y_true, y_pred) + >>> result = metric.result() + >>> result + [0.3846154 , 0.90909094, 0.8333334 ] + """ + + def __init__( + self, + average=None, + beta=1.0, + threshold=None, + name="fbeta_score", + dtype=None, + ): + super().__init__(name=name, dtype=dtype) + # Metric should be maximized during optimization. + self._direction = "up" + + if average not in (None, "micro", "macro", "weighted"): + raise ValueError( + "Invalid `average` argument value. Expected one of: " + "{None, 'micro', 'macro', 'weighted'}. 
" + f"Received: average={average}" + ) + + if not isinstance(beta, float): + raise ValueError( + "Invalid `beta` argument value. " + "It should be a Python float. " + f"Received: beta={beta} of type '{type(beta)}'" + ) + if beta <= 0.0: + raise ValueError( + "Invalid `beta` argument value. " + "It should be > 0. " + f"Received: beta={beta}" + ) + + if threshold is not None: + if not isinstance(threshold, float): + raise ValueError( + "Invalid `threshold` argument value. " + "It should be a Python float. " + f"Received: threshold={threshold} " + f"of type '{type(threshold)}'" + ) + if threshold > 1.0 or threshold <= 0.0: + raise ValueError( + "Invalid `threshold` argument value. " + "It should verify 0 < threshold <= 1. " + f"Received: threshold={threshold}" + ) + + self.average = average + self.beta = beta + self.threshold = threshold + self.axis = None + self._built = False + + if self.average != "micro": + self.axis = 0 + + def _build(self, y_true_shape, y_pred_shape): + if len(y_pred_shape) != 2 or len(y_true_shape) != 2: + raise ValueError( + "FBetaScore expects 2D inputs with shape " + "(batch_size, output_dim). Received input " + f"shapes: y_pred.shape={y_pred_shape} and " + f"y_true.shape={y_true_shape}." + ) + if y_pred_shape[-1] is None or y_true_shape[-1] is None: + raise ValueError( + "FBetaScore expects 2D inputs with shape " + "(batch_size, output_dim), with output_dim fully " + "defined (not None). Received input " + f"shapes: y_pred.shape={y_pred_shape} and " + f"y_true.shape={y_true_shape}." 
+ ) + num_classes = y_pred_shape[-1] + if self.average != "micro": + init_shape = (num_classes,) + else: + init_shape = () + + def _add_zeros_variable(name): + return self.add_variable( + name=name, + shape=init_shape, + initializer=initializers.Zeros(), + dtype=self.dtype, + ) + + self.true_positives = _add_zeros_variable("true_positives") + self.false_positives = _add_zeros_variable("false_positives") + self.false_negatives = _add_zeros_variable("false_negatives") + self.intermediate_weights = _add_zeros_variable("intermediate_weights") + self._built = True + + def update_state(self, y_true, y_pred, sample_weight=None): + y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) + y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype) + if not self._built: + self._build(y_true.shape, y_pred.shape) + + if self.threshold is None: + threshold = ops.max(y_pred, axis=-1, keepdims=True) + # make sure [0, 0, 0] doesn't become [1, 1, 1] + # Use abs(x) > eps, instead of x != 0 to check for zero + y_pred = ops.logical_and( + y_pred >= threshold, ops.abs(y_pred) > 1e-9 + ) + else: + y_pred = y_pred > self.threshold + + y_pred = ops.cast(y_pred, dtype=self.dtype) + y_true = ops.cast(y_true, dtype=self.dtype) + if sample_weight is not None: + sample_weight = ops.convert_to_tensor( + sample_weight, dtype=self.dtype + ) + + def _weighted_sum(val, sample_weight): + if sample_weight is not None: + val = ops.multiply(val, ops.expand_dims(sample_weight, 1)) + return ops.sum(val, axis=self.axis) + + self.true_positives.assign( + self.true_positives + _weighted_sum(y_pred * y_true, sample_weight) + ) + self.false_positives.assign( + self.false_positives + + _weighted_sum(y_pred * (1 - y_true), sample_weight) + ) + self.false_negatives.assign( + self.false_negatives + + _weighted_sum((1 - y_pred) * y_true, sample_weight) + ) + self.intermediate_weights.assign( + self.intermediate_weights + _weighted_sum(y_true, sample_weight) + ) + + def result(self): + precision = ops.divide( + 
self.true_positives, + self.true_positives + self.false_positives + backend.epsilon(), + ) + recall = ops.divide( + self.true_positives, + self.true_positives + self.false_negatives + backend.epsilon(), + ) + + precision = ops.convert_to_tensor(precision, dtype=self.dtype) + recall = ops.convert_to_tensor(recall, dtype=self.dtype) + + mul_value = precision * recall + add_value = ((self.beta**2) * precision) + recall + mean = ops.divide(mul_value, add_value + backend.epsilon()) + f1_score = mean * (1 + (self.beta**2)) + + if self.average == "weighted": + weights = ops.divide( + self.intermediate_weights, + ops.sum(self.intermediate_weights) + backend.epsilon(), + ) + f1_score = ops.sum(f1_score * weights) + + elif self.average is not None: # [micro, macro] + f1_score = ops.mean(f1_score) + + return f1_score + + def get_config(self): + """Returns the serializable config of the metric.""" + + config = { + "name": self.name, + "dtype": self.dtype, + "average": self.average, + "beta": self.beta, + "threshold": self.threshold, + } + + base_config = super().get_config() + return {**base_config, **config} + + def reset_state(self): + for v in self.variables: + v.assign(ops.zeros(v.shape, dtype=v.dtype)) + + +@keras_export("keras.metrics.F1Score") +class F1Score(FBetaScore): + r"""Computes F-1 Score. + + Formula: + + ```python + f1_score = 2 * (precision * recall) / (precision + recall) + ``` + This is the harmonic mean of precision and recall. + Its output range is `[0, 1]`. It works for both multi-class + and multi-label classification. + + Args: + average: Type of averaging to be performed on data. + Acceptable values are `None`, `"micro"`, `"macro"` + and `"weighted"`. Defaults to `None`. + If `None`, no averaging is performed and `result()` will return + the score for each class. + If `"micro"`, compute metrics globally by counting the total + true positives, false negatives and false positives. 
+ If `"macro"`, compute metrics for each label, + and return their unweighted mean. + This does not take label imbalance into account. + If `"weighted"`, compute metrics for each label, + and return their average weighted by support + (the number of true instances for each label). + This alters `"macro"` to account for label imbalance. + It can result in an score that is not between precision and recall. + threshold: Elements of `y_pred` greater than `threshold` are + converted to be 1, and the rest 0. If `threshold` is + `None`, the argmax of `y_pred` is converted to 1, and the rest to 0. + name: Optional. String name of the metric instance. + dtype: Optional. Data type of the metric result. + + Returns: + F-1 Score: float. + + Example: + + >>> metric = keras.metrics.F1Score(threshold=0.5) + >>> y_true = np.array([[1, 1, 1], + ... [1, 0, 0], + ... [1, 1, 0]], np.int32) + >>> y_pred = np.array([[0.2, 0.6, 0.7], + ... [0.2, 0.6, 0.6], + ... [0.6, 0.8, 0.0]], np.float32) + >>> metric.update_state(y_true, y_pred) + >>> result = metric.result() + array([0.5 , 0.8 , 0.6666667], dtype=float32) + """ + + def __init__( + self, + average=None, + threshold=None, + name="f1_score", + dtype=None, + ): + super().__init__( + average=average, + beta=1.0, + threshold=threshold, + name=name, + dtype=dtype, + ) + + def get_config(self): + base_config = super().get_config() + del base_config["beta"] + return base_config diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/hinge_metrics.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/hinge_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..4678b3fa1718fc138a9427407a7591c1f89ca927 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/hinge_metrics.py @@ -0,0 +1,100 @@ +from keras.src.api_export import keras_export +from keras.src.losses.losses import categorical_hinge +from 
keras.src.losses.losses import hinge +from keras.src.losses.losses import squared_hinge +from keras.src.metrics import reduction_metrics + + +@keras_export("keras.metrics.Hinge") +class Hinge(reduction_metrics.MeanMetricWrapper): + """Computes the hinge metric between `y_true` and `y_pred`. + + `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are + provided we will convert them to -1 or 1. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + + >>> m = keras.metrics.Hinge() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) + >>> m.result() + 1.3 + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], + ... sample_weight=[1, 0]) + >>> m.result() + 1.1 + """ + + def __init__(self, name="hinge", dtype=None): + super().__init__(fn=hinge, name=name, dtype=dtype) + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.SquaredHinge") +class SquaredHinge(reduction_metrics.MeanMetricWrapper): + """Computes the hinge metric between `y_true` and `y_pred`. + + `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are + provided we will convert them to -1 or 1. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = keras.metrics.SquaredHinge() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) + >>> m.result() + 1.86 + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], + ... sample_weight=[1, 0]) + >>> m.result() + 1.46 + """ + + def __init__(self, name="squared_hinge", dtype=None): + super().__init__(fn=squared_hinge, name=name, dtype=dtype) + # Metric should be minimized during optimization. 
+ self._direction = "down" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.CategoricalHinge") +class CategoricalHinge(reduction_metrics.MeanMetricWrapper): + """Computes the categorical hinge metric between `y_true` and `y_pred`. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + >>> m = keras.metrics.CategoricalHinge() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) + >>> m.result().numpy() + 1.4000001 + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], + ... sample_weight=[1, 0]) + >>> m.result() + 1.2 + """ + + def __init__(self, name="categorical_hinge", dtype=None): + super().__init__(fn=categorical_hinge, name=name, dtype=dtype) + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/iou_metrics.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/iou_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..65c84e591b967106bddba66fc111827d12442c83 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/iou_metrics.py @@ -0,0 +1,762 @@ +import warnings + +from keras.src import backend +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric +from keras.src.metrics.metrics_utils import confusion_matrix + + +class _IoUBase(Metric): + """Computes the confusion matrix for Intersection-Over-Union metrics. 
+ + Formula: + + ```python + iou = true_positives / (true_positives + false_positives + false_negatives) + ``` + Intersection-Over-Union is a common evaluation metric for semantic image + segmentation. + + From IoUs of individual classes, the MeanIoU can be computed as the mean of + the individual IoUs. + + To compute IoUs, the predictions are accumulated in a confusion matrix, + weighted by `sample_weight` and the metric is then calculated from it. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + Args: + num_classes: The possible number of labels the prediction task can have. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + ignore_class: Optional integer. The ID of a class to be ignored during + metric computation. This is useful, for example, in segmentation + problems featuring a "void" class (commonly -1 or 255) in + segmentation maps. By default (`ignore_class=None`), all classes are + considered. + sparse_y_true: Whether labels are encoded using integers or + dense floating point vectors. If `False`, the `argmax` function + is used to determine each sample's most likely associated label. + sparse_y_pred: Whether predictions are encoded using integers or + dense floating point vectors. If `False`, the `argmax` function + is used to determine each sample's most likely associated label. + axis: (Optional) -1 is the dimension containing the logits. + Defaults to `-1`. + """ + + def __init__( + self, + num_classes, + name=None, + dtype=None, + ignore_class=None, + sparse_y_true=True, + sparse_y_pred=True, + axis=-1, + ): + # defaulting to int to avoid issues with confusion matrix + super().__init__(name=name, dtype=dtype or "int") + # Metric should be maximized during optimization. 
+ self._direction = "up" + self.num_classes = num_classes + self.ignore_class = ignore_class + self.sparse_y_true = sparse_y_true + self.sparse_y_pred = sparse_y_pred + self.axis = axis + + self.total_cm = self.add_variable( + name="total_confusion_matrix", + shape=(num_classes, num_classes), + initializer=initializers.Zeros(), + dtype=self.dtype, + ) + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates the confusion matrix statistics. + + Args: + y_true: The ground truth values. + y_pred: The predicted values. + sample_weight: Optional weighting of each example. Can + be a `Tensor` whose rank is either 0, or the same as `y_true`, + and must be broadcastable to `y_true`. Defaults to `1`. + + Returns: + Update op. + """ + + if not self.sparse_y_true: + y_true = ops.argmax(y_true, axis=self.axis) + if not self.sparse_y_pred: + y_pred = ops.argmax(y_pred, axis=self.axis) + + y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) + y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype) + + # Flatten the input if its rank > 1. + if len(y_pred.shape) > 1: + y_pred = ops.reshape(y_pred, [-1]) + + if len(y_true.shape) > 1: + y_true = ops.reshape(y_true, [-1]) + + if sample_weight is None: + sample_weight = 1 + else: + if ( + hasattr(sample_weight, "dtype") + and "float" in str(sample_weight.dtype) + and "int" in str(self.dtype) + ): + warnings.warn( + "You are passing weight as `float`, but dtype is `int`. " + "This may result in an incorrect weight due to type casting" + " Consider using integer weights." 
+ ) + sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype) + + if len(sample_weight.shape) > 1: + sample_weight = ops.reshape(sample_weight, [-1]) + + sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true)) + + if self.ignore_class is not None: + ignore_class = ops.convert_to_tensor( + self.ignore_class, y_true.dtype + ) + valid_mask = ops.not_equal(y_true, ignore_class) + y_true = y_true * ops.cast(valid_mask, y_true.dtype) + y_pred = y_pred * ops.cast(valid_mask, y_pred.dtype) + if sample_weight is not None: + sample_weight = sample_weight * ops.cast( + valid_mask, sample_weight.dtype + ) + + y_pred = ops.cast(y_pred, dtype=self.dtype) + y_true = ops.cast(y_true, dtype=self.dtype) + sample_weight = ops.cast(sample_weight, dtype=self.dtype) + + current_cm = confusion_matrix( + y_true, + y_pred, + self.num_classes, + weights=sample_weight, + dtype=self.dtype, + ) + + return self.total_cm.assign(self.total_cm + current_cm) + + def reset_state(self): + self.total_cm.assign( + ops.zeros(self.total_cm.shape, dtype=self.total_cm.dtype) + ) + + +@keras_export("keras.metrics.IoU") +class IoU(_IoUBase): + """Computes the Intersection-Over-Union metric for specific target classes. + + Formula: + + ```python + iou = true_positives / (true_positives + false_positives + false_negatives) + ``` + Intersection-Over-Union is a common evaluation metric for semantic image + segmentation. + + To compute IoUs, the predictions are accumulated in a confusion matrix, + weighted by `sample_weight` and the metric is then calculated from it. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + Note, this class first computes IoUs for all individual classes, then + returns the mean of IoUs for the classes that are specified by + `target_class_ids`. If `target_class_ids` has only one id value, the IoU of + that specific class is returned. 
+ + Args: + num_classes: The possible number of labels the prediction task can have. + target_class_ids: A tuple or list of target class ids for which the + metric is returned. To compute IoU for a specific class, a list + (or tuple) of a single id value should be provided. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + ignore_class: Optional integer. The ID of a class to be ignored during + metric computation. This is useful, for example, in segmentation + problems featuring a "void" class (commonly -1 or 255) in + segmentation maps. By default (`ignore_class=None`), all classes are + considered. + sparse_y_true: Whether labels are encoded using integers or + dense floating point vectors. If `False`, the `argmax` function + is used to determine each sample's most likely associated label. + sparse_y_pred: Whether predictions are encoded using integers or + dense floating point vectors. If `False`, the `argmax` function + is used to determine each sample's most likely associated label. + axis: (Optional) -1 is the dimension containing the logits. + Defaults to `-1`. + + Examples: + + >>> # cm = [[1, 1], + >>> # [1, 1]] + >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1] + >>> # iou = true_positives / (sum_row + sum_col - true_positives)) + >>> # iou = [0.33, 0.33] + >>> m = keras.metrics.IoU(num_classes=2, target_class_ids=[0]) + >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1]) + >>> m.result() + 0.33333334 + + >>> m.reset_state() + >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1], + ... 
sample_weight=[0.3, 0.3, 0.3, 0.1]) + >>> # cm = [[0.3, 0.3], + >>> # [0.3, 0.1]] + >>> # sum_row = [0.6, 0.4], sum_col = [0.6, 0.4], + >>> # true_positives = [0.3, 0.1] + >>> # iou = [0.33, 0.14] + >>> m.result() + 0.33333334 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.IoU(num_classes=2, target_class_ids=[0])]) + ``` + """ + + def __init__( + self, + num_classes, + target_class_ids, + name=None, + dtype=None, + ignore_class=None, + sparse_y_true=True, + sparse_y_pred=True, + axis=-1, + ): + super().__init__( + name=name, + num_classes=num_classes, + ignore_class=ignore_class, + sparse_y_true=sparse_y_true, + sparse_y_pred=sparse_y_pred, + axis=axis, + dtype=dtype, + ) + if max(target_class_ids) >= num_classes: + raise ValueError( + f"Target class id {max(target_class_ids)} " + "is out of range, which is " + f"[{0}, {num_classes})." + ) + self.target_class_ids = list(target_class_ids) + + def result(self): + """Compute the intersection-over-union via the confusion matrix.""" + sum_over_row = ops.cast( + ops.sum(self.total_cm, axis=0), dtype=self.dtype + ) + sum_over_col = ops.cast( + ops.sum(self.total_cm, axis=1), dtype=self.dtype + ) + true_positives = ops.cast(ops.diag(self.total_cm), dtype=self.dtype) + + # sum_over_row + sum_over_col = + # 2 * true_positives + false_positives + false_negatives. + denominator = sum_over_row + sum_over_col - true_positives + + target_class_ids = ops.convert_to_tensor( + self.target_class_ids, dtype="int32" + ) + + # Only keep the target classes + true_positives = ops.take_along_axis( + true_positives, target_class_ids, axis=-1 + ) + denominator = ops.take_along_axis( + denominator, target_class_ids, axis=-1 + ) + denominator = ops.cast(denominator, dtype="float32") + + # If the denominator is 0, we need to ignore the class. 
+ num_valid_entries = ops.sum( + ops.cast(ops.greater(denominator, 1e-9), dtype="float32") + ) + + iou = ops.divide(true_positives, denominator + backend.epsilon()) + + return ops.divide( + ops.sum(iou, axis=self.axis), num_valid_entries + backend.epsilon() + ) + + def get_config(self): + config = { + "num_classes": self.num_classes, + "target_class_ids": self.target_class_ids, + "ignore_class": self.ignore_class, + "sparse_y_true": self.sparse_y_true, + "sparse_y_pred": self.sparse_y_pred, + "axis": self.axis, + } + base_config = super().get_config() + return dict(list(base_config.items()) + list(config.items())) + + +@keras_export("keras.metrics.BinaryIoU") +class BinaryIoU(IoU): + """Computes the Intersection-Over-Union metric for class 0 and/or 1. + + Formula: + + ```python + iou = true_positives / (true_positives + false_positives + false_negatives) + ``` + Intersection-Over-Union is a common evaluation metric for semantic image + segmentation. + + To compute IoUs, the predictions are accumulated in a confusion matrix, + weighted by `sample_weight` and the metric is then calculated from it. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + This class can be used to compute IoUs for a binary classification task + where the predictions are provided as logits. First a `threshold` is applied + to the predicted values such that those that are below the `threshold` are + converted to class 0 and those that are above the `threshold` are converted + to class 1. + + IoUs for classes 0 and 1 are then computed, the mean of IoUs for the classes + that are specified by `target_class_ids` is returned. + + Note: with `threshold=0`, this metric has the same behavior as `IoU`. + + Args: + target_class_ids: A tuple or list of target class ids for which the + metric is returned. Options are `[0]`, `[1]`, or `[0, 1]`. With + `[0]` (or `[1]`), the IoU metric for class 0 (or class 1, + respectively) is returned. 
With `[0, 1]`, the mean of IoUs for the + two classes is returned. + threshold: A threshold that applies to the prediction logits to convert + them to either predicted class 0 if the logit is below `threshold` + or predicted class 1 if the logit is above `threshold`. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + Example: + + >>> m = keras.metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3) + >>> m.update_state([0, 1, 0, 1], [0.1, 0.2, 0.4, 0.7]) + >>> m.result() + 0.33333334 + + >>> m.reset_state() + >>> m.update_state([0, 1, 0, 1], [0.1, 0.2, 0.4, 0.7], + ... sample_weight=[0.2, 0.3, 0.4, 0.1]) + >>> # cm = [[0.2, 0.4], + >>> # [0.3, 0.1]] + >>> # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], + >>> # true_positives = [0.2, 0.1] + >>> # iou = [0.222, 0.125] + >>> m.result() + 0.17361112 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.BinaryIoU( + target_class_ids=[0], + threshold=0.5 + )] + ) + ``` + """ + + def __init__( + self, + target_class_ids=(0, 1), + threshold=0.5, + name=None, + dtype=None, + ): + super().__init__( + num_classes=2, + target_class_ids=target_class_ids, + name=name, + dtype=dtype, + ) + self.threshold = threshold + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates the confusion matrix statistics. + + Before the confusion matrix is updated, the predicted values are + thresholded to be: + 0 for values that are smaller than the `threshold` + 1 for values that are larger or equal to the `threshold` + + Args: + y_true: The ground truth values. + y_pred: The predicted values. + sample_weight: Optional weighting of each example. Can + be a `Tensor` whose rank is either 0, or the same as `y_true`, + and must be broadcastable to `y_true`. Defaults to `1`. + + Returns: + Update op. 
+ """ + y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) + # convert y_pred on float 32 and cast just after to dtype + y_pred = ops.convert_to_tensor(y_pred, dtype="float32") + y_pred = ops.cast(y_pred >= self.threshold, self.dtype) + return super().update_state(y_true, y_pred, sample_weight) + + def get_config(self): + return { + "target_class_ids": self.target_class_ids, + "threshold": self.threshold, + "name": self.name, + "dtype": self._dtype, + } + + +@keras_export("keras.metrics.MeanIoU") +class MeanIoU(IoU): + """Computes the mean Intersection-Over-Union metric. + + Formula: + + ```python + iou = true_positives / (true_positives + false_positives + false_negatives) + ``` + Intersection-Over-Union is a common evaluation metric for semantic image + segmentation. + + To compute IoUs, the predictions are accumulated in a confusion matrix, + weighted by `sample_weight` and the metric is then calculated from it. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + Note that this class first computes IoUs for all individual classes, then + returns the mean of these values. + + Args: + num_classes: The possible number of labels the prediction task can have. + This value must be provided, since a confusion matrix of dimension = + [num_classes, num_classes] will be allocated. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + ignore_class: Optional integer. The ID of a class to be ignored during + metric computation. This is useful, for example, in segmentation + problems featuring a "void" class (commonly -1 or 255) in + segmentation maps. By default (`ignore_class=None`), all classes are + considered. + sparse_y_true: Whether labels are encoded using integers or + dense floating point vectors. If `False`, the `argmax` function + is used to determine each sample's most likely associated label. 
+ sparse_y_pred: Whether predictions are encoded using integers or + dense floating point vectors. If `False`, the `argmax` function + is used to determine each sample's most likely associated label. + axis: (Optional) The dimension containing the logits. Defaults to `-1`. + + Example: + + Example: + + >>> # cm = [[1, 1], + >>> # [1, 1]] + >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1] + >>> # iou = true_positives / (sum_row + sum_col - true_positives)) + >>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33 + >>> m = keras.metrics.MeanIoU(num_classes=2) + >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1]) + >>> m.result() + 0.33333334 + + >>> m.reset_state() + >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1], + ... sample_weight=[0.3, 0.3, 0.3, 0.1]) + >>> m.result().numpy() + 0.23809525 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.MeanIoU(num_classes=2)]) + ``` + """ + + def __init__( + self, + num_classes, + name=None, + dtype=None, + ignore_class=None, + sparse_y_true=True, + sparse_y_pred=True, + axis=-1, + ): + target_class_ids = list(range(num_classes)) + super().__init__( + name=name, + num_classes=num_classes, + target_class_ids=target_class_ids, + axis=axis, + dtype=dtype, + ignore_class=ignore_class, + sparse_y_true=sparse_y_true, + sparse_y_pred=sparse_y_pred, + ) + + def get_config(self): + return { + "num_classes": self.num_classes, + "name": self.name, + "dtype": self._dtype, + "ignore_class": self.ignore_class, + "sparse_y_true": self.sparse_y_true, + "sparse_y_pred": self.sparse_y_pred, + "axis": self.axis, + } + + +@keras_export("keras.metrics.OneHotIoU") +class OneHotIoU(IoU): + """Computes the Intersection-Over-Union metric for one-hot encoded labels. + + Formula: + + ```python + iou = true_positives / (true_positives + false_positives + false_negatives) + ``` + Intersection-Over-Union is a common evaluation metric for semantic image + segmentation. 
+ + To compute IoUs, the predictions are accumulated in a confusion matrix, + weighted by `sample_weight` and the metric is then calculated from it. + + If `sample_weight` is `None`, weights default to 1. + Use `sample_weight` of 0 to mask values. + + This class can be used to compute IoU for multi-class classification tasks + where the labels are one-hot encoded (the last axis should have one + dimension per class). Note that the predictions should also have the same + shape. To compute the IoU, first the labels and predictions are converted + back into integer format by taking the argmax over the class axis. Then the + same computation steps as for the base `IoU` class apply. + + Note, if there is only one channel in the labels and predictions, this class + is the same as class `IoU`. In this case, use `IoU` instead. + + Also, make sure that `num_classes` is equal to the number of classes in the + data, to avoid a "labels out of bound" error when the confusion matrix is + computed. + + Args: + num_classes: The possible number of labels the prediction task can have. + target_class_ids: A tuple or list of target class ids for which the + metric is returned. To compute IoU for a specific class, a list + (or tuple) of a single id value should be provided. + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + ignore_class: Optional integer. The ID of a class to be ignored during + metric computation. This is useful, for example, in segmentation + problems featuring a "void" class (commonly -1 or 255) in + segmentation maps. By default (`ignore_class=None`), all classes are + considered. + sparse_y_pred: Whether predictions are encoded using integers or + dense floating point vectors. If `False`, the `argmax` function + is used to determine each sample's most likely associated label. + axis: (Optional) The dimension containing the logits. Defaults to `-1`. 
@keras_export("keras.metrics.OneHotMeanIoU")
class OneHotMeanIoU(MeanIoU):
    """Mean Intersection-Over-Union for one-hot encoded labels.

    Formula:

    ```python
    iou = true_positives / (true_positives + false_positives + false_negatives)
    ```

    IoU is a common evaluation metric for semantic image segmentation.
    Predictions are accumulated in a confusion matrix, weighted by
    `sample_weight`, and the metric is then computed from that matrix.

    Unlike the base `MeanIoU`, this class expects labels that are one-hot
    encoded along the last axis (one dimension per class), and predictions
    of the same shape. Both are converted back to integer class ids by
    taking the `argmax` over the class axis before the usual `MeanIoU`
    computation. If labels and predictions have only one channel, this
    class behaves exactly like `MeanIoU`, which should then be used
    directly.

    If `sample_weight` is `None`, weights default to 1; a weight of 0
    masks values. Make sure `num_classes` equals the number of classes in
    the data, otherwise computing the confusion matrix can fail with a
    "labels out of bound" error.

    Args:
        num_classes: The possible number of labels the prediction task can
            have.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        ignore_class: Optional integer. The ID of a class to be ignored
            during metric computation. This is useful, for example, in
            segmentation problems featuring a "void" class (commonly -1 or
            255) in segmentation maps. By default (`ignore_class=None`),
            all classes are considered.
        sparse_y_pred: Whether predictions are encoded using natural
            numbers or probability distribution vectors. If `False`, the
            `argmax` function will be used to determine each sample's most
            likely associated label.
        axis: (Optional) The dimension containing the logits. Defaults to
            `-1`.

    Example:

    ```python
    m = keras.metrics.OneHotMeanIoU(num_classes=3)
    m.update_state(
        y_true=[[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]],
        y_pred=[[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
                [0.1, 0.4, 0.5]],
        sample_weight=[0.1, 0.2, 0.3, 0.4])
    m.result()  # ~0.048
    ```

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.OneHotMeanIoU(num_classes=3)])
    ```
    """

    def __init__(
        self,
        num_classes,
        name=None,
        dtype=None,
        ignore_class=None,
        sparse_y_pred=False,
        axis=-1,
    ):
        # Ground truth is one-hot (dense) by definition here, so
        # `sparse_y_true` is pinned to False; every other option is
        # forwarded to the `MeanIoU` base class unchanged.
        super().__init__(
            num_classes=num_classes,
            axis=axis,
            name=name,
            dtype=dtype,
            ignore_class=ignore_class,
            sparse_y_true=False,
            sparse_y_pred=sparse_y_pred,
        )

    def get_config(self):
        """Return the constructor arguments needed to re-create this metric."""
        config = dict(
            num_classes=self.num_classes,
            name=self.name,
            dtype=self._dtype,
            ignore_class=self.ignore_class,
            sparse_y_pred=self.sparse_y_pred,
            axis=self.axis,
        )
        return config
@keras_export(["keras.Metric", "keras.metrics.Metric"])
class Metric(KerasSaveable):
    """Encapsulates metric logic and state.

    Args:
        name: Optional name for the metric instance.
        dtype: The dtype of the metric's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Example:

    ```python
    m = SomeMetric(...)
    for input in ...:
        m.update_state(input)
    print('Final result: ', m.result())
    ```

    Usage with `compile()` API:

    ```python
    model = keras.Sequential()
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dense(10, activation='softmax'))

    model.compile(optimizer=keras.optimizers.RMSprop(0.01),
                  loss=keras.losses.CategoricalCrossentropy(),
                  metrics=[keras.metrics.CategoricalAccuracy()])

    data = np.random.random((1000, 32))
    labels = np.random.random((1000, 10))

    model.fit(data, labels, epochs=10)
    ```

    To be implemented by subclasses:

    * `__init__()`: All state variables should be created in this method by
      calling `self.add_variable()` like: `self.var = self.add_variable(...)`
    * `update_state()`: Has all updates to the state variables like:
      `self.var.assign(...)`.
    * `result()`: Computes and returns a scalar value or a dict of scalar values
      for the metric from the state variables.

    Example subclass implementation:

    ```python
    class BinaryTruePositives(Metric):

        def __init__(self, name='binary_true_positives', **kwargs):
            super().__init__(name=name, **kwargs)
            self.true_positives = self.add_variable(
                shape=(),
                initializer='zeros',
                name='true_positives'
            )

        def update_state(self, y_true, y_pred, sample_weight=None):
            y_true = ops.cast(y_true, "bool")
            y_pred = ops.cast(y_pred, "bool")

            values = ops.logical_and(
                ops.equal(y_true, True), ops.equal(y_pred, True))
            values = ops.cast(values, self.dtype)
            if sample_weight is not None:
                sample_weight = ops.cast(sample_weight, self.dtype)
                sample_weight = ops.broadcast_to(
                    sample_weight, ops.shape(values)
                )
                values = ops.multiply(values, sample_weight)
            self.true_positives.assign(self.true_positives + ops.sum(values))

        def result(self):
            return self.true_positives
    ```
    """

    def __init__(self, dtype=None, name=None):
        # Auto-generate a unique name (e.g. "metric_1") if none is given.
        self.name = name or auto_name(self.__class__.__name__)
        # `dtype` may be a dtype string or a DTypePolicy; the metric's
        # working dtype is the policy's compute dtype.
        self._dtype_policy = dtype_policies.get(dtype or backend.floatx())
        self._dtype = self._dtype_policy.compute_dtype
        # State variables and nested sub-metrics attached to this metric.
        # The Tracker below fills these lists automatically whenever a
        # matching object is assigned as an attribute (see `__setattr__`).
        self._metrics = []
        self._variables = []
        self._tracker = Tracker(
            {
                "variables": (
                    lambda x: isinstance(x, backend.Variable),
                    self._variables,
                ),
                "metrics": (lambda x: isinstance(x, Metric), self._metrics),
            }
        )

    def reset_state(self):
        """Reset all of the metric state variables.

        This function is called between epochs/steps,
        when a metric is evaluated during training.
        """
        # Zero out every state variable, including those owned by nested
        # sub-metrics (the `variables` property flattens them in).
        for v in self.variables:
            v.assign(ops.zeros(v.shape, dtype=v.dtype))

    def update_state(self, *args, **kwargs):
        """Accumulate statistics for the metric.

        Must be overridden by subclasses to update the state variables.
        """
        raise NotImplementedError

    def stateless_update_state(self, metric_variables, *args, **kwargs):
        """Functional (side-effect free) version of `update_state`.

        Runs `update_state` inside a `StatelessScope` so this metric's
        variables are not mutated; the updated values are returned
        instead.

        Args:
            metric_variables: List of tensors mapping one-to-one (same
                order and length) onto `self.variables`.

        Returns:
            List of updated variable values, ordered like
            `self.variables`. Variables untouched by `update_state` are
            returned unchanged.
        """
        if len(metric_variables) != len(self.variables):
            raise ValueError(
                "Argument `metric_variables` must be a list of tensors "
                f"corresponding 1:1 to {self.__class__.__name__}().variables. "
                f"Received list with length {len(metric_variables)}, but "
                f"expected {len(self.variables)} variables."
            )
        # Gather variable mapping
        mapping = list(zip(self.variables, metric_variables))

        # Call in stateless scope
        with backend.StatelessScope(state_mapping=mapping) as scope:
            self.update_state(*args, **kwargs)

        # Gather updated variables
        metric_variables = []
        for v in self.variables:
            new_v = scope.get_current_value(v)
            if new_v is not None:
                metric_variables.append(new_v)
            else:
                # No update was recorded for this variable; keep it as-is.
                metric_variables.append(v)
        return metric_variables

    def result(self):
        """Compute the current metric value.

        Returns:
            A scalar tensor, or a dictionary of scalar tensors.
        """
        raise NotImplementedError

    def stateless_result(self, metric_variables):
        """Compute `result()` from externally supplied variable values.

        Like `stateless_update_state`, this does not read or mutate the
        metric's own variables; `metric_variables` is substituted for
        them inside a `StatelessScope`.
        """
        if len(metric_variables) != len(self.variables):
            raise ValueError(
                "Argument `metric_variables` must be a list of tensors "
                f"corresponding 1:1 to {self.__class__.__name__}().variables. "
                f"Received list with length {len(metric_variables)}, but "
                f"expected {len(self.variables)} variables."
            )
        # Gather variable mapping
        mapping = list(zip(self.variables, metric_variables))

        # Call in stateless scope
        with backend.StatelessScope(state_mapping=mapping):
            res = self.result()
        return res

    def stateless_reset_state(self):
        """Functional version of `reset_state`.

        Returns the freshly reset variable values without mutating the
        metric's own variables.
        """
        # Call in stateless scope
        with backend.StatelessScope() as scope:
            self.reset_state()

        # Gather updated variables
        metric_variables = []
        for v in self.variables:
            new_v = scope.get_current_value(v)
            if new_v is not None:
                metric_variables.append(new_v)
            else:
                metric_variables.append(v)
        return metric_variables

    @property
    def dtype(self):
        # Compute dtype of the metric (derived from the dtype policy).
        return self._dtype

    def _obj_type(self):
        # Identifies this saveable's category for Keras serialization.
        return "Metric"

    def add_variable(
        self, shape, initializer, dtype=None, aggregation="sum", name=None
    ):
        """Create a non-trainable state variable tracked by this metric."""
        self._check_super_called()
        # "/" is reserved in variable paths; ">" keeps the name readable.
        with backend.name_scope(self.name.replace("/", ">"), caller=self):
            initializer = initializers.get(initializer)
            variable = backend.Variable(
                initializer=initializer,
                shape=shape,
                dtype=dtype,
                trainable=False,
                aggregation=aggregation,
                name=name,
            )
        # Prevent double-tracking: register directly with the tracker
        # store rather than via attribute assignment.
        self._tracker.add_to_store("variables", variable)
        return variable

    def add_weight(self, shape=(), initializer=None, dtype=None, name=None):
        # Backwards compatibility alias. Note it does not expose
        # `aggregation`; `add_variable`'s default ("sum") is used.
        return self.add_variable(
            shape=shape, initializer=initializer, dtype=dtype, name=name
        )

    @property
    def variables(self):
        # Own variables first, then those of nested sub-metrics.
        variables = list(self._variables)
        for metric in self._metrics:
            variables.extend(metric.variables)
        return variables

    def __call__(self, *args, **kwargs):
        """Update state with the given inputs, then return the result."""
        self._check_super_called()
        self.update_state(*args, **kwargs)
        return self.result()

    def get_config(self):
        """Return the serializable config of the metric."""
        return {"name": self.name, "dtype": self.dtype}

    @classmethod
    def from_config(cls, config):
        # Inverse of `get_config`: rebuild the metric from its config.
        return cls(**config)

    def __setattr__(self, name, value):
        # Track Variables, Layers, Metrics: once the tracker exists,
        # every attribute assignment is routed through it so matching
        # objects land in `_variables` / `_metrics`.
        if hasattr(self, "_tracker"):
            value = self._tracker.track(value)
        return super().__setattr__(name, value)

    def _check_super_called(self):
        # The tracker is created in `__init__`; its absence means a
        # subclass skipped the super() call.
        if not hasattr(self, "_tracker"):
            raise RuntimeError(
                "You forgot to call `super().__init__()` "
                "in the `__init__()` method. Go add it!"
            )

    def __repr__(self):
        return f"<{self.__class__.__name__} " f"name={self.name}>"

    def __str__(self):
        return self.__repr__()
class AUCCurve(Enum):
    """Kind of curve over which AUC is computed: ROC or precision-recall."""

    ROC = "ROC"
    PR = "PR"

    @staticmethod
    def from_str(key):
        """Map the string `key` ("pr"/"PR"/"roc"/"ROC") to an enum member."""
        lookup = {
            "pr": AUCCurve.PR,
            "PR": AUCCurve.PR,
            "roc": AUCCurve.ROC,
            "ROC": AUCCurve.ROC,
        }
        member = lookup.get(key)
        if member is None:
            raise ValueError(
                f'Invalid AUC curve value: "{key}". '
                'Expected values are ["PR", "ROC"]'
            )
        return member
' + 'Expected values are ["interpolation", "majoring", "minoring"]' + ) + + +def _update_confusion_matrix_variables_optimized( + variables_to_update, + y_true, + y_pred, + thresholds, + multi_label=False, + sample_weights=None, + label_weights=None, + thresholds_with_epsilon=False, +): + """Update confusion matrix variables with memory efficient alternative. + + Note that the thresholds need to be evenly distributed within the list, eg, + the diff between consecutive elements are the same. + + To compute TP/FP/TN/FN, we are measuring a binary classifier + C(t) = (predictions >= t) + at each threshold 't'. So we have + TP(t) = sum( C(t) * true_labels ) + FP(t) = sum( C(t) * false_labels ) + + But, computing C(t) requires computation for each t. To make it fast, + observe that C(t) is a cumulative integral, and so if we have + thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1} + where n = num_thresholds, and if we can compute the bucket function + B(i) = Sum( (predictions == t), t_i <= t < t{i+1} ) + then we get + C(t_i) = sum( B(j), j >= i ) + which is the reversed cumulative sum in ops.cumsum(). + + We can compute B(i) efficiently by taking advantage of the fact that + our thresholds are evenly distributed, in that + width = 1.0 / (num_thresholds - 1) + thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0] + Given a prediction value p, we can map it to its bucket by + bucket_index(p) = floor( p * (num_thresholds - 1) ) + so we can use ops.segment_sum() to update the buckets in one pass. + + Consider following example: + y_true = [0, 0, 1, 1] + y_pred = [0.1, 0.5, 0.3, 0.9] + thresholds = [0.0, 0.5, 1.0] + num_buckets = 2 # [0.0, 1.0], (1.0, 2.0] + bucket_index(y_pred) = ops.floor(y_pred * num_buckets) + = ops.floor([0.2, 1.0, 0.6, 1.8]) + = [0, 0, 0, 1] + # The meaning of this bucket is that if any of the label is true, + # then 1 will be added to the corresponding bucket with the index. + # Eg, if the label for 0.2 is true, then 1 will be added to bucket 0. 
If the + # label for 1.8 is true, then 1 will be added to bucket 1. + # + # Note the second item "1.0" is floored to 0, since the value need to be + # strictly larger than the bucket lower bound. + # In the implementation, we use ops.ceil() - 1 to achieve this. + tp_bucket_value = ops.segment_sum(true_labels, bucket_indices, + num_segments=num_thresholds) + = [1, 1, 0] + # For [1, 1, 0] here, it means there is 1 true value contributed by bucket + # 0, and 1 value contributed by bucket 1. When we aggregate them to + # together, the result become [a + b + c, b + c, c], since large thresholds + # will always contribute to the value for smaller thresholds. + true_positive = ops.cumsum(tp_bucket_value, reverse=True) + = [2, 1, 0] + + This implementation exhibits a run time and space complexity of O(T + N), + where T is the number of thresholds and N is the size of predictions. + Metrics that rely on standard implementation instead exhibit a complexity of + O(T * N). + + Args: + variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid + keys and corresponding variables to update as values. + y_true: A floating point `Tensor` whose shape matches `y_pred`. Will be + cast to `bool`. + y_pred: A floating point `Tensor` of arbitrary shape and whose values + are in the range `[0, 1]`. + thresholds: A sorted floating point `Tensor` with value in `[0, 1]`. + It need to be evenly distributed (the diff between each element need + to be the same). + multi_label: Optional boolean indicating whether multidimensional + prediction/labels should be treated as multilabel responses, or + flattened into a single label. When True, the values of + `variables_to_update` must have a second dimension equal to the + number of labels in y_true and y_pred, and those tensors must not be + RaggedTensors. 
+ sample_weights: Optional `Tensor` whose rank is either 0, or the same + rank as `y_true`, and must be broadcastable to `y_true` (i.e., all + dimensions must be either `1`, or the same as the corresponding + `y_true` dimension). + label_weights: Optional tensor of non-negative weights for multilabel + data. The weights are applied when calculating TP, FP, FN, and TN + without explicit multilabel handling (i.e. when the data is to be + flattened). + thresholds_with_epsilon: Optional boolean indicating whether the leading + and tailing thresholds has any epsilon added for floating point + imprecisions. It will change how we handle the leading and tailing + bucket. + """ + num_thresholds = ops.shape(thresholds)[0] + + if sample_weights is None: + sample_weights = 1.0 + else: + sample_weights = ops.broadcast_to( + ops.cast(sample_weights, dtype=y_pred.dtype), ops.shape(y_pred) + ) + if not multi_label: + sample_weights = ops.reshape(sample_weights, [-1]) + if label_weights is None: + label_weights = 1.0 + else: + label_weights = ops.expand_dims(label_weights, 0) + label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred)) + if not multi_label: + label_weights = ops.reshape(label_weights, [-1]) + weights = ops.cast( + ops.multiply(sample_weights, label_weights), y_true.dtype + ) + + # We shouldn't need this, but in case there are predict value that is out of + # the range of [0.0, 1.0] + y_pred = ops.clip(y_pred, x_min=0.0, x_max=1.0) + + y_true = ops.cast(ops.cast(y_true, "bool"), y_true.dtype) + if not multi_label: + y_true = ops.reshape(y_true, [-1]) + y_pred = ops.reshape(y_pred, [-1]) + + true_labels = ops.multiply(y_true, weights) + false_labels = ops.multiply((1.0 - y_true), weights) + + # Compute the bucket indices for each prediction value. + # Since the predict value has to be strictly greater than the thresholds, + # eg, buckets like [0, 0.5], (0.5, 1], and 0.5 belongs to first bucket. + # We have to use math.ceil(val) - 1 for the bucket. 
+ bucket_indices = ( + ops.ceil(y_pred * (ops.cast(num_thresholds, dtype=y_pred.dtype) - 1)) + - 1 + ) + + if thresholds_with_epsilon: + # In this case, the first bucket should actually take into account since + # the any prediction between [0.0, 1.0] should be larger than the first + # threshold. We change the bucket value from -1 to 0. + bucket_indices = ops.relu(bucket_indices) + + bucket_indices = ops.cast(bucket_indices, "int32") + + if multi_label: + # We need to run bucket segment sum for each of the label class. In the + # multi_label case, the rank of the label is 2. We first transpose it so + # that the label dim becomes the first and we can parallel run though + # them. + true_labels = ops.transpose(true_labels) + false_labels = ops.transpose(false_labels) + bucket_indices = ops.transpose(bucket_indices) + + def gather_bucket(label_and_bucket_index): + label, bucket_index = ( + label_and_bucket_index[0], + label_and_bucket_index[1], + ) + return ops.segment_sum( + data=label, + segment_ids=bucket_index, + num_segments=num_thresholds, + ) + + tp_bucket_v = backend.vectorized_map( + gather_bucket, + (true_labels, bucket_indices), + ) + fp_bucket_v = backend.vectorized_map( + gather_bucket, (false_labels, bucket_indices) + ) + tp = ops.transpose(ops.flip(ops.cumsum(ops.flip(tp_bucket_v), axis=1))) + fp = ops.transpose(ops.flip(ops.cumsum(ops.flip(fp_bucket_v), axis=1))) + else: + tp_bucket_v = ops.segment_sum( + data=true_labels, + segment_ids=bucket_indices, + num_segments=num_thresholds, + ) + fp_bucket_v = ops.segment_sum( + data=false_labels, + segment_ids=bucket_indices, + num_segments=num_thresholds, + ) + tp = ops.flip(ops.cumsum(ops.flip(tp_bucket_v))) + fp = ops.flip(ops.cumsum(ops.flip(fp_bucket_v))) + + # fn = sum(true_labels) - tp + # tn = sum(false_labels) - fp + if ( + ConfusionMatrix.TRUE_NEGATIVES in variables_to_update + or ConfusionMatrix.FALSE_NEGATIVES in variables_to_update + ): + if multi_label: + total_true_labels = 
def is_evenly_distributed_thresholds(thresholds):
    """Return whether `thresholds` forms an even grid over [0, 1].

    Evenly distributed thresholds let metrics like AUC use a
    memory-efficient bucketing implementation, since each individual
    threshold does not have to be evaluated separately.

    A sequence counts as evenly distributed when entry `i` equals
    `i / (n - 1)` (linspace-style spacing); fewer than three entries
    never qualify.

    Args:
        thresholds: A python list or tuple, or 1D numpy array whose
            values are ranged in [0, 1].

    Returns:
        Boolean, whether the values in the input are evenly distributed.
    """
    n = len(thresholds)
    if n < 3:
        return False
    # Reference grid: i / (n - 1) for i in 0..n-1, compared within
    # backend epsilon tolerance.
    reference = np.arange(n, dtype=np.float32) / (n - 1)
    return np.allclose(thresholds, reference, atol=backend.epsilon())
+ + For every pair of values in y_true and y_pred: + + true_positive: y_true == True and y_pred > thresholds + false_negatives: y_true == True and y_pred <= thresholds + true_negatives: y_true == False and y_pred <= thresholds + false_positive: y_true == False and y_pred > thresholds + + The results will be weighted and added together. When multiple thresholds + are provided, we will repeat the same for every threshold. + + For estimation of these metrics over a stream of data, the function creates + an `update_op` operation that updates the given variables. + + If `sample_weight` is `None`, weights default to 1. + Use weights of 0 to mask values. + + Args: + variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys + and corresponding variables to update as values. + y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`. + y_pred: A floating point `Tensor` of arbitrary shape and whose values are + in the range `[0, 1]`. + thresholds: A float value, float tensor, python list, or tuple of float + thresholds in `[0, 1]`, or NEG_INF (used when top_k is set). + top_k: Optional int, indicates that the positive labels should be limited + to the top k predictions. + class_id: Optional int, limits the prediction and labels to the class + specified by this argument. + sample_weight: Optional `Tensor` whose rank is either 0, or the same rank + as `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions + must be either `1`, or the same as the corresponding `y_true` + dimension). + multi_label: Optional boolean indicating whether multidimensional + prediction/labels should be treated as multilabel responses, or + flattened into a single label. When True, the values of + `variables_to_update` must have a second dimension equal to the number + of labels in y_true and y_pred, and those tensors must not be + RaggedTensors. + label_weights: (optional) tensor of non-negative weights for multilabel + data. 
The weights are applied when calculating TP, FP, FN, and TN + without explicit multilabel handling (i.e. when the data is to be + flattened). + thresholds_distributed_evenly: Boolean, whether the thresholds are evenly + distributed within the list. An optimized method will be used if this is + the case. See _update_confusion_matrix_variables_optimized() for more + details. + + Raises: + ValueError: If `y_pred` and `y_true` have mismatched shapes, or if + `sample_weight` is not `None` and its shape doesn't match `y_pred`, or + if `variables_to_update` contains invalid keys. + """ + if multi_label and label_weights is not None: + raise ValueError( + "`label_weights` for multilabel data should be handled " + "outside of `update_confusion_matrix_variables` when " + "`multi_label` is True." + ) + if variables_to_update is None: + return + if not any( + key for key in variables_to_update if key in list(ConfusionMatrix) + ): + raise ValueError( + "Please provide at least one valid confusion matrix " + "variable to update. Valid variable key options are: " + f'"{list(ConfusionMatrix)}". ' + f'Received: "{variables_to_update.keys()}"' + ) + + variable_dtype = list(variables_to_update.values())[0].dtype + + y_true = ops.cast(y_true, dtype=variable_dtype) + y_pred = ops.cast(y_pred, dtype=variable_dtype) + + if thresholds_distributed_evenly: + # Check whether the thresholds has any leading or tailing epsilon added + # for floating point imprecision. The leading and tailing threshold will + # be handled bit differently as the corner case. At this point, + # thresholds should be a list/array with more than 2 items, and ranged + # between [0, 1]. See is_evenly_distributed_thresholds() for more + # details. 
+ thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0 + + thresholds = ops.convert_to_tensor(thresholds, dtype=variable_dtype) + num_thresholds = ops.shape(thresholds)[0] + + if multi_label: + one_thresh = ops.equal( + np.array(1, dtype="int32"), + len(thresholds.shape), + ) + else: + one_thresh = np.array(True, dtype="bool") + + invalid_keys = [ + key for key in variables_to_update if key not in list(ConfusionMatrix) + ] + if invalid_keys: + raise ValueError( + f'Invalid keys: "{invalid_keys}". ' + f'Valid variable key options are: "{list(ConfusionMatrix)}"' + ) + + y_pred, y_true = squeeze_or_expand_to_same_rank(y_pred, y_true) + if sample_weight is not None: + sample_weight = ops.expand_dims( + ops.cast(sample_weight, dtype=variable_dtype), axis=-1 + ) + _, sample_weight = squeeze_or_expand_to_same_rank( + y_true, sample_weight, expand_rank_1=False + ) + + if top_k is not None: + y_pred = _filter_top_k(y_pred, top_k) + + if class_id is not None: + if len(y_pred.shape) == 1: + raise ValueError( + "When class_id is provided, y_pred must be a 2D array " + "with shape (num_samples, num_classes), found shape: " + f"{y_pred.shape}" + ) + + # Preserve dimension to match with sample_weight + y_true = y_true[..., class_id, None] + y_pred = y_pred[..., class_id, None] + + if thresholds_distributed_evenly: + return _update_confusion_matrix_variables_optimized( + variables_to_update, + y_true, + y_pred, + thresholds, + multi_label=multi_label, + sample_weights=sample_weight, + label_weights=label_weights, + thresholds_with_epsilon=thresholds_with_epsilon, + ) + + if None in y_pred.shape: + pred_shape = ops.shape(y_pred) + num_predictions = pred_shape[0] + if len(y_pred.shape) == 1: + num_labels = 1 + else: + num_labels = ops.cast( + ops.prod(ops.array(pred_shape[1:]), axis=0), "int32" + ) + thresh_label_tile = ops.where(one_thresh, num_labels, 1) + else: + pred_shape = ops.shape(y_pred) + num_predictions = pred_shape[0] + if len(y_pred.shape) == 1: + 
num_labels = 1 + else: + num_labels = np.prod(pred_shape[1:], axis=0).astype("int32") + thresh_label_tile = np.where(one_thresh, num_labels, 1) + + # Reshape predictions and labels, adding a dim for thresholding. + if multi_label: + predictions_extra_dim = ops.expand_dims(y_pred, 0) + labels_extra_dim = ops.expand_dims(ops.cast(y_true, dtype="bool"), 0) + else: + # Flatten predictions and labels when not multilabel. + predictions_extra_dim = ops.reshape(y_pred, [1, -1]) + labels_extra_dim = ops.reshape(ops.cast(y_true, dtype="bool"), [1, -1]) + + # Tile the thresholds for every prediction. + if multi_label: + thresh_pretile_shape = [num_thresholds, 1, -1] + thresh_tiles = [1, num_predictions, thresh_label_tile] + data_tiles = [num_thresholds, 1, 1] + else: + thresh_pretile_shape = [num_thresholds, -1] + thresh_tiles = [1, num_predictions * num_labels] + data_tiles = [num_thresholds, 1] + + thresh_tiled = ops.tile( + ops.reshape(thresholds, thresh_pretile_shape), thresh_tiles + ) + + # Tile the predictions for every threshold. + preds_tiled = ops.tile(predictions_extra_dim, data_tiles) + + # Compare predictions and threshold. 
+ pred_is_pos = ops.greater(preds_tiled, thresh_tiled) + + # Tile labels by number of thresholds + label_is_pos = ops.tile(labels_extra_dim, data_tiles) + + if sample_weight is not None: + sample_weight = ops.broadcast_to( + ops.cast(sample_weight, dtype=y_pred.dtype), ops.shape(y_pred) + ) + weights_tiled = ops.tile( + ops.reshape(sample_weight, thresh_tiles), data_tiles + ) + else: + weights_tiled = None + + if label_weights is not None and not multi_label: + label_weights = ops.expand_dims(label_weights, 0) + label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred)) + label_weights_tiled = ops.tile( + ops.reshape(label_weights, thresh_tiles), data_tiles + ) + if weights_tiled is None: + weights_tiled = label_weights_tiled + else: + weights_tiled = ops.multiply(weights_tiled, label_weights_tiled) + + def weighted_assign_add(label, pred, weights, var): + label_and_pred = ops.cast(ops.logical_and(label, pred), dtype=var.dtype) + if weights is not None: + label_and_pred *= ops.cast(weights, dtype=var.dtype) + var.assign(var + ops.sum(label_and_pred, 1)) + + loop_vars = { + ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos), + } + update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update + update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update + update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update + + if update_fn or update_tn: + pred_is_neg = ops.logical_not(pred_is_pos) + loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg) + + if update_fp or update_tn: + label_is_neg = ops.logical_not(label_is_pos) + loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos) + if update_tn: + loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = ( + label_is_neg, + pred_is_neg, + ) + + for matrix_cond, (label, pred) in loop_vars.items(): + if matrix_cond in variables_to_update: + weighted_assign_add( + label, pred, weights_tiled, variables_to_update[matrix_cond] + ) + + +def _filter_top_k(x, k): + 
def confusion_matrix(
    labels,
    predictions,
    num_classes,
    weights=None,
    dtype="int32",
):
    """Computes the confusion matrix from predictions and labels.

    The matrix columns represent the prediction labels and the rows represent
    the real labels. The confusion matrix is always a 2-D array of shape
    `(n, n)`, where `n` is the number of valid labels for a given
    classification task. Both prediction and labels must be 1-D arrays of the
    same shape in order for this function to work.

    `num_classes` is required here: it is converted with `int()` and used as
    the output size, so passing `None` raises a `TypeError` (unlike
    `tf.math.confusion_matrix`, which infers it from the data). Class labels
    are expected to start at 0. For example, if `num_classes` is 3, then the
    possible labels would be `[0, 1, 2]`.

    If `weights` is not `None`, then each prediction contributes its
    corresponding weight to the total value of the confusion matrix cell.

    For example:

    ```python
    keras.metrics.metrics_utils.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
        [[0 0 0 0 0]
         [0 0 1 0 0]
         [0 0 1 0 0]
         [0 0 0 0 0]
         [0 0 0 0 1]]
    ```

    Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
    resulting in a 5x5 confusion matrix.

    Args:
        labels: 1-D tensor of real labels for the classification task.
        predictions: 1-D tensor of predictions for a given classification.
        num_classes: The possible number of labels the classification
            task can have; must not be `None`.
        weights: An optional tensor whose shape matches `predictions`.
        dtype: Data type of the confusion matrix.

    Returns:
        A tensor of type `dtype` with shape `(n, n)` representing the
        confusion matrix, where `n` is the number of possible labels in the
        classification task.
    """
    labels = ops.convert_to_tensor(labels, dtype)
    predictions = ops.convert_to_tensor(predictions, dtype)
    # Align ranks (e.g. drop a trailing singleton dim) so the two can be
    # stacked pairwise below.
    labels, predictions = squeeze_or_expand_to_same_rank(labels, predictions)

    predictions = ops.cast(predictions, dtype)
    labels = ops.cast(labels, dtype)

    if weights is not None:
        weights = ops.convert_to_tensor(weights, dtype)

    # Each (label, prediction) pair addresses one cell of the matrix.
    indices = ops.stack([labels, predictions], axis=1)
    # Unweighted counts contribute 1 per pair; otherwise use the weights.
    values = ops.ones_like(predictions, dtype) if weights is None else weights
    indices = ops.cast(indices, dtype="int64")
    values = ops.cast(values, dtype=dtype)
    num_classes = int(num_classes)
    # NOTE(review): assumes `ops.scatter` accumulates (adds) values for
    # duplicate (label, prediction) pairs, like `tf.scatter_nd` — confirm
    # this holds for the active backend.
    confusion_matrix = ops.scatter(indices, values, (num_classes, num_classes))
    return confusion_matrix
`y_true` and + `y_pred`. + + Formula: + + ```python + metric = y_true * log(y_true / y_pred) + ``` + + `y_true` and `y_pred` are expected to be probability + distributions, with values between 0 and 1. They will get + clipped to the `[0, 1]` range. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + + >>> m = keras.metrics.KLDivergence() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) + >>> m.result() + 0.45814306 + + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], + ... sample_weight=[1, 0]) + >>> m.result() + 0.9162892 + + Usage with `compile()` API: + + ```python + model.compile(optimizer='sgd', + loss='mse', + metrics=[keras.metrics.KLDivergence()]) + ``` + """ + + def __init__(self, name="kl_divergence", dtype=None): + super().__init__(fn=kl_divergence, name=name, dtype=dtype) + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.Poisson") +class Poisson(reduction_metrics.MeanMetricWrapper): + """Computes the Poisson metric between `y_true` and `y_pred`. + + Formula: + + ```python + metric = y_pred - y_true * log(y_pred) + ``` + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + + >>> m = keras.metrics.Poisson() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) + >>> m.result() + 0.49999997 + + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], + ... 
sample_weight=[1, 0]) + >>> m.result() + 0.99999994 + + Usage with `compile()` API: + + ```python + model.compile(optimizer='sgd', + loss='mse', + metrics=[keras.metrics.Poisson()]) + ``` + """ + + def __init__(self, name="poisson", dtype=None): + super().__init__(fn=poisson, name=name, dtype=dtype) + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.BinaryCrossentropy") +class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper): + """Computes the crossentropy metric between the labels and predictions. + + This is the crossentropy metric class to be used when there are only two + label classes (0 and 1). + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + from_logits: (Optional) Whether output is expected + to be a logits tensor. By default, we consider + that output encodes a probability distribution. + label_smoothing: (Optional) Float in `[0, 1]`. + When > 0, label values are smoothed, + meaning the confidence on label values are relaxed. + e.g. `label_smoothing=0.2` means that we will use + a value of 0.1 for label "0" and 0.9 for label "1". + + Examples: + + >>> m = keras.metrics.BinaryCrossentropy() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) + >>> m.result() + 0.81492424 + + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], + ... 
sample_weight=[1, 0]) + >>> m.result() + 0.9162905 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.BinaryCrossentropy()]) + ``` + """ + + def __init__( + self, + name="binary_crossentropy", + dtype=None, + from_logits=False, + label_smoothing=0, + ): + super().__init__( + binary_crossentropy, + name, + dtype=dtype, + from_logits=from_logits, + label_smoothing=label_smoothing, + ) + self.from_logits = from_logits + self.label_smoothing = label_smoothing + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return { + "name": self.name, + "dtype": self.dtype, + "from_logits": self.from_logits, + "label_smoothing": self.label_smoothing, + } + + +@keras_export("keras.metrics.CategoricalCrossentropy") +class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): + """Computes the crossentropy metric between the labels and predictions. + + This is the crossentropy metric class to be used when there are multiple + label classes (2 or more). It assumes that labels are one-hot encoded, + e.g., when labels values are `[2, 0, 1]`, then + `y_true` is `[[0, 0, 1], [1, 0, 0], [0, 1, 0]]`. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + from_logits: (Optional) Whether output is expected to be + a logits tensor. By default, we consider that output + encodes a probability distribution. + label_smoothing: (Optional) Float in `[0, 1]`. + When > 0, label values are smoothed, meaning the confidence + on label values are relaxed. e.g. `label_smoothing=0.2` means + that we will use a value of 0.1 for label + "0" and 0.9 for label "1". + axis: (Optional) Defaults to `-1`. + The dimension along which entropy is computed. + + Examples: + + >>> # EPSILON = 1e-7, y = y_true, y` = y_pred + >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. 
- EPSILON) + >>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] + >>> # xent = -sum(y * log(y'), axis = -1) + >>> # = -((log 0.95), (log 0.1)) + >>> # = [0.051, 2.302] + >>> # Reduced xent = (0.051 + 2.302) / 2 + >>> m = keras.metrics.CategoricalCrossentropy() + >>> m.update_state([[0, 1, 0], [0, 0, 1]], + ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) + >>> m.result() + 1.1769392 + + >>> m.reset_state() + >>> m.update_state([[0, 1, 0], [0, 0, 1]], + ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]], + ... sample_weight=np.array([0.3, 0.7])) + >>> m.result() + 1.6271976 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.CategoricalCrossentropy()]) + ``` + """ + + def __init__( + self, + name="categorical_crossentropy", + dtype=None, + from_logits=False, + label_smoothing=0, + axis=-1, + ): + super().__init__( + categorical_crossentropy, + name, + dtype=dtype, + from_logits=from_logits, + label_smoothing=label_smoothing, + axis=axis, + ) + self.from_logits = from_logits + self.label_smoothing = label_smoothing + self.axis = axis + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return { + "name": self.name, + "dtype": self.dtype, + "from_logits": self.from_logits, + "label_smoothing": self.label_smoothing, + "axis": self.axis, + } + + +@keras_export("keras.metrics.SparseCategoricalCrossentropy") +class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): + """Computes the crossentropy metric between the labels and predictions. + + Use this crossentropy metric when there are two or more label classes. + It expects labels to be provided as integers. If you want to provide labels + that are one-hot encoded, please use the `CategoricalCrossentropy` + metric instead. + + There should be `num_classes` floating point values per feature for `y_pred` + and a single floating point value per feature for `y_true`. 
+ + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + from_logits: (Optional) Whether output is expected + to be a logits tensor. By default, we consider that output + encodes a probability distribution. + axis: (Optional) Defaults to `-1`. + The dimension along which entropy is computed. + + Examples: + + >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]] + >>> # logits = log(y_pred) + >>> # softmax = exp(logits) / sum(exp(logits), axis=-1) + >>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] + >>> # xent = -sum(y * log(softmax), 1) + >>> # log(softmax) = [[-2.9957, -0.0513, -16.1181], + >>> # [-2.3026, -0.2231, -2.3026]] + >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]] + >>> # xent = [0.0513, 2.3026] + >>> # Reduced xent = (0.0513 + 2.3026) / 2 + >>> m = keras.metrics.SparseCategoricalCrossentropy() + >>> m.update_state([1, 2], + ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) + >>> m.result() + 1.1769392 + + >>> m.reset_state() + >>> m.update_state([1, 2], + ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]], + ... sample_weight=np.array([0.3, 0.7])) + >>> m.result() + 1.6271976 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.SparseCategoricalCrossentropy()]) + ``` + """ + + def __init__( + self, + name="sparse_categorical_crossentropy", + dtype=None, + from_logits=False, + axis=-1, + ): + super().__init__( + sparse_categorical_crossentropy, + name=name, + dtype=dtype, + from_logits=from_logits, + axis=axis, + ) + self.from_logits = from_logits + self.axis = axis + # Metric should be minimized during optimization. 
+ self._direction = "down" + + def get_config(self): + return { + "name": self.name, + "dtype": self.dtype, + "from_logits": self.from_logits, + "axis": self.axis, + } diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/reduction_metrics.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/reduction_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..3dde46f958351d52d98df92350e9ed30ea2a2e23 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/reduction_metrics.py @@ -0,0 +1,220 @@ +from keras.src import backend +from keras.src import initializers +from keras.src import losses +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.metrics.metric import Metric +from keras.src.saving import serialization_lib + + +def reduce_to_samplewise_values(values, sample_weight, reduce_fn, dtype): + dtype = dtype or backend.floatx() + mask = backend.get_keras_mask(values) + values = ops.cast(values, dtype=dtype) + if sample_weight is not None: + sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype) + + if mask is not None: + sample_weight = losses.loss.apply_mask( + sample_weight, mask, dtype=dtype, reduction="sum" + ) + # Update dimensions of weights to match with values if possible. + values, sample_weight = losses.loss.squeeze_or_expand_to_same_rank( + values, sample_weight + ) + # Reduce values to same ndim as weight array. + weight_ndim = len(sample_weight.shape) + values_ndim = len(values.shape) + if values_ndim > weight_ndim: + values = reduce_fn( + values, axis=list(range(weight_ndim, values_ndim)) + ) + # Broadcast sample_weight. It doesn't change the multiplication below + # but changes the sample_weight reduction applied later. 
+ sample_weight = ops.broadcast_to(sample_weight, ops.shape(values)) + values = values * sample_weight + if weight_ndim > 1: + sample_weight = reduce_fn( + sample_weight, axis=list(range(1, weight_ndim)) + ) + + values_ndim = len(values.shape) + if values_ndim > 1: + values = reduce_fn(values, axis=list(range(1, values_ndim))) + return values, sample_weight + + +@keras_export("keras.metrics.Sum") +class Sum(Metric): + """Compute the (weighted) sum of the given values. + + For example, if `values` is `[1, 3, 5, 7]` then their sum is 16. + If `sample_weight` was specified as `[1, 1, 0, 0]` then the sum would be 4. + + This metric creates one variable, `total`. + This is ultimately returned as the sum value. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = metrics.Sum() + >>> m.update_state([1, 3, 5, 7]) + >>> m.result() + 16.0 + + >>> m = metrics.Sum() + >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0]) + >>> m.result() + 4.0 + """ + + def __init__(self, name="sum", dtype=None): + super().__init__(name=name, dtype=dtype) + self.total = self.add_variable( + shape=(), + initializer=initializers.Zeros(), + dtype=self.dtype, + name="total", + ) + + def update_state(self, values, sample_weight=None): + values, _ = reduce_to_samplewise_values( + values, sample_weight, reduce_fn=ops.sum, dtype=self.dtype + ) + self.total.assign_add(ops.sum(values)) + + def reset_state(self): + self.total.assign(0) + + def result(self): + return ops.cast(self.total, self.dtype) + + +@keras_export("keras.metrics.Mean") +class Mean(Metric): + """Compute the (weighted) mean of the given values. + + For example, if values is `[1, 3, 5, 7]` then the mean is 4. + If `sample_weight` was specified as `[1, 1, 0, 0]` then the mean would be 2. + + This metric creates two variables, `total` and `count`. + The mean value returned is simply `total` divided by `count`. 
+ + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + + >>> m = Mean() + >>> m.update_state([1, 3, 5, 7]) + >>> m.result() + 4.0 + + >>> m.reset_state() + >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0]) + >>> m.result() + 2.0 + """ + + def __init__(self, name="mean", dtype=None): + super().__init__(name=name, dtype=dtype) + self.total = self.add_variable( + shape=(), + initializer=initializers.Zeros(), + dtype=self.dtype, + name="total", + ) + self.count = self.add_variable( + shape=(), + initializer=initializers.Zeros(), + dtype=self.dtype, + name="count", + ) + + def update_state(self, values, sample_weight=None): + values, sample_weight = reduce_to_samplewise_values( + values, sample_weight, reduce_fn=ops.mean, dtype=self.dtype + ) + self.total.assign_add(ops.sum(values)) + if sample_weight is not None: + num_samples = ops.sum(sample_weight) + elif len(values.shape) >= 1: + num_samples = ops.shape(values)[0] + else: + num_samples = 1 + self.count.assign_add(ops.cast(num_samples, dtype=self.dtype)) + + def reset_state(self): + self.total.assign(0) + self.count.assign(0) + + def result(self): + return ops.divide_no_nan( + self.total, ops.cast(self.count, dtype=self.dtype) + ) + + +@keras_export("keras.metrics.MeanMetricWrapper") +class MeanMetricWrapper(Mean): + """Wrap a stateless metric function with the `Mean` metric. + + You could use this class to quickly build a mean metric from a function. The + function needs to have the signature `fn(y_true, y_pred)` and return a + per-sample loss array. `MeanMetricWrapper.result()` will return + the average metric value across all samples seen so far. + + For example: + + ```python + def mse(y_true, y_pred): + return (y_true - y_pred) ** 2 + + mse_metric = MeanMetricWrapper(fn=mse) + ``` + + Args: + fn: The metric function to wrap, with signature + `fn(y_true, y_pred, **kwargs)`. 
+ name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + **kwargs: Keyword arguments to pass on to `fn`. + """ + + def __init__(self, fn, name=None, dtype=None, **kwargs): + super().__init__(name=name, dtype=dtype) + self._fn = fn + self._fn_kwargs = kwargs + + # If we are wrapping a Keras loss, register the metric's + # direction as "down" (needs to be minimized during training). + if ( + self._fn in losses.ALL_OBJECTS + or hasattr(self._fn, "__class__") + and self._fn.__class__ in losses.ALL_OBJECTS + ): + self._direction = "down" + + def update_state(self, y_true, y_pred, sample_weight=None): + mask = backend.get_keras_mask(y_pred) + values = self._fn(y_true, y_pred, **self._fn_kwargs) + if sample_weight is not None and mask is not None: + sample_weight = losses.loss.apply_mask( + sample_weight, mask, dtype=self.dtype, reduction="sum" + ) + return super().update_state(values, sample_weight=sample_weight) + + def get_config(self): + base_config = super().get_config() + config = {"fn": serialization_lib.serialize_keras_object(self._fn)} + config.update(serialization_lib.serialize_keras_object(self._fn_kwargs)) + return {**base_config, **config} + + @classmethod + def from_config(cls, config): + if "fn" in config: + config = serialization_lib.deserialize_keras_object(config) + return cls(**config) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/regression_metrics.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/regression_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..1ec0f86c6373b94f717c3664c8ee0b9d3c6517fc --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/metrics/regression_metrics.py @@ -0,0 +1,608 @@ +import warnings + +from keras.src import initializers +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.losses.loss import 
squeeze_or_expand_to_same_rank +from keras.src.losses.losses import log_cosh +from keras.src.losses.losses import mean_absolute_error +from keras.src.losses.losses import mean_absolute_percentage_error +from keras.src.losses.losses import mean_squared_error +from keras.src.losses.losses import mean_squared_logarithmic_error +from keras.src.metrics import reduction_metrics +from keras.src.utils.numerical_utils import normalize + + +@keras_export("keras.metrics.MeanSquaredError") +class MeanSquaredError(reduction_metrics.MeanMetricWrapper): + """Computes the mean squared error between `y_true` and `y_pred`. + + Formula: + + ```python + loss = mean(square(y_true - y_pred)) + ``` + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Example: + >>> m = keras.metrics.MeanSquaredError() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) + >>> m.result() + 0.25 + """ + + def __init__(self, name="mean_squared_error", dtype=None): + super().__init__(fn=mean_squared_error, name=name, dtype=dtype) + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.MeanAbsoluteError") +class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper): + """Computes the mean absolute error between the labels and predictions. + + Formula: + + ```python + loss = mean(abs(y_true - y_pred)) + ``` + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + + >>> m = keras.metrics.MeanAbsoluteError() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) + >>> m.result() + 0.25 + + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], + ... 
sample_weight=[1, 0]) + >>> m.result() + 0.5 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.MeanAbsoluteError()]) + ``` + """ + + def __init__(self, name="mean_absolute_error", dtype=None): + super().__init__(mean_absolute_error, name, dtype=dtype) + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.MeanAbsolutePercentageError") +class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper): + """Computes mean absolute percentage error between `y_true` and `y_pred`. + + Formula: + + ```python + loss = 100 * mean(abs((y_true - y_pred) / y_true)) + ``` + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + >>> m = keras.metrics.MeanAbsolutePercentageError() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) + >>> m.result() + 250000000.0 + + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], + ... sample_weight=[1, 0]) + >>> m.result() + 500000000.0 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.MeanAbsolutePercentageError()]) + ``` + """ + + def __init__(self, name="mean_absolute_percentage_error", dtype=None): + super().__init__(mean_absolute_percentage_error, name, dtype=dtype) + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.MeanSquaredLogarithmicError") +class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper): + """Computes mean squared logarithmic error between `y_true` and `y_pred`. 
+ + Formula: + + ```python + loss = mean(square(log(y_true + 1) - log(y_pred + 1))) + ``` + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + + >>> m = keras.metrics.MeanSquaredLogarithmicError() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) + >>> m.result() + 0.12011322 + + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], + ... sample_weight=[1, 0]) + >>> m.result() + 0.24022643 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.MeanSquaredLogarithmicError()]) + ``` + """ + + def __init__(self, name="mean_squared_logarithmic_error", dtype=None): + super().__init__(mean_squared_logarithmic_error, name, dtype=dtype) + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.RootMeanSquaredError") +class RootMeanSquaredError(reduction_metrics.Mean): + """Computes root mean squared error metric between `y_true` and `y_pred`. + + Formula: + + ```python + loss = sqrt(mean((y_pred - y_true) ** 2)) + ``` + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + + >>> m = keras.metrics.RootMeanSquaredError() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) + >>> m.result() + 0.5 + + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], + ... sample_weight=[1, 0]) + >>> m.result() + 0.70710677 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.RootMeanSquaredError()]) + ``` + """ + + def __init__(self, name="root_mean_squared_error", dtype=None): + super().__init__(name, dtype=dtype) + # Metric should be minimized during optimization. 
+ self._direction = "down" + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates root mean squared error statistics. + + Args: + y_true: The ground truth values. + y_pred: The predicted values. + sample_weight: Optional weighting of each example. Can + be a `Tensor` whose rank is either 0, or the same rank as + `y_true`, and must be broadcastable to `y_true`. + Defaults to `1`. + + Returns: + Update op. + """ + y_true = ops.convert_to_tensor(y_true, self._dtype) + y_pred = ops.convert_to_tensor(y_pred, self._dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + error_sq = ops.square(y_pred - y_true) + return super().update_state(error_sq, sample_weight=sample_weight) + + def result(self): + return ops.sqrt(super().result()) + + +@keras_export("keras.metrics.CosineSimilarity") +class CosineSimilarity(reduction_metrics.MeanMetricWrapper): + """Computes the cosine similarity between the labels and predictions. + + Formula: + + ```python + loss = sum(l2_norm(y_true) * l2_norm(y_pred)) + ``` + See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity). + This metric keeps the average cosine similarity between `predictions` and + `labels` over a stream of data. + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + axis: (Optional) Defaults to `-1`. The dimension along which the cosine + similarity is computed. + + Examples: + + >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]] + >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]] + >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]] + >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1)) + >>> # = ((0. + 0.) 
+ (0.5 + 0.5)) / 2 + >>> m = keras.metrics.CosineSimilarity(axis=1) + >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]]) + >>> m.result() + 0.49999997 + + >>> m.reset_state() + >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]], + ... sample_weight=[0.3, 0.7]) + >>> m.result() + 0.6999999 + + Usage with `compile()` API: + + ```python + model.compile( + optimizer='sgd', + loss='mse', + metrics=[keras.metrics.CosineSimilarity(axis=1)]) + ``` + """ + + def __init__(self, name="cosine_similarity", dtype=None, axis=-1): + super().__init__(cosine_similarity, name, dtype=dtype, axis=axis) + # Metric should be maximized during optimization. + self._direction = "up" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +@keras_export("keras.metrics.LogCoshError") +class LogCoshError(reduction_metrics.MeanMetricWrapper): + """Computes the logarithm of the hyperbolic cosine of the prediction error. + + Formula: + + ```python + error = y_pred - y_true + logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1) + ``` + + Args: + name: (Optional) string name of the metric instance. + dtype: (Optional) data type of the metric result. + + Examples: + + >>> m = keras.metrics.LogCoshError() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) + >>> m.result() + 0.10844523 + + >>> m.reset_state() + >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], + ... sample_weight=[1, 0]) + >>> m.result() + 0.21689045 + + Usage with `compile()` API: + + ```python + model.compile(optimizer='sgd', + loss='mse', + metrics=[keras.metrics.LogCoshError()]) + ``` + """ + + def __init__(self, name="logcosh", dtype=None): + super().__init__(log_cosh, name, dtype=dtype) + # Metric should be minimized during optimization. + self._direction = "down" + + def get_config(self): + return {"name": self.name, "dtype": self.dtype} + + +# Adapted from TF-Addons implementation (RSquare class). 
+@keras_export("keras.metrics.R2Score") +class R2Score(reduction_metrics.Metric): + """Computes R2 score. + + Formula: + + ```python + sum_squares_residuals = sum((y_true - y_pred) ** 2) + sum_squares = sum((y_true - mean(y_true)) ** 2) + R2 = 1 - sum_squares_residuals / sum_squares + ``` + + This is also called the + [coefficient of determination]( + https://en.wikipedia.org/wiki/Coefficient_of_determination). + + It indicates how close the fitted regression line + is to ground-truth data. + + - The highest score possible is 1.0. It indicates that the predictors + perfectly accounts for variation in the target. + - A score of 0.0 indicates that the predictors do not + account for variation in the target. + - It can also be negative if the model is worse than random. + + This metric can also compute the "Adjusted R2" score. + + Args: + class_aggregation: Specifies how to aggregate scores corresponding to + different output classes (or target dimensions), + i.e. different dimensions on the last axis of the predictions. + Equivalent to `multioutput` argument in Scikit-Learn. + Should be one of + `None` (no aggregation), `"uniform_average"`, + `"variance_weighted_average"`. + num_regressors: Number of independent regressors used + ("Adjusted R2" score). 0 is the standard R2 score. + Defaults to `0`. + name: Optional. string name of the metric instance. + dtype: Optional. data type of the metric result. + + Example: + + >>> y_true = np.array([[1], [4], [3]], dtype=np.float32) + >>> y_pred = np.array([[2], [4], [4]], dtype=np.float32) + >>> metric = keras.metrics.R2Score() + >>> metric.update_state(y_true, y_pred) + >>> result = metric.result() + >>> result + 0.57142854 + """ + + def __init__( + self, + class_aggregation="uniform_average", + num_regressors=0, + name="r2_score", + dtype=None, + ): + super().__init__(name=name, dtype=dtype) + # Metric should be maximized during optimization. 
+ self._direction = "up" + + valid_class_aggregation_values = ( + None, + "uniform_average", + "variance_weighted_average", + ) + if class_aggregation not in valid_class_aggregation_values: + raise ValueError( + "Invalid value for argument `class_aggregation`. Expected " + f"one of {valid_class_aggregation_values}. " + f"Received: class_aggregation={class_aggregation}" + ) + if num_regressors < 0: + raise ValueError( + "Invalid value for argument `num_regressors`. " + "Expected a value >= 0. " + f"Received: num_regressors={num_regressors}" + ) + self.class_aggregation = class_aggregation + self.num_regressors = num_regressors + self.num_samples = self.add_variable( + shape=(), + initializer=initializers.Zeros(), + name="num_samples", + ) + self._built = False + + def _build(self, y_true_shape, y_pred_shape): + if len(y_pred_shape) != 2 or len(y_true_shape) != 2: + raise ValueError( + "R2Score expects 2D inputs with shape " + "(batch_size, output_dim). Received input " + f"shapes: y_pred.shape={y_pred_shape} and " + f"y_true.shape={y_true_shape}." + ) + if y_pred_shape[-1] is None or y_true_shape[-1] is None: + raise ValueError( + "R2Score expects 2D inputs with shape " + "(batch_size, output_dim), with output_dim fully " + "defined (not None). Received input " + f"shapes: y_pred.shape={y_pred_shape} and " + f"y_true.shape={y_true_shape}." 
+ ) + num_classes = y_pred_shape[-1] + self.squared_sum = self.add_variable( + name="squared_sum", + shape=[num_classes], + initializer=initializers.Zeros(), + ) + self.sum = self.add_variable( + name="sum", + shape=[num_classes], + initializer=initializers.Zeros(), + ) + self.total_mse = self.add_variable( + name="residual", + shape=[num_classes], + initializer=initializers.Zeros(), + ) + self.count = self.add_variable( + name="count", + shape=[num_classes], + initializer=initializers.Zeros(), + ) + self._built = True + + def update_state(self, y_true, y_pred, sample_weight=None): + """Accumulates root mean squared error statistics. + + Args: + y_true: The ground truth values. + y_pred: The predicted values. + sample_weight: Optional weighting of each example. Can + be a `Tensor` whose rank is either 0, or the same rank as + `y_true`, and must be broadcastable to `y_true`. + Defaults to `1`. + + Returns: + Update op. + """ + y_true = ops.convert_to_tensor(y_true, dtype=self._dtype) + y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + if not self._built: + self._build(y_true.shape, y_pred.shape) + + if sample_weight is None: + sample_weight = 1 + + sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype) + + if len(sample_weight.shape) == 1: + # Make sure there's a features dimension + sample_weight = ops.expand_dims(sample_weight, axis=1) + + sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true)) + + weighted_y_true = y_true * ops.cast(sample_weight, y_true.dtype) + self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0)) + self.squared_sum.assign( + self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0) + ) + self.total_mse.assign( + self.total_mse + + ops.sum( + (y_true - y_pred) ** 2 * ops.cast(sample_weight, y_true.dtype), + axis=0, + ) + ) + self.count.assign(self.count + ops.sum(sample_weight, axis=0)) + self.num_samples.assign(self.num_samples + 
ops.size(y_true)) + + def result(self): + mean = self.sum / self.count + total = self.squared_sum - self.sum * mean + raw_scores = 1 - (self.total_mse / total) + raw_scores = ops.where(ops.isinf(raw_scores), 0.0, raw_scores) + + if self.class_aggregation == "uniform_average": + r2_score = ops.mean(raw_scores) + elif self.class_aggregation == "variance_weighted_average": + weighted_sum = ops.sum(total * raw_scores) + sum_of_weights = ops.sum(total) + r2_score = weighted_sum / sum_of_weights + else: + r2_score = raw_scores + + if self.num_regressors != 0: + if self.num_regressors > self.num_samples - 1: + warnings.warn( + "More independent predictors than datapoints " + "in adjusted R2 score. Falling back to standard R2 score.", + stacklevel=2, + ) + elif self.num_regressors == self.num_samples - 1: + warnings.warn( + "Division by zero in Adjusted R2 score. " + "Falling back to standard R2 score.", + stacklevel=2, + ) + else: + n = ops.convert_to_tensor(self.num_samples, dtype="float32") + p = ops.convert_to_tensor(self.num_regressors, dtype="float32") + num = ops.multiply( + ops.subtract(1.0, r2_score), ops.subtract(n, 1.0) + ) + den = ops.subtract(ops.subtract(n, p), 1.0) + r2_score = ops.subtract(1.0, ops.divide(num, den)) + return r2_score + + def reset_state(self): + for v in self.variables: + v.assign(ops.zeros(v.shape, dtype=v.dtype)) + + def get_config(self): + config = { + "name": self.name, + "dtype": self.dtype, + "class_aggregation": self.class_aggregation, + "num_regressors": self.num_regressors, + } + base_config = super().get_config() + return {**base_config, **config} + + +def cosine_similarity(y_true, y_pred, axis=-1): + """Computes the cosine similarity between labels and predictions. + + Formula: + + ```python + loss = sum(l2_norm(y_true) * l2_norm(y_pred)) + ``` + + Args: + y_true: Tensor of true targets. + y_pred: Tensor of predicted targets. + axis: Axis along which to determine similarity. Defaults to `-1`. 
+ + Returns: + Cosine similarity tensor. + + Example: + + >>> y_true = [[0., 1.], [1., 1.], [1., 1.]] + >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]] + >>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1) + [0., 0.99999994, -0.99999994] + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype) + y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred) + y_pred = normalize(y_pred, axis=axis) + y_true = normalize(y_true, axis=axis) + return ops.sum(y_true * y_pred, axis=axis) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f3f73c99961b3b230a8491f606e3aa36ede57ed --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__init__.py @@ -0,0 +1,3 @@ +from keras.src.models.functional import Functional +from keras.src.models.model import Model +from keras.src.models.sequential import Sequential diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ae59c08b5a749be0119d9d045c54525fccdd08c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/cloning.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/cloning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf8a63af44865a647d535464d7dc978f6f98114f Binary files 
/dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/cloning.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/functional.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..818f157b1bc91ef30c1b88a139d3b717ac211eae Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/functional.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/model.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f53f03ccc67cceab0a15220b83f9d8ab7a0a489 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/model.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/sequential.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/sequential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23e2848c5d60d5f6ffd41d7345d3a554bad82221 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/sequential.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/variable_mapping.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/variable_mapping.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2519606ef505cddd96d495ed8f6114b72b5869f5 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/__pycache__/variable_mapping.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/cloning.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/cloning.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b88faaa581803815d0ac0846d443cd4fe59a15 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/cloning.py @@ -0,0 +1,409 @@ +from keras.src import backend +from keras.src import tree +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.layers import Input +from keras.src.layers import InputLayer +from keras.src.models.functional import Functional +from keras.src.models.functional import functional_like_constructor +from keras.src.models.sequential import Sequential +from keras.src.saving import serialization_lib + + +@keras_export("keras.models.clone_model") +def clone_model( + model, + input_tensors=None, + clone_function=None, + call_function=None, + recursive=False, + **kwargs, +): + """Clone a Functional or Sequential `Model` instance. + + Model cloning is similar to calling a model on new inputs, + except that it creates new layers (and thus new weights) instead + of sharing the weights of the existing layers. + + Note that + `clone_model` will not preserve the uniqueness of shared objects within the + model (e.g. a single variable attached to two distinct layers will be + restored as two separate variables). + + Args: + model: Instance of `Model` + (could be a Functional model or a Sequential model). + input_tensors: optional list of input tensors or InputLayer objects + to build the model upon. If not provided, + new `Input` objects will be created. 
+ clone_function: Callable with signature `fn(layer)` + to be used to clone each layer in the target + model (except `Input` instances). It takes as argument the + layer instance to be cloned, and returns the corresponding layer + instance to be used in the model copy. If unspecified, this callable + defaults to the following serialization/deserialization function: + `lambda layer: layer.__class__.from_config(layer.get_config())`. + By passing a custom callable, you can customize your copy of the + model, e.g. by wrapping certain layers of interest (you might want + to replace all `LSTM` instances with equivalent + `Bidirectional(LSTM(...))` instances, for example). + Defaults to `None`. + call_function: Callable with signature + `fn(layer, *args, **kwargs)` to be used to call each + cloned layer and a set of inputs. It takes the layer instance, + the call arguments and keyword arguments, and returns the + call outputs. If unspecified, this callable defaults to + the regular `__call__()` method: + `def fn(layer, *args, **kwargs): return layer(*args, **kwargs)`. + By passing a custom callable, you can insert new layers before or + after a given layer. Note: this argument can only be used with + Functional models. + recursive: Boolean. Whether to recursively clone any Sequential + or Functional models encountered in the original + Sequential/Functional model. If `False`, + then inner models are cloned by calling `clone_function()`. + If `True`, then inner models are cloned by calling `clone_model()` + with the same `clone_function`, `call_function`, and `recursive` + arguments. Note that in this case, `call_function` + will not be propagated to any Sequential model + (since it is not applicable to Sequential models). + + Returns: + An instance of `Model` reproducing the behavior + of the original model, on top of new inputs tensors, + using newly instantiated weights. 
The cloned model may behave + differently from the original model if a custom `clone_function` + or `call_function` modifies a layer or layer call. + + Example: + + ```python + # Create a test Sequential model. + model = keras.Sequential([ + keras.layers.Input(shape=(728,)), + keras.layers.Dense(32, activation='relu'), + keras.layers.Dense(1, activation='sigmoid'), + ]) + # Create a copy of the test model (with freshly initialized weights). + new_model = clone_model(model) + ``` + + Using a `clone_function` to make a model deterministic by setting the + random seed everywhere: + + ```python + def clone_function(layer): + config = layer.get_config() + if "seed" in config: + config["seed"] = 1337 + return layer.__class__.from_config(config) + + new_model = clone_model(model, clone_function=clone_function) + ``` + + Using a `call_function` to add a `Dropout` layer after each `Dense` layer + (without recreating new layers): + + ```python + def call_function(layer, *args, **kwargs): + out = layer(*args, **kwargs) + if isinstance(layer, keras.layers.Dense): + out = keras.layers.Dropout(0.5)(out) + return out + + new_model = clone_model( + model, + clone_function=lambda x: x, # Reuse the same layers. + call_function=call_function, + ) + ``` + + Note that subclassed models cannot be cloned by default, + since their internal layer structure is not known. + To achieve equivalent functionality + as `clone_model` in the case of a subclassed model, simply make sure + that the model class implements `get_config()` + (and optionally `from_config()`), and call: + + ```python + new_model = model.__class__.from_config(model.get_config()) + ``` + + In the case of a subclassed model, you cannot using a custom + `clone_function`. + """ + cache = kwargs.pop("cache", None) + if kwargs: + raise ValueError( + f"Unexpected keyword argument(s): {tuple(kwargs.keys())}" + ) + + if isinstance(model, Sequential): + # Wrap clone_function to handle recursiveness and layer sharing. 
+ clone_function = _wrap_clone_function( + clone_function, + call_function=call_function, + recursive=recursive, + cache=cache, + ) + if call_function is not None: + raise ValueError( + "`call_function` argument is not supported with Sequential " + "models. In a Sequential model, layers aren't called " + "at model-construction time (they're merely listed). " + "Use `call_function` with Functional models only. " + "Received model of " + f"type '{model.__class__.__name__}', with " + f"call_function={clone_function}" + ) + return _clone_sequential_model( + model, + clone_function=clone_function, + input_tensors=input_tensors, + ) + if isinstance(model, Functional): + # Wrap clone_function to handle recursiveness and layer sharing. + clone_function = _wrap_clone_function( + clone_function, + call_function=call_function, + recursive=recursive, + cache=cache, + ) + + # If the get_config() method is the same as a regular Functional + # model, we're safe to use _clone_functional_model (which relies + # on a Functional constructor). In the case where the get_config + # is custom, this may not necessarily work, but if clone_function + # or input_tensors are passed, we attempt it anyway + # in order to preserve backwards compatibility. + if utils.is_default(model.get_config) or ( + clone_function or input_tensors + ): + return _clone_functional_model( + model, + clone_function=clone_function, + call_function=call_function, + input_tensors=input_tensors, + ) + + # Case of a custom model class + if clone_function or input_tensors: + raise ValueError( + "Arguments `clone_function` and `input_tensors` " + "are only supported for Sequential models " + "or Functional models. Received model of " + f"type '{model.__class__.__name__}', with " + f"clone_function={clone_function} and " + f"input_tensors={input_tensors}" + ) + if call_function is not None: + raise ValueError( + "Argument `call_function` is only supported " + "for Functional models. 
Received model of " + f"type '{model.__class__.__name__}', with " + f"call_function={clone_function}" + ) + config = serialization_lib.serialize_keras_object(model) + return serialization_lib.deserialize_keras_object( + config, custom_objects={model.__class__.__name__: model.__class__} + ) + + +def _wrap_clone_function( + clone_function, call_function=None, recursive=False, cache=None +): + """Wrapper to handle recursiveness and layer sharing.""" + if clone_function is None: + + def _clone_layer(layer): + return layer.__class__.from_config(layer.get_config()) + + clone_function = _clone_layer + + if cache is None: + cache = {} + + def wrapped_clone_function(layer): + if id(layer) in cache: + return cache[id(layer)] + if recursive: + if isinstance(layer, Sequential): + # Note: Sequential doesn't support call_function. + clone = clone_model( + layer, + clone_function=clone_function, + cache=cache, + ) + cache[id(layer)] = clone + return clone + elif isinstance(layer, Functional): + clone = clone_model( + layer, + clone_function=clone_function, + call_function=call_function, + cache=cache, + ) + cache[id(layer)] = clone + return clone + clone = clone_function(layer) + cache[id(layer)] = clone + return clone + + return wrapped_clone_function + + +def _clone_sequential_model(model, clone_function, input_tensors=None): + """Clone a `Sequential` model instance. + + Model cloning is similar to calling a model on new inputs, + except that it creates new layers (and thus new weights) instead + of sharing the weights of the existing layers. + + Args: + model: Instance of `Sequential`. + input_tensors: optional list of input tensors + to build the model upon. If not provided, + placeholders will be created. + clone_function: callable to be applied on non-input layers in the model. + By default, it clones the layer (without copying the weights). 
+ + Returns: + An instance of `Sequential` reproducing the behavior + of the original model, on top of new inputs tensors, + using newly instantiated weights. + """ + + if not isinstance(model, Sequential): + raise ValueError( + "Expected `model` argument " + "to be a `Sequential` model instance. " + f"Received: model={model}" + ) + + if not callable(clone_function): + raise ValueError( + "Expected `clone_function` argument to be a callable. " + f"Received: clone_function={clone_function}" + ) + + new_layers = [clone_function(layer) for layer in model.layers] + + if isinstance(model._layers[0], InputLayer): + ref_input_layer = model._layers[0] + input_name = ref_input_layer.name + input_batch_shape = ref_input_layer.batch_shape + input_dtype = ref_input_layer._dtype + else: + input_name = None + input_dtype = None + input_batch_shape = None + + if input_tensors is not None: + if isinstance(input_tensors, (list, tuple)): + if len(input_tensors) != 1: + raise ValueError( + "Argument `input_tensors` must contain a single tensor." + ) + input_tensors = input_tensors[0] + if not isinstance(input_tensors, backend.KerasTensor): + raise ValueError( + "Argument `input_tensors` must be a KerasTensor. " + f"Received invalid value: input_tensors={input_tensors}" + ) + inputs = Input( + tensor=input_tensors, + name=input_name, + ) + new_layers = [inputs] + new_layers + else: + if input_batch_shape is not None: + inputs = Input( + batch_shape=input_batch_shape, + dtype=input_dtype, + name=input_name, + ) + new_layers = [inputs] + new_layers + return Sequential(new_layers, name=model.name, trainable=model.trainable) + + +def _clone_functional_model( + model, clone_function, input_tensors=None, call_function=None +): + """Clone a `Functional` model instance. + + Model cloning is similar to calling a model on new inputs, + except that it creates new layers (and thus new weights) instead + of sharing the weights of the existing layers. + + Input layers are always cloned. 
+ + Args: + model: Instance of `Functional`. + input_tensors: optional list of input tensors + to build the model upon. If not provided, + placeholders will be created. + clone_function: callable to be applied on non-input layers in the model. + By default, it clones the layer (without copying the weights). + + Returns: + An instance of `Functional` reproducing the behavior + of the original model, on top of new inputs tensors, + using newly instantiated weights. + """ + + if not callable(clone_function): + raise ValueError( + "Expected `clone_function` argument to be a callable. " + f"Received: clone_function={clone_function}" + ) + + if not isinstance(model, Functional): + raise ValueError( + "Expected `model` argument " + f"to be a Functional Model instance. Received: model={model}" + ) + + if input_tensors is not None: + if not all( + isinstance(x, backend.KerasTensor) + for x in tree.flatten(input_tensors) + ): + raise ValueError( + "All entries in `input_tensors` must be KerasTensors. " + f"Received invalid values: inputs_tensors={input_tensors}" + ) + try: + tree.assert_same_structure(input_tensors, model.input) + except ValueError as e: + raise ValueError( + "`input_tensors` must have the same structure as model.input" + f"\nReference structure: {model.input}" + f"\nReceived structure: {input_tensors}" + ) from e + else: + input_tensors = tree.map_structure( + lambda x: Input(batch_shape=x.shape, dtype=x.dtype, name=x.name), + model.input, + ) + + def operation_fn(layer): + new_layer = clone_function(layer) + return new_layer + + output_tensors = model._run_through_graph( + input_tensors, + operation_fn=operation_fn, + call_fn=call_function, + ) + + if functional_like_constructor(model.__class__): + new_model = model.__class__( + input_tensors, output_tensors, name=model.name + ) + else: + # This may be incorrect: the new model will end up having a different + # class than the original. 
However various existing models rely + # on this behavior, so we keep it. + new_model = Functional(input_tensors, output_tensors, name=model.name) + + return new_model diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/functional.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..e01052bc57ec84bc36070141d9b75d4da3377f50 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/functional.py @@ -0,0 +1,878 @@ +import copy +import inspect +import typing +import warnings + +from keras.src import backend +from keras.src import ops +from keras.src import tree +from keras.src.backend.common import global_state +from keras.src.layers.core.input_layer import Input +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.input_spec import InputSpec +from keras.src.layers.layer import Layer +from keras.src.legacy.saving import saving_utils +from keras.src.legacy.saving import serialization as legacy_serialization +from keras.src.models.model import Model +from keras.src.ops.function import Function +from keras.src.ops.function import _build_map +from keras.src.ops.function import make_node_key +from keras.src.ops.node import KerasHistory +from keras.src.ops.node import Node +from keras.src.saving import serialization_lib +from keras.src.utils import tracking + + +class Functional(Function, Model): + """A `Functional` model is a `Model` defined as a directed graph of layers. + + Three types of `Model` exist: subclassed `Model`, `Functional` model, + and `Sequential` (a special case of `Functional`). + + A `Functional` model can be instantiated by passing two arguments to + `__init__()`. The first argument is the `keras.Input` objects + that represent the inputs to the model. 
+ The second argument specifies the output tensors that represent + the outputs of this model. Both arguments can be a nested structure + of tensors. + + Example: + + ``` + inputs = {'x1': keras.Input(shape=(10,), name='x1'), + 'x2': keras.Input(shape=(1,), name='x2')} + t = keras.layers.Dense(1, activation='relu')(inputs['x1']) + outputs = keras.layers.Add()([t, inputs['x2']]) + model = keras.Model(inputs, outputs) + ``` + + A `Functional` model constructed using the Functional API can also + include raw Keras 3 ops. + + Example: + + ```python + inputs = keras.Input(shape=(10,)) + x = keras.layers.Dense(1)(inputs) + outputs = ops.nn.relu(x) + model = keras.Model(inputs, outputs) + ``` + + A new `Functional` model can also be created by using the + intermediate tensors. This enables you to quickly extract sub-components + of the model. + + Example: + + ```python + inputs = keras.Input(shape=(None, None, 3)) + processed = keras.layers.RandomCrop(width=32, height=32)(inputs) + conv = keras.layers.Conv2D(filters=2, kernel_size=3)(processed) + pooling = keras.layers.GlobalAveragePooling2D()(conv) + feature = keras.layers.Dense(10)(pooling) + + full_model = keras.Model(inputs, feature) + backbone = keras.Model(processed, conv) + activations = keras.Model(conv, feature) + ``` + + Note that the `backbone` and `activations` models are not + created with `keras.Input` objects, but with the tensors + that are originated from `keras.Input` objects. + Under the hood, the layers and weights will + be shared across these models, so that user can train the `full_model`, and + use `backbone` or `activations` to do feature extraction. + The inputs and outputs of the model can be nested structures of tensors as + well, and the created models are standard `Functional` model that support + all the existing API. + + Args: + inputs: List of input tensors (must be created via `keras.Input()` + or originated from `keras.Input()`). + outputs: List of output tensors. 
+ name: String, optional. Name of the model. + trainable: Boolean, optional. If the model's variables should be + trainable. + """ + + def __new__(cls, *args, **kwargs): + return typing.cast(cls, super().__new__(cls)) + + @tracking.no_automatic_dependency_tracking + def __init__(self, inputs, outputs, name=None, **kwargs): + if isinstance(inputs, dict): + for k, v in inputs.items(): + if isinstance(v, backend.KerasTensor) and k != v.name: + warnings.warn( + "When providing `inputs` as a dict, all keys in the " + "dict must match the names of the corresponding " + f"tensors. Received key '{k}' mapping to value {v} " + f"which has name '{v.name}'. Change the tensor name to " + f"'{k}' (via `Input(..., name='{k}')`)" + ) + + trainable = kwargs.pop("trainable", None) + flat_inputs = tree.flatten(inputs) + flat_outputs = tree.flatten(outputs) + for x in flat_inputs: + if not isinstance(x, backend.KerasTensor): + raise ValueError( + "All `inputs` values must be KerasTensors. Received: " + f"inputs={inputs} including invalid value {x} of " + f"type {type(x)}" + ) + for x in flat_outputs: + if not isinstance(x, backend.KerasTensor): + raise ValueError( + "All `outputs` values must be KerasTensors. Received: " + f"outputs={outputs} including invalid value {x} of " + f"type {type(x)}" + ) + + if not all(is_input_keras_tensor(t) for t in flat_inputs): + inputs, outputs = clone_graph_nodes(inputs, outputs) + + Function.__init__(self, inputs, outputs, name=name) + + if trainable is not None: + self.trainable = trainable + + self._layers = self.layers + self.build(None) + # We will convert directly (to the correct dtype per input). + self._convert_input_args = False + self._allow_non_tensor_positional_args = True + output_layers = [x._keras_history[0] for x in self.outputs] + self.output_names = [x.name for x in output_layers] + + def _lock_state(self): + # Unlike other layers, we allow Functional state to be mutable after + # build. E.g. 
to attach a layer to a model that is not part of the + # functional DAG. + pass + + def _obj_type(self): + return "Functional" + + @property + def layers(self): + layers = [] + for operation in self._operations: + if isinstance(operation, Layer): + layers.append(operation) + return layers + + @layers.setter + def layers(self, _): + raise AttributeError( + "`Model.layers` attribute is reserved and should not be used. " + "Please use another name." + ) + + def call(self, inputs, training=None, mask=None): + # Add support for training, masking + inputs = self._standardize_inputs(inputs) + if mask is None: + masks = [None] * len(inputs) + else: + masks = tree.flatten(mask) + for x, mask in zip(inputs, masks): + if mask is not None: + backend.set_keras_mask(x, mask) + outputs = self._run_through_graph( + inputs, operation_fn=lambda op: operation_fn(op, training=training) + ) + return unpack_singleton(outputs) + + def compute_output_spec(self, inputs, training=None, mask=None): + # From Function + return super().compute_output_spec(inputs) + + def compute_output_shape(self, input_shape): + # From Function + return super().compute_output_shape(input_shape) + + def build(self, input_shape): + self.built = True + + @property + def input_shape(self): + input_shapes = tree.map_structure(lambda x: x.shape, self.inputs) + if isinstance(input_shapes, list) and len(input_shapes) == 1: + return input_shapes[0] + return input_shapes + + @property + def output_shape(self): + output_shapes = tree.map_structure(lambda x: x.shape, self.outputs) + if isinstance(output_shapes, list) and len(output_shapes) == 1: + return output_shapes[0] + return output_shapes + + def _assert_input_compatibility(self, *args): + return super(Model, self)._assert_input_compatibility(*args) + + def _maybe_warn_inputs_struct_mismatch(self, inputs, raise_exception=False): + try: + # We first normalize to tuples before performing the check to + # suppress warnings when encountering mismatched tuples and lists. 
+ tree.assert_same_structure( + tree.lists_to_tuples(inputs), + tree.lists_to_tuples(self._inputs_struct), + ) + except: + model_inputs_struct = tree.map_structure( + lambda x: x.name, self._inputs_struct + ) + inputs_struct = tree.map_structure( + lambda x: f"Tensor(shape={x.shape})", inputs + ) + msg = ( + "The structure of `inputs` doesn't match the expected " + f"structure.\nExpected: {model_inputs_struct}\n" + f"Received: inputs={inputs_struct}" + ) + if raise_exception: + raise ValueError(msg) + warnings.warn(msg) + + def _convert_inputs_to_tensors(self, flat_inputs): + converted = [] + for x, input in zip(flat_inputs, self._inputs): + if x is None: # TODO: check if optional + converted.append(x) + else: + converted.append( + ops.convert_to_tensor( + x, dtype=input.dtype, sparse=input.sparse + ) + ) + return converted + + def _adjust_input_rank(self, flat_inputs): + flat_ref_shapes = [x.shape for x in self._inputs] + adjusted = [] + for x, ref_shape in zip(flat_inputs, flat_ref_shapes): + if x is None: + adjusted.append(x) + continue + x_rank = len(x.shape) + ref_rank = len(ref_shape) + if x_rank == ref_rank: + adjusted.append(x) + continue + if x_rank == ref_rank + 1: + if x.shape[-1] == 1: + adjusted.append(ops.squeeze(x, axis=-1)) + continue + if x_rank == ref_rank - 1: + if ref_shape[-1] == 1: + adjusted.append(ops.expand_dims(x, axis=-1)) + continue + raise ValueError( + f"Invalid input shape for input {x}. Expected shape " + f"{ref_shape}, but input has incompatible shape {x.shape}" + ) + # Add back metadata. 
+ for i in range(len(flat_inputs)): + if hasattr(flat_inputs[i], "_keras_history"): + adjusted[i]._keras_history = flat_inputs[i]._keras_history + mask = backend.get_keras_mask(flat_inputs[i]) + if mask is not None: + backend.set_keras_mask(adjusted[i], mask) + return adjusted + + def _standardize_inputs(self, inputs): + raise_exception = False + if isinstance(inputs, dict) and not isinstance( + self._inputs_struct, dict + ): + # This is to avoid warning + # when we have reconciable dict/list structs + if hasattr(self._inputs_struct, "__len__") and all( + isinstance(i, backend.KerasTensor) for i in self._inputs_struct + ): + expected_keys = set(i.name for i in self._inputs_struct) + keys = set(inputs.keys()) + if expected_keys.issubset(keys): + inputs = [inputs[i.name] for i in self._inputs_struct] + else: + raise_exception = True + elif isinstance(self._inputs_struct, backend.KerasTensor): + if self._inputs_struct.name in inputs: + inputs = [inputs[self._inputs_struct.name]] + else: + raise_exception = True + else: + raise_exception = True + if ( + isinstance(self._inputs_struct, dict) + and not isinstance(inputs, dict) + and list(self._inputs_struct.keys()) + != sorted(self._inputs_struct.keys()) + ): + raise_exception = True + self._maybe_warn_inputs_struct_mismatch( + inputs, raise_exception=raise_exception + ) + + flat_inputs = tree.flatten(inputs) + flat_inputs = self._convert_inputs_to_tensors(flat_inputs) + return self._adjust_input_rank(flat_inputs) + + @property + def input(self): + # For backwards compatibility, + # override `input` to retrieve the used-provided + # constructor inputs + return self._inputs_struct + + @property + def output(self): + return self._outputs_struct + + def add_loss(self, loss): + # Symbolic only. 
TODO + raise NotImplementedError + + @property + def input_spec(self): + if hasattr(self, "_manual_input_spec"): + return self._manual_input_spec + + def shape_with_no_batch_size(x): + x = list(x) + if x: + x[0] = None + return tuple(x) + + def make_spec_for_tensor(x): + optional = False + if isinstance(x._keras_history[0], InputLayer): + if x._keras_history[0].optional: + optional = True + return InputSpec( + shape=shape_with_no_batch_size(x.shape), + allow_last_axis_squeeze=True, + name=x._keras_history[0].name, + optional=optional, + ) + + if isinstance(self._inputs_struct, dict): + if all( + isinstance(x, backend.KerasTensor) + for x in self._inputs_struct.values() + ): + # Case where `_nested_inputs` is a plain dict of Inputs. + names = sorted(self._inputs_struct.keys()) + return [ + InputSpec( + shape=shape_with_no_batch_size( + self._inputs_struct[name].shape + ), + allow_last_axis_squeeze=True, + name=name, + ) + for name in names + ] + return None # Deeply nested dict: skip checks. + return [make_spec_for_tensor(x) for x in self.inputs] + + @input_spec.setter + def input_spec(self, value): + self._manual_input_spec = value + + def get_config(self): + if not functional_like_constructor(self.__class__): + # Subclassed networks are not serializable + # (unless serialization is implemented by + # the author of the subclassed network). + return Model.get_config(self) + + config = { + "name": self.name, + "trainable": self.trainable, + } + # Build a map from a layer unique name (make_node_key) + # to the index of the nodes that are saved in the config. + # Only nodes in network_nodes are saved. + node_reindexing_map = {} + for operation in self.operations: + if issubclass(operation.__class__, Functional): + # Functional models start with a pre-existing node + # linking their input to output. 
+ kept_nodes = 1 + else: + kept_nodes = 0 + for original_node_index, node in enumerate( + operation._inbound_nodes + ): + node_key = make_node_key(operation, original_node_index) + if node_key in self._nodes: + # i.e. we mark it to be saved + node_reindexing_map[node_key] = kept_nodes + kept_nodes += 1 + + # serialize and save the layers in layer_configs + layer_configs = [] + for operation in self.operations: # From the earliest layers on. + filtered_inbound_nodes = [] + for original_node_index, node in enumerate( + operation._inbound_nodes + ): + node_key = make_node_key(operation, original_node_index) + if node_key in self._nodes: + # The node is relevant to the model: + # add to filtered_inbound_nodes. + node_data = serialize_node(node, own_nodes=self._nodes) + if node_data is not None: + filtered_inbound_nodes.append(node_data) + + serialize_obj_fn = serialization_lib.serialize_keras_object + if global_state.get_global_attribute("use_legacy_config", False): + # Legacy format serialization used for H5 and SavedModel + serialize_obj_fn = legacy_serialization.serialize_keras_object + layer_config = serialize_obj_fn(operation) + layer_config["name"] = operation.name + layer_config["inbound_nodes"] = filtered_inbound_nodes + layer_configs.append(layer_config) + config["layers"] = layer_configs + + # Gather info about inputs and outputs. 
+ def get_tensor_config(tensor): + operation = tensor._keras_history[0] + node_index = tensor._keras_history[1] + tensor_index = tensor._keras_history[2] + node_key = make_node_key(operation, node_index) + assert node_key in self._nodes + new_node_index = node_reindexing_map[node_key] + return [operation.name, new_node_index, tensor_index] + + def map_tensors(tensors): + if isinstance(tensors, backend.KerasTensor): + return [get_tensor_config(tensors)] + return tree.map_structure(get_tensor_config, tensors) + + config["input_layers"] = map_tensors(self._inputs_struct) + config["output_layers"] = map_tensors(self._outputs_struct) + return copy.deepcopy(config) + + +def functional_from_config(cls, config, custom_objects=None): + """Instantiates a Functional model from its config (from `get_config()`). + + Args: + cls: Class of the model, e.g. a custom subclass of `Model`. + config: Output of `get_config()` for the original model instance. + custom_objects: Optional dict of custom objects. + + Returns: + An instance of `cls`. + """ + # Layer instances created during + # the graph reconstruction process + created_layers = {} + + # Dictionary mapping layer instances to + # node data that specifies a layer call. + # It acts as a queue that maintains any unprocessed + # layer call until it becomes possible to process it + # (i.e. until the input tensors to the call all exist). 
+ unprocessed_nodes = {} + + def add_unprocessed_node(layer, node_data): + """Add node to layer list + + Arg: + layer: layer object + node_data: Node data specifying layer call + """ + if layer not in unprocessed_nodes: + unprocessed_nodes[layer] = [node_data] + else: + unprocessed_nodes[layer].append(node_data) + + def process_node(layer, node_data): + """Reconstruct node by linking to inbound layers + + Args: + layer: Layer to process + node_data: List of layer configs + """ + args, kwargs = deserialize_node(node_data, created_layers) + # Call layer on its inputs, thus creating the node + # and building the layer if needed. + layer(*args, **kwargs) + + def process_layer(layer_data): + """Deserializes a layer and index its inbound nodes. + + Args: + layer_data: layer config dict. + """ + layer_name = layer_data["name"] + + # Instantiate layer. + if "module" not in layer_data: + # Legacy format deserialization (no "module" key) + # used for H5 and SavedModel formats + layer = saving_utils.model_from_config( + layer_data, custom_objects=custom_objects + ) + else: + layer = serialization_lib.deserialize_keras_object( + layer_data, custom_objects=custom_objects + ) + created_layers[layer_name] = layer + + # Gather layer inputs. + inbound_nodes_data = layer_data["inbound_nodes"] + for node_data in inbound_nodes_data: + # We don't process nodes (i.e. make layer calls) + # on the fly because the inbound node may not yet exist, + # in case of layer shared at different topological depths + # (e.g. a model such as A(B(A(B(x))))) + add_unprocessed_node(layer, node_data) + + # Extract config used to instantiate Functional model from the config. The + # remaining config will be passed as keyword arguments to the Model + # constructor. 
+ functional_config = {} + for key in ["layers", "input_layers", "output_layers"]: + functional_config[key] = config.pop(key) + for key in ["name", "trainable"]: + if key in config: + functional_config[key] = config.pop(key) + else: + functional_config[key] = None + + # First, we create all layers and enqueue nodes to be processed + for layer_data in functional_config["layers"]: + process_layer(layer_data) + + # Then we process nodes in order of layer depth. + # Nodes that cannot yet be processed (if the inbound node + # does not yet exist) are re-enqueued, and the process + # is repeated until all nodes are processed. + while unprocessed_nodes: + for layer_data in functional_config["layers"]: + layer = created_layers[layer_data["name"]] + + # Process all nodes in layer, if not yet processed + if layer in unprocessed_nodes: + node_data_list = unprocessed_nodes[layer] + + # Process nodes in order + node_index = 0 + while node_index < len(node_data_list): + node_data = node_data_list[node_index] + try: + process_node(layer, node_data) + + # If the node does not have all inbound layers + # available, stop processing and continue later + except IndexError: + break + + node_index += 1 + + # If not all nodes processed then store unprocessed nodes + if node_index < len(node_data_list): + unprocessed_nodes[layer] = node_data_list[node_index:] + # If all nodes processed remove the layer + else: + del unprocessed_nodes[layer] + + # Create list of input and output tensors and return new class + name = functional_config["name"] + trainable = functional_config["trainable"] + + def get_tensor(layer_name, node_index, tensor_index): + assert layer_name in created_layers + layer = created_layers[layer_name] + if isinstance(layer, Functional): + # Functional models start out with a built-in node. 
+ node_index -= 1 + layer_output_tensors = layer._inbound_nodes[node_index].output_tensors + return layer_output_tensors[tensor_index] + + def map_tensors(tensors): + if ( + isinstance(tensors, list) + and len(tensors) == 3 + and isinstance(tensors[0], str) + ): + # Leaf + return get_tensor(*tensors) + if isinstance(tensors, dict): + return {k: map_tensors(v) for k, v in tensors.items()} + if isinstance(tensors, tuple): + return tuple([map_tensors(v) for v in tensors]) + return [map_tensors(v) for v in tensors] + + input_tensors = map_tensors(functional_config["input_layers"]) + output_tensors = map_tensors(functional_config["output_layers"]) + if isinstance(input_tensors, list) and len(input_tensors) == 1: + input_tensors = input_tensors[0] + if isinstance(output_tensors, list) and len(output_tensors) == 1: + output_tensors = output_tensors[0] + + return cls( + inputs=input_tensors, + outputs=output_tensors, + name=name, + trainable=trainable, + **config, + ) + + +def operation_fn(operation, training): + def call(*args, **kwargs): + if ( + hasattr(operation, "_call_has_training_arg") + and operation._call_has_training_arg + and training is not None + ): + kwargs["training"] = training + return operation(*args, **kwargs) + + return call + + +def functional_like_constructor(cls): + init_args = inspect.getfullargspec(cls.__init__).args[1:] + functional_init_args = inspect.getfullargspec(Functional.__init__).args[1:] + if init_args == functional_init_args: + return True + return False + + +def unpack_singleton(x): + if isinstance(x, (list, tuple)) and len(x) == 1: + return x[0] + return x + + +def serialize_node(node, own_nodes=()): + if not node.input_tensors: + # Does not need to be serialized. + return + + def serialize_keras_tensor(x): + # Serialize KerasTensor while converting + # node indices to only include nodes relevant to `own_nodes`. 
+ if isinstance(x, backend.KerasTensor): + operation, node_index, tensor_index = x._keras_history + irrelevant_node_count = 0 + for i, node in enumerate(operation._inbound_nodes[:node_index]): + node_key = make_node_key(operation, i) + if node_key not in own_nodes: + irrelevant_node_count += 1 + x._keras_history = KerasHistory( + operation, node_index - irrelevant_node_count, tensor_index + ) + serialized = serialization_lib.serialize_keras_object(x) + x._keras_history = KerasHistory(operation, node_index, tensor_index) + return serialized + return x + + args = node.arguments.args + kwargs = node.arguments.kwargs + + args = tree.map_structure(serialize_keras_tensor, args) + kwargs = tree.map_structure(serialize_keras_tensor, kwargs) + return { + "args": serialization_lib.serialize_keras_object(args), + "kwargs": serialization_lib.serialize_keras_object(kwargs), + } + + +def deserialize_node(node_data, created_layers): + """Return (args, kwargs) for calling the node layer.""" + if not node_data: + return [], {} + + if isinstance(node_data, list): + # Legacy case. 
+ input_tensors = [] + for input_data in node_data: + inbound_layer_name = input_data[0] + inbound_node_index = input_data[1] + inbound_tensor_index = input_data[2] + if len(input_data) == 3: + kwargs = {} + elif len(input_data) == 4: + kwargs = input_data[3] + else: + raise ValueError( + "Cannot deserialize the model (invalid config data?)" + ) + inbound_layer = created_layers[inbound_layer_name] + + # Raise an error if the corresponding layer node + # has not yet been created + if len(inbound_layer._inbound_nodes) <= inbound_node_index: + raise IndexError( + "Layer node index out of bounds.\n" + f"inbound_layer = {inbound_layer}\n" + "inbound_layer._inbound_nodes = " + f"{inbound_layer._inbound_nodes}\n" + f"inbound_node_index = {inbound_node_index}" + ) + inbound_node = inbound_layer._inbound_nodes[inbound_node_index] + input_tensors.append( + inbound_node.output_tensors[inbound_tensor_index] + ) + return [unpack_singleton(input_tensors)], kwargs + + args = serialization_lib.deserialize_keras_object(node_data["args"]) + kwargs = serialization_lib.deserialize_keras_object(node_data["kwargs"]) + + def convert_revived_tensor(x): + if isinstance(x, backend.KerasTensor): + history = x._pre_serialization_keras_history + if history is None: + return x + layer = created_layers.get(history[0], None) + if layer is None: + raise ValueError(f"Unknown layer: {history[0]}") + inbound_node_index = history[1] + inbound_tensor_index = history[2] + if len(layer._inbound_nodes) <= inbound_node_index: + raise IndexError( + "Layer node index out of bounds.\n" + f"inbound_layer = {layer}\n" + f"inbound_layer._inbound_nodes = {layer._inbound_nodes}\n" + f"inbound_node_index = {inbound_node_index}" + ) + inbound_node = layer._inbound_nodes[inbound_node_index] + return inbound_node.output_tensors[inbound_tensor_index] + return x + + args = tree.map_structure(convert_revived_tensor, args) + kwargs = tree.map_structure(convert_revived_tensor, kwargs) + return args, kwargs + + +def 
is_input_keras_tensor(x): + ( + operation, + node_index, + _, + ) = x._keras_history + node = operation._inbound_nodes[node_index] + return node.is_input + + +def clone_single_keras_tensor(x): + return backend.KerasTensor( + shape=x.shape, dtype=x.dtype, sparse=x.sparse, name=x.name + "_clone" + ) + + +def clone_keras_tensors(tensors, kt_id_mapping): + def swap(x): + if not isinstance(x, backend.KerasTensor): + return x + if id(x) in kt_id_mapping: + return kt_id_mapping[id(x)] + new_x = clone_single_keras_tensor(x) + kt_id_mapping[id(x)] = new_x + return new_x + + return tree.map_structure(swap, tensors) + + +def find_nodes_by_inputs_and_outputs(inputs, outputs): + nodes, _ = _build_map(inputs, outputs) + return nodes + + +def clone_graph_nodes(inputs, outputs): + """Clone the `Node` between the inputs and output tensors. + + This function is used to create a new functional model from any intermediate + Keras tensors. The clone of the nodes mimic the behavior of reconstructing + the functional graph network by re-executing all the `__call__()` methods. + The cloned nodes will be appended to the layers. + + Note that a new `keras.Input` will be created for any items in the + `inputs` + + Args: + inputs: A nested structure of `KerasTensor` instances. + outputs: A nested structure of `KerasTensor` instances. + + Returns: + A pair of inputs and outputs, with cloned `KerasTensor` instances. + They can be used to create a new functional model. + """ + nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs) + cloned_inputs = [] + cloned_outputs = [] + # We not only need to create copies of Nodes (mimic the calls), also need to + # clone Keras tensors to avoid the override of _keras_history attached on + # the Keras tensor. The following dict is used to track any keras tensor we + # cloned The key is the string ID of the original keras tensor, and value is + # the cloned Keras tensor instance. 
+ kt_id_mapping = {} + op_id_mapping = {} + + for kt_input in tree.flatten(inputs): + if is_input_keras_tensor(kt_input): + # For any existing Keras tensor from keras.Input, leave them as is. + cloned_inputs.append(kt_input) + kt_id_mapping[id(kt_input)] = kt_input + else: + # We need to create a new Keras tensor for any intermediate tensor + cloned_input = Input( + batch_shape=kt_input.shape, + dtype=kt_input.dtype, + sparse=kt_input.sparse, + name=kt_input.name + "CLONE", + ) + cloned_inputs.append(cloned_input) + kt_id_mapping[id(kt_input)] = cloned_input + op_id_mapping[id(kt_input._keras_history[0])] = ( + cloned_input._keras_history[0] + ) + cloned_inputs = tree.pack_sequence_as(inputs, cloned_inputs) + + for kt_output in tree.flatten(outputs): + cpy = clone_single_keras_tensor(kt_output) + # We reuse the _keras_history here, which contains the old information. + cpy._keras_history = kt_output._keras_history + cloned_outputs.append(cpy) + kt_id_mapping[id(kt_output)] = cpy + cloned_outputs = tree.pack_sequence_as(outputs, cloned_outputs) + + for node in nodes_to_clone: + if id(node.operation) in op_id_mapping: + operation = op_id_mapping[id(node.operation)] + else: + operation = node.operation + # Clone any Keras tensor to avoid override of _keras_history + # Or reuse an existing Keras tensor if it has already been cloned. + output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping) + if not isinstance(operation, InputLayer): + call_args_copy = clone_keras_tensors( + node.arguments.args, kt_id_mapping + ) + call_kwargs_copy = clone_keras_tensors( + node.arguments.kwargs, kt_id_mapping + ) + else: + call_args_copy = () + call_kwargs_copy = {} + # Creating new nodes based on the existing node information. Node wires + # itself to inbound and outbound layers. 
The Node constructor actually + # updates this layer's self._inbound_nodes, sets _keras_history on the + # outputs, and adds itself to the `_outbound_nodes` of the layers that + # produced the inputs to this layer call. + Node( + operation, + call_args=call_args_copy, + call_kwargs=call_kwargs_copy, + outputs=output_copy, + ) + return cloned_inputs, cloned_outputs diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/model.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/model.py new file mode 100644 index 0000000000000000000000000000000000000000..46f1030765445f0a5e3be87f7be0cb2b0c62a2fc --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/model.py @@ -0,0 +1,839 @@ +import inspect +import json +import typing +import warnings + +from keras.src import backend +from keras.src import utils +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.models.variable_mapping import map_saveable_variables +from keras.src.saving import saving_api +from keras.src.trainers import trainer as base_trainer +from keras.src.utils import summary_utils +from keras.src.utils import traceback_utils + +if backend.backend() == "tensorflow": + from keras.src.backend.tensorflow.trainer import ( + TensorFlowTrainer as Trainer, + ) +elif backend.backend() == "jax": + from keras.src.backend.jax.trainer import JAXTrainer as Trainer +elif backend.backend() == "torch": + from keras.src.backend.torch.trainer import TorchTrainer as Trainer +elif backend.backend() == "numpy": + from keras.src.backend.numpy.trainer import NumpyTrainer as Trainer +elif backend.backend() == "openvino": + from keras.src.backend.openvino.trainer import OpenVINOTrainer as Trainer +else: + raise RuntimeError( + f"Backend '{backend.backend()}' must implement the Trainer class." 
+ ) + + +@keras_export(["keras.Model", "keras.models.Model"]) +class Model(Trainer, base_trainer.Trainer, Layer): + """A model grouping layers into an object with training/inference features. + + There are three ways to instantiate a `Model`: + + ## With the "Functional API" + + You start from `Input`, + you chain layer calls to specify the model's forward pass, + and finally, you create your model from inputs and outputs: + + ```python + inputs = keras.Input(shape=(37,)) + x = keras.layers.Dense(32, activation="relu")(inputs) + outputs = keras.layers.Dense(5, activation="softmax")(x) + model = keras.Model(inputs=inputs, outputs=outputs) + ``` + + Note: Only dicts, lists, and tuples of input tensors are supported. Nested + inputs are not supported (e.g. lists of list or dicts of dict). + + A new Functional API model can also be created by using the + intermediate tensors. This enables you to quickly extract sub-components + of the model. + + Example: + + ```python + inputs = keras.Input(shape=(None, None, 3)) + processed = keras.layers.RandomCrop(width=128, height=128)(inputs) + conv = keras.layers.Conv2D(filters=32, kernel_size=3)(processed) + pooling = keras.layers.GlobalAveragePooling2D()(conv) + feature = keras.layers.Dense(10)(pooling) + + full_model = keras.Model(inputs, feature) + backbone = keras.Model(processed, conv) + activations = keras.Model(conv, feature) + ``` + + Note that the `backbone` and `activations` models are not + created with `keras.Input` objects, but with the tensors that originate + from `keras.Input` objects. Under the hood, the layers and weights will + be shared across these models, so that user can train the `full_model`, and + use `backbone` or `activations` to do feature extraction. + The inputs and outputs of the model can be nested structures of tensors as + well, and the created models are standard Functional API models that support + all the existing APIs. 
+ + ## By subclassing the `Model` class + + In that case, you should define your + layers in `__init__()` and you should implement the model's forward pass + in `call()`. + + ```python + class MyModel(keras.Model): + def __init__(self): + super().__init__() + self.dense1 = keras.layers.Dense(32, activation="relu") + self.dense2 = keras.layers.Dense(5, activation="softmax") + + def call(self, inputs): + x = self.dense1(inputs) + return self.dense2(x) + + model = MyModel() + ``` + + If you subclass `Model`, you can optionally have + a `training` argument (boolean) in `call()`, which you can use to specify + a different behavior in training and inference: + + ```python + class MyModel(keras.Model): + def __init__(self): + super().__init__() + self.dense1 = keras.layers.Dense(32, activation="relu") + self.dense2 = keras.layers.Dense(5, activation="softmax") + self.dropout = keras.layers.Dropout(0.5) + + def call(self, inputs, training=False): + x = self.dense1(inputs) + x = self.dropout(x, training=training) + return self.dense2(x) + + model = MyModel() + ``` + + Once the model is created, you can config the model with losses and metrics + with `model.compile()`, train the model with `model.fit()`, or use the model + to do prediction with `model.predict()`. + + ## With the `Sequential` class + + In addition, `keras.Sequential` is a special case of model where + the model is purely a stack of single-input, single-output layers. 
+ + ```python + model = keras.Sequential([ + keras.Input(shape=(None, None, 3)), + keras.layers.Conv2D(filters=32, kernel_size=3), + ]) + ``` + """ + + def __new__(cls, *args, **kwargs): + # Signature detection for usage of `Model` as a `Functional` + if functional_init_arguments(args, kwargs) and cls == Model: + from keras.src.models.functional import Functional + + return Functional.__new__(Functional, *args, **kwargs) + return typing.cast(cls, super().__new__(cls)) + + def __init__(self, *args, **kwargs): + Trainer.__init__(self) + from keras.src.models import functional + + # Signature detection for usage of a `Model` subclass + # as a `Functional` subclass + if functional_init_arguments(args, kwargs): + inject_functional_model_class(self.__class__) + functional.Functional.__init__(self, *args, **kwargs) + else: + Layer.__init__(self, *args, **kwargs) + + def call(self, *args, **kwargs): + raise NotImplementedError( + f"Model {self.__class__.__name__} does not have a `call()` " + "method implemented." + ) + + @property + def layers(self): + return list(self._flatten_layers(include_self=False, recursive=False)) + + @layers.setter + def layers(self, _): + raise AttributeError( + "`Model.layers` attribute is reserved and should not be used. " + "Please use another name." + ) + + @traceback_utils.filter_traceback + def get_layer(self, name=None, index=None): + """Retrieves a layer based on either its name (unique) or index. + + If `name` and `index` are both provided, `index` will take precedence. + Indices are based on order of horizontal graph traversal (bottom-up). + + Args: + name: String, name of layer. + index: Integer, index of layer. + + Returns: + A layer instance. + """ + if index is not None and name is not None: + raise ValueError( + "Provide only a layer name or a layer index. Received: " + f"index={index}, name={name}." 
+ ) + if index is not None: + if len(self.layers) <= index: + raise ValueError( + f"Was asked to retrieve layer at index {index}" + f" but model only has {len(self.layers)}" + " layers." + ) + else: + return self.layers[index] + + if name is not None: + for layer in self.layers: + if layer.name == name: + return layer + raise ValueError( + f"No such layer: {name}. Existing layers are: " + f"{list(layer.name for layer in self.layers)}." + ) + raise ValueError( + "Provide either a layer name or layer index at `get_layer`." + ) + + @traceback_utils.filter_traceback + def summary( + self, + line_length=None, + positions=None, + print_fn=None, + expand_nested=False, + show_trainable=False, + layer_range=None, + ): + """Prints a string summary of the network. + + Args: + line_length: Total length of printed lines + (e.g. set this to adapt the display to different + terminal window sizes). + positions: Relative or absolute positions of log elements + in each line. If not provided, becomes + `[0.3, 0.6, 0.70, 1.]`. Defaults to `None`. + print_fn: Print function to use. By default, prints to `stdout`. + If `stdout` doesn't work in your environment, change to `print`. + It will be called on each line of the summary. + You can set it to a custom function + in order to capture the string summary. + expand_nested: Whether to expand the nested models. + Defaults to `False`. + show_trainable: Whether to show if a layer is trainable. + Defaults to `False`. + layer_range: a list or tuple of 2 strings, + which is the starting layer name and ending layer name + (both inclusive) indicating the range of layers to be printed + in summary. It also accepts regex patterns instead of exact + names. In this case, the start predicate will be + the first element that matches `layer_range[0]` + and the end predicate will be the last element + that matches `layer_range[1]`. + By default `None` considers all layers of the model. 
+ + Raises: + ValueError: if `summary()` is called before the model is built. + """ + summary_utils.print_summary( + self, + line_length=line_length, + positions=positions, + print_fn=print_fn, + expand_nested=expand_nested, + show_trainable=show_trainable, + layer_range=layer_range, + ) + + @traceback_utils.filter_traceback + def save(self, filepath, overwrite=True, zipped=None, **kwargs): + """Saves a model as a `.keras` file. + + Args: + filepath: `str` or `pathlib.Path` object. + The path where to save the model. Must end in `.keras` + (unless saving the model as an unzipped directory + via `zipped=False`). + overwrite: Whether we should overwrite any existing model at + the target location, or instead ask the user via + an interactive prompt. + zipped: Whether to save the model as a zipped `.keras` + archive (default when saving locally), or as an + unzipped directory (default when saving on the + Hugging Face Hub). + + Example: + + ```python + model = keras.Sequential( + [ + keras.layers.Dense(5, input_shape=(3,)), + keras.layers.Softmax(), + ], + ) + model.save("model.keras") + loaded_model = keras.saving.load_model("model.keras") + x = keras.random.uniform((10, 3)) + assert np.allclose(model.predict(x), loaded_model.predict(x)) + ``` + + Note that `model.save()` is an alias for `keras.saving.save_model()`. + + The saved `.keras` file contains: + + - The model's configuration (architecture) + - The model's weights + - The model's optimizer's state (if any) + + Thus models can be reinstantiated in the exact same state. + """ + return saving_api.save_model( + self, filepath, overwrite=overwrite, zipped=zipped, **kwargs + ) + + @traceback_utils.filter_traceback + def save_weights(self, filepath, overwrite=True): + """Saves all layer weights to a `.weights.h5` file. + + Args: + filepath: `str` or `pathlib.Path` object. + Path where to save the model. Must end in `.weights.h5`. 
+ overwrite: Whether we should overwrite any existing model + at the target location, or instead ask the user + via an interactive prompt. + """ + return saving_api.save_weights(self, filepath, overwrite=overwrite) + + @traceback_utils.filter_traceback + def load_weights(self, filepath, skip_mismatch=False, **kwargs): + """Load weights from a file saved via `save_weights()`. + + Weights are loaded based on the network's + topology. This means the architecture should be the same as when the + weights were saved. Note that layers that don't have weights are not + taken into account in the topological ordering, so adding or removing + layers is fine as long as they don't have weights. + + **Partial weight loading** + + If you have modified your model, for instance by adding a new layer + (with weights) or by changing the shape of the weights of a layer, + you can choose to ignore errors and continue loading + by setting `skip_mismatch=True`. In this case any layer with + mismatching weights will be skipped. A warning will be displayed + for each skipped layer. + + Args: + filepath: String, path to the weights file to load. + It can either be a `.weights.h5` file + or a legacy `.h5` weights file. + skip_mismatch: Boolean, whether to skip loading of layers where + there is a mismatch in the number of weights, or a mismatch in + the shape of the weights. + """ + saving_api.load_weights( + self, filepath, skip_mismatch=skip_mismatch, **kwargs + ) + + def quantize(self, mode, **kwargs): + """Quantize the weights of the model. + + Note that the model must be built first before calling this method. + `quantize` will recursively call `quantize(mode)` in all layers and + will be skipped if the layer doesn't implement the function. + + Args: + mode: The mode of the quantization. Only 'int8' is supported at this + time. 
+ """ + from keras.src.dtype_policies import QUANTIZATION_MODES + + type_check = kwargs.pop("type_check", True) + if kwargs: + raise ValueError( + "Unrecognized keyword arguments " + f"passed to {self.__class__.__name__}: {kwargs}" + ) + if mode not in QUANTIZATION_MODES: + raise ValueError( + "Invalid quantization mode. " + f"Expected one of {QUANTIZATION_MODES}. Received: mode={mode}" + ) + mode_changed = False + for layer in self._flatten_layers(): + list_of_sublayers = list(layer._flatten_layers()) + if len(list_of_sublayers) == 1: # leaves of the model + try: + layer.quantize(mode, type_check=type_check) + mode_changed = True + except NotImplementedError as e: + warnings.warn(str(e)) + # We need to set these functions to `None` to remake them for changed + # call function + if mode_changed: + self.train_function = None + self.test_function = None + self.predict_function = None + + def build_from_config(self, config): + if not config: + return + status = False + if "input_shape" in config: + # Case: all inputs are in the first arg (possibly nested). + if utils.is_default(self.build): + status = self._build_by_run_for_single_pos_arg( + config["input_shape"] + ) + else: + try: + self.build(config["input_shape"]) + status = True + except: + pass + self._build_shapes_dict = config + + elif "shapes_dict" in config: + # Case: inputs were recorded as multiple keyword arguments. + if utils.is_default(self.build): + status = self._build_by_run_for_kwargs(config["shapes_dict"]) + else: + try: + self.build(**config["shapes_dict"]) + status = True + except: + pass + self._build_shapes_dict = config["shapes_dict"] + + if not status: + warnings.warn( + f"Model '{self.name}' had a build config, but the model " + "cannot be built automatically in " + "`build_from_config(config)`. 
" + "You should implement " + "`def build_from_config(self, config)`, " + "and you might also want to implement the method " + " that generates the config at saving time, " + "`def get_build_config(self)`. " + "The method `build_from_config()` is meant to " + "create the state of the model (i.e. its variables) " + "upon deserialization.", + stacklevel=2, + ) + + def to_json(self, **kwargs): + """Returns a JSON string containing the network configuration. + + To load a network from a JSON save file, use + `keras.models.model_from_json(json_string, custom_objects={...})`. + + Args: + **kwargs: Additional keyword arguments to be passed to + `json.dumps()`. + + Returns: + A JSON string. + """ + from keras.src.saving import serialization_lib + + model_config = serialization_lib.serialize_keras_object(self) + return json.dumps(model_config, **kwargs) + + def export( + self, + filepath, + format="tf_saved_model", + verbose=True, + input_signature=None, + **kwargs, + ): + """Export the model as an artifact for inference. + + Args: + filepath: `str` or `pathlib.Path` object. The path to save the + artifact. + format: `str`. The export format. Supported values: + `"tf_saved_model"` and `"onnx"`. Defaults to + `"tf_saved_model"`. + verbose: `bool`. Whether to print a message during export. Defaults + to `True`. + input_signature: Optional. Specifies the shape and dtype of the + model inputs. Can be a structure of `keras.InputSpec`, + `tf.TensorSpec`, `backend.KerasTensor`, or backend tensor. If + not provided, it will be automatically computed. Defaults to + `None`. + **kwargs: Additional keyword arguments: + - Specific to the JAX backend and `format="tf_saved_model"`: + - `is_static`: Optional `bool`. Indicates whether `fn` is + static. Set to `False` if `fn` involves state updates + (e.g., RNG seeds and counters). + - `jax2tf_kwargs`: Optional `dict`. Arguments for + `jax2tf.convert`. 
See the documentation for + [`jax2tf.convert`]( + https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md). + If `native_serialization` and `polymorphic_shapes` are + not provided, they will be automatically computed. + + **Note:** This feature is currently supported only with TensorFlow, JAX + and Torch backends. + + Examples: + + Here's how to export a TensorFlow SavedModel for inference. + + ```python + # Export the model as a TensorFlow SavedModel artifact + model.export("path/to/location", format="tf_saved_model") + + # Load the artifact in a different process/environment + reloaded_artifact = tf.saved_model.load("path/to/location") + predictions = reloaded_artifact.serve(input_data) + ``` + + Here's how to export an ONNX for inference. + + ```python + # Export the model as a ONNX artifact + model.export("path/to/location", format="onnx") + + # Load the artifact in a different process/environment + ort_session = onnxruntime.InferenceSession("path/to/location") + ort_inputs = { + k.name: v for k, v in zip(ort_session.get_inputs(), input_data) + } + predictions = ort_session.run(None, ort_inputs) + ``` + """ + from keras.src.export import export_onnx + from keras.src.export import export_saved_model + + available_formats = ("tf_saved_model", "onnx") + if format not in available_formats: + raise ValueError( + f"Unrecognized format={format}. Supported formats are: " + f"{list(available_formats)}." 
+ ) + + if format == "tf_saved_model": + export_saved_model( + self, + filepath, + verbose, + input_signature=input_signature, + **kwargs, + ) + elif format == "onnx": + export_onnx( + self, + filepath, + verbose, + input_signature=input_signature, + **kwargs, + ) + + @classmethod + def from_config(cls, config, custom_objects=None): + from keras.src.models.functional import Functional + + functional_config_keys = [ + "name", + "layers", + "input_layers", + "output_layers", + ] + is_functional_config = all( + key in config for key in functional_config_keys + ) + argspec = inspect.getfullargspec(cls.__init__) + functional_init_args = inspect.getfullargspec(Functional.__init__).args[ + 1: + ] + revivable_as_functional = ( + cls in {Functional, Model} + or argspec.args[1:] == functional_init_args + or (argspec.varargs == "args" and argspec.varkw == "kwargs") + ) + if is_functional_config and revivable_as_functional: + # Revive Functional model + # (but not Functional subclasses with a custom __init__) + from keras.src.models.functional import functional_from_config + + return functional_from_config( + cls, config, custom_objects=custom_objects + ) + + # Either the model has a custom __init__, or the config + # does not contain all the information necessary to + # revive a Functional model. This happens when the user creates + # subclassed models where `get_config()` is returning + # insufficient information to be considered a Functional model. + # In this case, we fall back to provide all config into the + # constructor of the class. + try: + return cls(**config) + except TypeError as e: + raise TypeError( + "Unable to revive model from config. When overriding " + "the `get_config()` method, make sure that the " + "returned config contains all items used as arguments " + f"in the constructor to {cls}, " + "which is the default behavior. 
" + "You can override this default behavior by defining a " + "`from_config(cls, config)` class method to specify " + "how to create an " + f"instance of {cls.__name__} from its config.\n\n" + f"Received config={config}\n\n" + f"Error encountered during deserialization: {e}" + ) + + def _get_variable_map(self): + store = {} + map_saveable_variables(self, store=store, visited_saveables=set()) + return store + + def get_state_tree(self, value_format="backend_tensor"): + """Retrieves tree-like structure of model variables. + + This method allows retrieval of different model variables (trainable, + non-trainable, optimizer, and metrics). The variables are returned in a + nested dictionary format, where the keys correspond to the variable + names and the values are the nested representations of the variables. + + Returns: + dict: A dictionary containing the nested representations of the + requested variables. The keys are the variable names, and the + values are the corresponding nested dictionaries. + value_format: One of `"backend_tensor"`, `"numpy_array"`. + The kind of array to return as the leaves of the nested + state tree. 
+ + Example: + + ```python + model = keras.Sequential([ + keras.Input(shape=(1,), name="my_input"), + keras.layers.Dense(1, activation="sigmoid", name="my_dense"), + ], name="my_sequential") + model.compile(optimizer="adam", loss="mse", metrics=["mae"]) + model.fit(np.array([[1.0]]), np.array([[1.0]])) + state_tree = model.get_state_tree() + ``` + + The `state_tree` dictionary returned looks like: + + ``` + { + 'metrics_variables': { + 'loss': { + 'count': ..., + 'total': ..., + }, + 'mean_absolute_error': { + 'count': ..., + 'total': ..., + } + }, + 'trainable_variables': { + 'my_sequential': { + 'my_dense': { + 'bias': ..., + 'kernel': ..., + } + } + }, + 'non_trainable_variables': {}, + 'optimizer_variables': { + 'adam': { + 'iteration': ..., + 'learning_rate': ..., + 'my_sequential_my_dense_bias_momentum': ..., + 'my_sequential_my_dense_bias_velocity': ..., + 'my_sequential_my_dense_kernel_momentum': ..., + 'my_sequential_my_dense_kernel_velocity': ..., + } + } + } + } + ``` + """ + variables = {} + variables["trainable_variables"] = self._create_nested_dict( + self.trainable_variables, value_format + ) + variables["non_trainable_variables"] = self._create_nested_dict( + self.non_trainable_variables, value_format + ) + variables["optimizer_variables"] = self._create_nested_dict( + self.optimizer.variables, value_format + ) + variables["metrics_variables"] = self._create_nested_dict( + self.metrics_variables, value_format + ) + return variables + + def _create_nested_dict(self, variables, value_format): + flat_dict = {} + for v in variables: + if v.path in flat_dict: + raise ValueError( + "The following variable path is found twice in the model: " + f"'{v.path}'. `get_state_tree()` can only be called when " + "all variable paths are unique. Make sure to give unique " + "names to your layers (and other objects)." 
+ ) + if value_format == "backend_tensor": + flat_dict[v.path] = v.value + elif value_format == "numpy_array": + flat_dict[v.path] = v.numpy() + else: + raise ValueError( + "Invalid `value_format` argument. Expected one of " + "{'numpy_array', 'backend_tensor'}. Received: " + f"value_format={value_format}" + ) + + nested_dict = {} + for path, value in flat_dict.items(): + parts = path.split("/") + current_dict = nested_dict + for part in parts[:-1]: + if part not in current_dict: + current_dict[part] = {} + current_dict = current_dict[part] + current_dict[parts[-1]] = value + + return nested_dict + + def set_state_tree(self, state_tree): + """Assigns values to variables of the model. + + This method takes a dictionary of nested variable values, which + represents the state tree of the model, and assigns them to the + corresponding variables of the model. The dictionary keys represent the + variable names (e.g., `'trainable_variables'`, `'optimizer_variables'`), + and the values are nested dictionaries containing the variable + paths and their corresponding values. + + Args: + state_tree: A dictionary representing the state tree of the model. + The keys are the variable names, and the values are nested + dictionaries representing the variable paths and their values. 
+ """ + for k, v in state_tree.items(): + path_value_dict = self._flatten_nested_dict(v) + if k == "trainable_variables": + self._assign_variable_values( + self.trainable_variables, path_value_dict + ) + elif k == "non_trainable_variables": + self._assign_variable_values( + self.non_trainable_variables, path_value_dict + ) + elif k == "optimizer_variables": + self._assign_variable_values( + self.optimizer.variables, path_value_dict + ) + elif k == "metrics_variables": + self._assign_variable_values( + self.metrics_variables, path_value_dict + ) + else: + raise ValueError(f"Unknown variable name: {k}") + + def _assign_variable_values(self, variables, path_value_dict): + for path, value in path_value_dict.items(): + for variable in variables: + if variable.path == path: + variable.assign(value) + + def _flatten_nested_dict(self, nested_dict): + flat_dict = {} + + def _flatten(current_dict, prefix=""): + for key, value in current_dict.items(): + if isinstance(value, dict): + _flatten(value, prefix + key + "/") + else: + flat_dict[prefix + key] = value + + _flatten(nested_dict) + return flat_dict + + +@keras_export("keras.models.model_from_json") +def model_from_json(json_string, custom_objects=None): + """Parses a JSON model configuration string and returns a model instance. + + Example: + + >>> model = keras.Sequential([ + ... keras.layers.Dense(5, input_shape=(3,)), + ... keras.layers.Softmax()]) + >>> config = model.to_json() + >>> loaded_model = keras.models.model_from_json(config) + + Args: + json_string: JSON string encoding a model configuration. + custom_objects: Optional dictionary mapping names + (strings) to custom classes or functions to be + considered during deserialization. + + Returns: + A Keras model instance (uncompiled). 
+ """ + from keras.src.saving import serialization_lib + + model_config = json.loads(json_string) + return serialization_lib.deserialize_keras_object( + model_config, custom_objects=custom_objects + ) + + +def functional_init_arguments(args, kwargs): + return ( + (len(args) == 2) + or (len(args) == 1 and "outputs" in kwargs) + or ("inputs" in kwargs and "outputs" in kwargs) + ) + + +def inject_functional_model_class(cls): + """Inject `Functional` into the hierarchy of this class if needed.""" + from keras.src.models import functional + + if cls is Model: + return functional.Functional + # In case there is any multiple inheritance, we stop injecting the + # class if keras model is not in its class hierarchy. + if cls is object: + return object + + cls.__bases__ = tuple( + inject_functional_model_class(base) for base in cls.__bases__ + ) + # Trigger any `__new__` class swapping that needed to happen on `Functional` + # but did not because functional was not in the class hierarchy. + cls.__new__(cls) + + return cls diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/sequential.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..5815add1c142828709bc386160056417b88acf1c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/sequential.py @@ -0,0 +1,367 @@ +import copy +import inspect +import typing + +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state +from keras.src.backend.common import standardize_shape +from keras.src.layers.core.input_layer import InputLayer +from keras.src.layers.layer import Layer +from keras.src.legacy.saving import saving_utils +from keras.src.legacy.saving import serialization as legacy_serialization +from keras.src.models.functional import Functional 
+from keras.src.models.model import Model +from keras.src.saving import serialization_lib + + +@keras_export(["keras.Sequential", "keras.models.Sequential"]) +class Sequential(Model): + """`Sequential` groups a linear stack of layers into a `Model`. + + Examples: + + ```python + model = keras.Sequential() + model.add(keras.Input(shape=(16,))) + model.add(keras.layers.Dense(8)) + + # Note that you can also omit the initial `Input`. + # In that case the model doesn't have any weights until the first call + # to a training/evaluation method (since it isn't yet built): + model = keras.Sequential() + model.add(keras.layers.Dense(8)) + model.add(keras.layers.Dense(4)) + # model.weights not created yet + + # Whereas if you specify an `Input`, the model gets built + # continuously as you are adding layers: + model = keras.Sequential() + model.add(keras.Input(shape=(16,))) + model.add(keras.layers.Dense(8)) + len(model.weights) # Returns "2" + + # When using the delayed-build pattern (no input shape specified), you can + # choose to manually build your model by calling + # `build(batch_input_shape)`: + model = keras.Sequential() + model.add(keras.layers.Dense(8)) + model.add(keras.layers.Dense(4)) + model.build((None, 16)) + len(model.weights) # Returns "4" + + # Note that when using the delayed-build pattern (no input shape specified), + # the model gets built the first time you call `fit`, `eval`, or `predict`, + # or the first time you call the model on some input data. 
+ model = keras.Sequential() + model.add(keras.layers.Dense(8)) + model.add(keras.layers.Dense(1)) + model.compile(optimizer='sgd', loss='mse') + # This builds the model for the first time: + model.fit(x, y, batch_size=32, epochs=10) + ``` + """ + + def __new__(cls, *args, **kwargs): + return typing.cast(cls, super().__new__(cls)) + + def __init__(self, layers=None, trainable=True, name=None): + super().__init__(trainable=trainable, name=name) + self._functional = None + self._layers = [] + if layers: + for layer in layers: + self.add(layer, rebuild=False) + self._maybe_rebuild() + + def add(self, layer, rebuild=True): + """Adds a layer instance on top of the layer stack. + + Args: + layer: layer instance. + """ + # Legacy case: if the first layer has an input_shape arg, + # use it to build an InputLayer. + if not self._layers: + if getattr(layer, "_input_shape_arg", None) is not None: + self.add(InputLayer(shape=layer._input_shape_arg)) + + # If we are passed a Keras tensor created by keras.Input(), we + # extract the input layer from its keras history and use that. + if hasattr(layer, "_keras_history"): + origin_layer = layer._keras_history[0] + if isinstance(origin_layer, InputLayer): + layer = origin_layer + if not isinstance(layer, Layer): + raise ValueError( + "Only instances of `keras.Layer` can be " + f"added to a Sequential model. Received: {layer} " + f"(of type {type(layer)})" + ) + if not self._is_layer_name_unique(layer): + raise ValueError( + "All layers added to a Sequential model " + f"should have unique names. Name '{layer.name}' is already " + "the name of a layer in this model. Update the `name` argument " + "to pass a unique name." + ) + if ( + isinstance(layer, InputLayer) + and self._layers + and isinstance(self._layers[0], InputLayer) + ): + raise ValueError( + f"Sequential model '{self.name}' has already been configured " + f"to use input shape {self._layers[0].batch_shape}. You cannot " + f"add a different Input layer to it." 
+ ) + + self._layers.append(layer) + if rebuild: + self._maybe_rebuild() + else: + self.built = False + self._functional = None + + def pop(self, rebuild=True): + """Removes the last layer in the model.""" + layer = self._layers.pop() + self.built = False + self._functional = None + if rebuild: + self._maybe_rebuild() + return layer + + def _maybe_rebuild(self): + self.built = False + self._functional = None + if isinstance(self._layers[0], InputLayer) and len(self._layers) > 1: + input_shape = self._layers[0].batch_shape + self.build(input_shape) + elif hasattr(self._layers[0], "input_shape") and len(self._layers) > 1: + # We can build the Sequential model if the first layer has the + # `input_shape` property. This is most commonly found in Functional + # model. + input_shape = self._layers[0].input_shape + self.build(input_shape) + + def _lock_state(self): + # Unlike other layers, Sequential is mutable after build. + pass + + def _obj_type(self): + return "Sequential" + + def build(self, input_shape=None): + try: + input_shape = standardize_shape(input_shape) + except: + # Do not attempt to build if the model does not have a single + # input tensor. + return + if not self._layers: + raise ValueError( + f"Sequential model {self.name} cannot be built because it has " + "no layers. Call `model.add(layer)`." + ) + if isinstance(self._layers[0], InputLayer): + if self._layers[0].batch_shape != input_shape: + raise ValueError( + f"Sequential model '{self.name}' has already been " + "configured to use input shape " + f"{self._layers[0].batch_shape}. You cannot build it " + f"with input_shape {input_shape}" + ) + else: + dtype = self._layers[0].compute_dtype + self._layers = [ + InputLayer(batch_shape=input_shape, dtype=dtype) + ] + self._layers + + # Build functional model + inputs = self._layers[0].output + x = inputs + for layer in self._layers[1:]: + try: + x = layer(x) + except NotImplementedError: + # Can happen if shape inference is not implemented. 
+ # TODO: consider reverting inbound nodes on layers processed. + return + except TypeError as e: + signature = inspect.signature(layer.call) + positional_args = [ + param + for param in signature.parameters.values() + if param.default == inspect.Parameter.empty + ] + if len(positional_args) != 1: + raise ValueError( + "Layers added to a Sequential model " + "can only have a single positional argument, " + f"the input tensor. Layer {layer.__class__.__name__} " + f"has multiple positional arguments: {positional_args}" + ) + raise e + outputs = x + self._functional = Functional(inputs=inputs, outputs=outputs) + self.built = True + + def call(self, inputs, training=None, mask=None): + if self._functional: + return self._functional.call(inputs, training=training, mask=mask) + + # Fallback: Just apply the layer sequence. + # This typically happens if `inputs` is a nested struct. + for layer in self.layers: + # During each iteration, `inputs` are the inputs to `layer`, and + # `outputs` are the outputs of `layer` applied to `inputs`. At the + # end of each iteration `inputs` is set to `outputs` to prepare for + # the next layer. + kwargs = {} + if layer._call_has_mask_arg: + kwargs["mask"] = mask + if layer._call_has_training_arg and training is not None: + kwargs["training"] = training + outputs = layer(inputs, **kwargs) + inputs = outputs + + mask = tree.map_structure(backend.get_keras_mask, outputs) + return outputs + + @property + def layers(self): + # Historically, `sequential.layers` only returns layers that were added + # via `add`, and omits the auto-generated `InputLayer` that comes at the + # bottom of the stack. + layers = self._layers + if layers and isinstance(layers[0], InputLayer): + return layers[1:] + return layers[:] + + @layers.setter + def layers(self, _): + raise AttributeError( + "`Sequential.layers` attribute is reserved and should not be used. " + "Use `add()` and `pop()` to change the layers in this model." 
+ ) + + def compute_output_spec(self, inputs, training=None, mask=None): + if self._functional: + return self._functional.compute_output_spec( + inputs, training=training, mask=mask + ) + # Direct application + for layer in self.layers: + outputs = layer.compute_output_spec( + inputs, training=training + ) # Ignore mask + inputs = outputs + return outputs + + def compute_output_shape(self, input_shape): + if self._functional: + return self._functional.compute_output_shape(input_shape) + # Direct application + for layer in self.layers: + output_shape = layer.compute_output_shape(input_shape) + input_shape = output_shape + return output_shape + + @property + def input_shape(self): + if self._functional: + return self._functional.input_shape + raise AttributeError( + f"Sequential model '{self.name}' has no defined input shape yet." + ) + + @property + def output_shape(self): + if self._functional: + return self._functional.output_shape + raise AttributeError( + f"Sequential model '{self.name}' has no defined output shape yet." + ) + + @property + def inputs(self): + if self._functional: + return self._functional.inputs + raise AttributeError( + f"Sequential model '{self.name}' has no defined inputs yet." + ) + + @property + def outputs(self): + if self._functional: + return self._functional.outputs + raise AttributeError( + f"Sequential model '{self.name}' has no defined outputs yet." + ) + + @property + def input_dtype(self): + # Sequential.__call__ will try to convert its inputs + # to the dtype expected by its input layer, if any. 
+ layers = self._layers + if layers and isinstance(layers[0], InputLayer): + return layers[0].dtype + return super().input_dtype + + def _is_layer_name_unique(self, layer): + for ref_layer in self._layers: + if layer.name == ref_layer.name and ref_layer is not layer: + return False + return True + + def get_config(self): + serialize_fn = serialization_lib.serialize_keras_object + if global_state.get_global_attribute("use_legacy_config", False): + # Legacy format serialization used for H5 and SavedModel formats + serialize_fn = legacy_serialization.serialize_keras_object + layer_configs = [] + for layer in super().layers: + # `super().layers` include the InputLayer if available (it is + # filtered out of `self.layers`). + layer_configs.append(serialize_fn(layer)) + config = Model.get_config(self) + config["name"] = self.name + config["layers"] = copy.deepcopy(layer_configs) + if self._functional is not None: + config["build_input_shape"] = self._layers[0].batch_shape + return config + + @classmethod + def from_config(cls, config, custom_objects=None): + if "name" in config: + name = config["name"] + build_input_shape = config.get("build_input_shape") + layer_configs = config["layers"] + else: + name = None + layer_configs = config + model = cls(name=name) + for layer_config in layer_configs: + if "module" not in layer_config: + # Legacy format deserialization (no "module" key) + # used for H5 and SavedModel formats + layer = saving_utils.model_from_config( + layer_config, + custom_objects=custom_objects, + ) + else: + layer = serialization_lib.deserialize_keras_object( + layer_config, + custom_objects=custom_objects, + ) + model.add(layer) + if ( + not model._functional + and "build_input_shape" in locals() + and build_input_shape + and isinstance(build_input_shape, (tuple, list)) + ): + model.build(build_input_shape) + return model diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/variable_mapping.py 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/variable_mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..e06ea5b09395829fe353cb1ef8489d3abba7f70c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/models/variable_mapping.py @@ -0,0 +1,61 @@ +from keras.src.layers.layer import Layer +from keras.src.metrics.metric import Metric +from keras.src.optimizers.optimizer import Optimizer +from keras.src.saving import saving_lib +from keras.src.saving.keras_saveable import KerasSaveable + + +def map_saveable_variables(saveable, store, visited_saveables): + # If the saveable has already been seen, skip it. + if id(saveable) in visited_saveables: + return + + visited_saveables.add(id(saveable)) + + variables = [] + if isinstance(saveable, Layer): + variables = ( + saveable._trainable_variables + saveable._non_trainable_variables + ) + elif isinstance(saveable, Optimizer): + variables = saveable._variables + elif isinstance(saveable, Metric): + variables = saveable._variables + for v in variables: + if v.path in store: + raise ValueError( + "The model contains two variables with a duplicate path: " + f"path='{v.path}' appears at least twice. " + f"This path is used for {v} and for {store[v.path]}. " + "In order to get a variable map, make sure to use " + "unique paths/names for each variable." + ) + store[v.path] = v + + # Recursively save state of children saveables (layers, optimizers, etc.) 
+ for child_attr, child_obj in saving_lib._walk_saveable(saveable): + if isinstance(child_obj, KerasSaveable): + map_saveable_variables( + child_obj, + store, + visited_saveables=visited_saveables, + ) + elif isinstance(child_obj, (list, dict, tuple, set)): + map_container_variables( + child_obj, + store, + visited_saveables=visited_saveables, + ) + + +def map_container_variables(container, store, visited_saveables): + if isinstance(container, dict): + container = list(container.values()) + + for saveable in container: + if isinstance(saveable, KerasSaveable): + map_saveable_variables( + saveable, + store, + visited_saveables=visited_saveables, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a79e137400d2f18ac3dfb7a6ce484146333c5f1d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/core.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..131aefd8d388a39ac7fe976de97f70e5a1bf4112 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/core.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/function.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1af4f3efe3328297e52acfe0722405e1c5eba4bd Binary files 
/dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/function.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/image.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f34574aef8b0ce1a7e3e2505cdd9a76d7a098bf2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/image.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/linalg.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e63486081468c56b0e31f37065782c010e61af49 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/linalg.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/math.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/math.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02307bc11cbc2338c90b835b247b15acada4ed71 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/math.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/nn.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/nn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d43ca24d290c7cd11a367d1349ff8ba421750283 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/nn.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/node.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/node.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d26310ea1e55de522c6c25523c1bf3738129147 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/node.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/numpy.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/numpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e6b10ddeab8c50574b5387b695fc12665c1d675 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/numpy.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/operation.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..261c811f8308fd68df91d76ee6a1a4a183c2ff0f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/ops/__pycache__/operation.cpython-310.pyc differ