diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/INSTALLER b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/METADATA b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..6c054c86d2d9701c54125fc237c031f4ea30c2fb --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/METADATA @@ -0,0 +1,150 @@ +Metadata-Version: 2.1 +Name: keras +Version: 3.8.0 +Summary: Multi-backend Keras +Author-email: Keras team +License: Apache License 2.0 +Project-URL: Home, https://keras.io/ +Project-URL: Repository, https://github.com/keras-team/keras +Classifier: Development Status :: 4 - Beta +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Classifier: Intended Audience :: Science/Research +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Software Development +Requires-Python: >=3.9 +Description-Content-Type: text/markdown +Requires-Dist: absl-py +Requires-Dist: numpy +Requires-Dist: rich +Requires-Dist: namex +Requires-Dist: h5py +Requires-Dist: optree +Requires-Dist: ml-dtypes +Requires-Dist: packaging + +# Keras 3: Deep Learning for Humans + +Keras 3 is a multi-backend deep learning framework, with support for JAX, 
TensorFlow, and PyTorch. +Effortlessly build and train models for computer vision, natural language processing, audio processing, +timeseries forecasting, recommender systems, etc. + +- **Accelerated model development**: Ship deep learning solutions faster thanks to the high-level UX of Keras +and the availability of easy-to-debug runtimes like PyTorch or JAX eager execution. +- **State-of-the-art performance**: By picking the backend that is the fastest for your model architecture (often JAX!), +leverage speedups ranging from 20% to 350% compared to other frameworks. [Benchmark here](https://keras.io/getting_started/benchmarks/). +- **Datacenter-scale training**: Scale confidently from your laptop to large clusters of GPUs or TPUs. + +Join nearly three million developers, from burgeoning startups to global enterprises, in harnessing the power of Keras 3. + + +## Installation + +### Install with pip + +Keras 3 is available on PyPI as `keras`. Note that Keras 2 remains available as the `tf-keras` package. + +1. Install `keras`: + +``` +pip install keras --upgrade +``` + +2. Install backend package(s). + +To use `keras`, you should also install the backend of choice: `tensorflow`, `jax`, or `torch`. +Note that `tensorflow` is required for using certain Keras 3 features: certain preprocessing layers +as well as `tf.data` pipelines. + +### Local installation + +#### Minimal installation + +Keras 3 is compatible with Linux and MacOS systems. For Windows users, we recommend using WSL2 to run Keras. +To install a local development version: + +1. Install dependencies: + +``` +pip install -r requirements.txt +``` + +2. Run installation command from the root directory. + +``` +python pip_build.py --install +``` + +3. Run API generation script when creating PRs that update `keras_export` public APIs: + +``` +./shell/api_gen.sh +``` + +#### Adding GPU support + +The `requirements.txt` file will install a CPU-only version of TensorFlow, JAX, and PyTorch. 
For GPU support, we also +provide a separate `requirements-{backend}-cuda.txt` for TensorFlow, JAX, and PyTorch. These install all CUDA +dependencies via `pip` and expect a NVIDIA driver to be pre-installed. We recommend a clean python environment for each +backend to avoid CUDA version mismatches. As an example, here is how to create a Jax GPU environment with `conda`: + +```shell +conda create -y -n keras-jax python=3.10 +conda activate keras-jax +pip install -r requirements-jax-cuda.txt +python pip_build.py --install +``` + +## Configuring your backend + +You can export the environment variable `KERAS_BACKEND` or you can edit your local config file at `~/.keras/keras.json` +to configure your backend. Available backend options are: `"tensorflow"`, `"jax"`, `"torch"`. Example: + +``` +export KERAS_BACKEND="jax" +``` + +In Colab, you can do: + +```python +import os +os.environ["KERAS_BACKEND"] = "jax" + +import keras +``` + +**Note:** The backend must be configured before importing `keras`, and the backend cannot be changed after +the package has been imported. + +## Backwards compatibility + +Keras 3 is intended to work as a drop-in replacement for `tf.keras` (when using the TensorFlow backend). Just take your +existing `tf.keras` code, make sure that your calls to `model.save()` are using the up-to-date `.keras` format, and you're +done. + +If your `tf.keras` model does not include custom components, you can start running it on top of JAX or PyTorch immediately. + +If it does include custom components (e.g. custom layers or a custom `train_step()`), it is usually possible to convert it +to a backend-agnostic implementation in just a few minutes. + +In addition, Keras models can consume datasets in any format, regardless of the backend you're using: +you can train your models with your existing `tf.data.Dataset` pipelines or PyTorch `DataLoaders`. + +## Why use Keras 3? 
+ +- Run your high-level Keras workflows on top of any framework -- benefiting at will from the advantages of each framework, +e.g. the scalability and performance of JAX or the production ecosystem options of TensorFlow. +- Write custom components (e.g. layers, models, metrics) that you can use in low-level workflows in any framework. + - You can take a Keras model and train it in a training loop written from scratch in native TF, JAX, or PyTorch. + - You can take a Keras model and use it as part of a PyTorch-native `Module` or as part of a JAX-native model function. +- Make your ML code future-proof by avoiding framework lock-in. +- As a PyTorch user: get access to power and usability of Keras, at last! +- As a JAX user: get access to a fully-featured, battle-tested, well-documented modeling and training library. + + +Read more in the [Keras 3 release announcement](https://keras.io/keras_3/). diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/RECORD b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f7bd3ced3f6ce9f7321889ff657a2a0297f2d0ad --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/RECORD @@ -0,0 +1,1167 @@ +keras-3.8.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +keras-3.8.0.dist-info/METADATA,sha256=82_B-ikTuR7ULzYBq-XYOHikwgR5cbLdLAoMdjcuyis,5800 +keras-3.8.0.dist-info/RECORD,, +keras-3.8.0.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91 +keras-3.8.0.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6 +keras/__init__.py,sha256=4RIBKrI0DyyQMR1WoeFHOw4MIl9Z7mnWNjwxU4bZJpk,2248 +keras/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/__init__.py,sha256=KxzM_FebWUeTe1MInWSNmhQkhezwkhdgj9nIZruK_U4,34 +keras/_tf_keras/__pycache__/__init__.cpython-310.pyc,, 
+keras/_tf_keras/keras/__init__.py,sha256=zZoJBLzgTf1Cnq_knTQeJW9nNsr3QPSx3d_9Gmftz38,2197 +keras/_tf_keras/keras/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/activations/__init__.py,sha256=4iq05YrT-fwYhwmVot_RntIXQw_0W0-t-G1hwXFFmDg,1963 +keras/_tf_keras/keras/activations/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/__init__.py,sha256=OYAgbbrtjh7qLKOpcPHhC069N8lzyOVUvfx7SbTUPac,3295 +keras/_tf_keras/keras/applications/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/convnext/__init__.py,sha256=CJtQ7VaafEq_qCfXqg803Wwgp7j63rIQl2KnVeWPnAI,535 +keras/_tf_keras/keras/applications/convnext/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/densenet/__init__.py,sha256=P2LYU-mrO7j2cD0USIaDHAclR7WFSRte1A3Egw9ZNp0,414 +keras/_tf_keras/keras/applications/densenet/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/efficientnet/__init__.py,sha256=8yc3DWUG029LT0SSLUS52fCqlKiIaJKWuLBFR9kRyKw,758 +keras/_tf_keras/keras/applications/efficientnet/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/efficientnet_v2/__init__.py,sha256=vkOu63V-Dx3onrbE1ildBWdTuIMm6YP5YBXmKIwt8OE,733 +keras/_tf_keras/keras/applications/efficientnet_v2/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/imagenet_utils/__init__.py,sha256=7UiZ1k9p_1KKJ3WhpzTw5IEzNKCA_HkjyA9HUT-P56c,258 +keras/_tf_keras/keras/applications/imagenet_utils/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/inception_resnet_v2/__init__.py,sha256=z3b_vdJA3cgiXTC1YHOh1XECxr7gx5cTA9p7EHX57-o,341 +keras/_tf_keras/keras/applications/inception_resnet_v2/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/inception_v3/__init__.py,sha256=UqUJ30KDEAKEoMjOX_JvuF_ZFdGUF3hZ1HxTnaJQi7Y,314 +keras/_tf_keras/keras/applications/inception_v3/__pycache__/__init__.cpython-310.pyc,, 
+keras/_tf_keras/keras/applications/mobilenet/__init__.py,sha256=aZ-UclrXa5y18flDccJcXoQU_uiEqe36QFITVJKISZg,303 +keras/_tf_keras/keras/applications/mobilenet/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/mobilenet_v2/__init__.py,sha256=QqEsqsY0XS7l7y16K8tDLnxTKDPztPg9Lquu8aOBtqk,314 +keras/_tf_keras/keras/applications/mobilenet_v2/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/mobilenet_v3/__init__.py,sha256=dGfqu2jDH072Irmv9XhByY1hLGJSJnZPFjuSzzXtP3M,254 +keras/_tf_keras/keras/applications/mobilenet_v3/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/nasnet/__init__.py,sha256=kKSKE7oLUQBlafjx-yZqltzqBVK3ZzSRUe_pdM5yMJI,351 +keras/_tf_keras/keras/applications/nasnet/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/resnet/__init__.py,sha256=Jb8J5nfhmlM5OoXbeS8te-e9WcdcVThauyDgYjO41hI,397 +keras/_tf_keras/keras/applications/resnet/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/resnet50/__init__.py,sha256=FDSlA76kxMfGbengnUhswy9wrXOcooytKwqVQlAiHCU,293 +keras/_tf_keras/keras/applications/resnet50/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/resnet_v2/__init__.py,sha256=ZpnMiE2sXOmGHP4dh5coXw6Bw-gAG9Q4AFk3QeXNJAs,418 +keras/_tf_keras/keras/applications/resnet_v2/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/vgg16/__init__.py,sha256=5zCCcsQpFp3_n0pxixssCtqt1J05ijsVOLeHL3nN_BA,287 +keras/_tf_keras/keras/applications/vgg16/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/vgg19/__init__.py,sha256=EcXy9vHifqPxWA9FtBsFVyBE4-cGJk0kuYWqpN16VsA,287 +keras/_tf_keras/keras/applications/vgg19/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/applications/xception/__init__.py,sha256=7rCbsyBRr4q75NEAJXha_afJ9XQlVThvjiq1z1rUCMc,299 +keras/_tf_keras/keras/applications/xception/__pycache__/__init__.cpython-310.pyc,, 
+keras/_tf_keras/keras/backend/__init__.py,sha256=kD-WLxwoqq9YPpBG4rFzbpqe43z1mDJCA-JcGOxOnws,6684 +keras/_tf_keras/keras/backend/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/callbacks/__init__.py,sha256=3hQGFKvDGo_GxxKAg7962P8sOjkAK3gYLAMoApk6NYQ,1044 +keras/_tf_keras/keras/callbacks/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/config/__init__.py,sha256=wdYtkLTOd4vDa3pWuTrWF6Je-xvPh_myrnzapx2gs20,1328 +keras/_tf_keras/keras/config/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/constraints/__init__.py,sha256=IwUc3HQMwy8RS0ylJwjNLSYEV6CCN5cy5vVL2OEouTY,797 +keras/_tf_keras/keras/constraints/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/__init__.py,sha256=TMj3G88kHwCFfFg0IP4OxTD1gCMOfZNscm97YJnX9_0,454 +keras/_tf_keras/keras/datasets/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/boston_housing/__init__.py,sha256=m-JFgF4Wg83j9kUIwG4WwwYxJuqx2X20su0cms-4AvQ,178 +keras/_tf_keras/keras/datasets/boston_housing/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/california_housing/__init__.py,sha256=ZTdBD-p_s7NUQ72-YvVu6zhpFbhogz7Dx2TC_F0wT6o,182 +keras/_tf_keras/keras/datasets/california_housing/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/cifar10/__init__.py,sha256=zE6qAroVmT1N-graOKMme7pMKd3pa-gXoE2YiA71G-k,171 +keras/_tf_keras/keras/datasets/cifar10/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/cifar100/__init__.py,sha256=ry24rVuxL-fMGlvTm_69E8BoWqs5RA4PBXc18q9_3nE,172 +keras/_tf_keras/keras/datasets/cifar100/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/fashion_mnist/__init__.py,sha256=XdTBzHTGNyjnROyxHbTnsPvM4aah7pW7OS7fdA7NDK4,177 +keras/_tf_keras/keras/datasets/fashion_mnist/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/imdb/__init__.py,sha256=UbWIDX0g49ou0oKn52cX9XO3GoxaSoAY8yTTayMhgBI,219 
+keras/_tf_keras/keras/datasets/imdb/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/mnist/__init__.py,sha256=LtzLQyEHikIwIHBnHQpipXIoIzBBlD4ZymIYHFQsbXM,169 +keras/_tf_keras/keras/datasets/mnist/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/datasets/reuters/__init__.py,sha256=nveC8af7Nf1U11DgSGOXud9OG0OTMLmOzLKj_5meuv8,280 +keras/_tf_keras/keras/datasets/reuters/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/distribution/__init__.py,sha256=nMMN_Whe_0UBUxBi7hrgAmF58dKLRTJdHPNovets_TU,775 +keras/_tf_keras/keras/distribution/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/dtype_policies/__init__.py,sha256=5hre38f1WKuUCs0fVSrLdnWVmFGl9hjeSHMDcYGsXC8,605 +keras/_tf_keras/keras/dtype_policies/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/export/__init__.py,sha256=fDKQSQMFuL-o1L82NJ6oj4_EVZlERXIH1xUG6Z2NPq8,177 +keras/_tf_keras/keras/export/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/initializers/__init__.py,sha256=AC2ADbB6GI106OtDS-rkoyQISqDSANBkEv74ipc3PVo,3026 +keras/_tf_keras/keras/initializers/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/layers/__init__.py,sha256=JWGBZ3I5pbBplrrYlcf0RJwoozYdSJmmPKIgEsY_A-U,11908 +keras/_tf_keras/keras/layers/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/legacy/__init__.py,sha256=jnXr7nfdi2bWIIPHkecb4V8kJgWHlbLLGONQNtVIyoE,158 +keras/_tf_keras/keras/legacy/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/legacy/saving/__init__.py,sha256=Lo-SrioJr4I2Lwg7BtCBUrXhehEUi3PjA6XKC1RUTD0,270 +keras/_tf_keras/keras/legacy/saving/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/losses/__init__.py,sha256=97dwQVgjT9I25KZNWwYwaw7Xsee6VjiGtbdOPhheBMM,2974 +keras/_tf_keras/keras/losses/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/metrics/__init__.py,sha256=vdZLkgKKkjWnmikTXm4z6h2z_GD6ifqsGGVpJhYjjUM,4986 +keras/_tf_keras/keras/metrics/__pycache__/__init__.cpython-310.pyc,, 
+keras/_tf_keras/keras/mixed_precision/__init__.py,sha256=5SAdEHsp61WWzpzR7LDAEVwEXDc10iSfICJ5X4fBOU4,636 +keras/_tf_keras/keras/mixed_precision/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/models/__init__.py,sha256=CoOZmRsB75JTFWmaAG2ozUkPod3tqKINHwZfz1vGc74,416 +keras/_tf_keras/keras/models/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/ops/__init__.py,sha256=o3d-0VDc_ROhfmylT2txfYM5kuIKSMu9ih4HF7xB5i0,10452 +keras/_tf_keras/keras/ops/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/ops/image/__init__.py,sha256=sR-AgA7J4SoQ9A2uasL2eVATbJF7tA93L4Bjc90djC0,528 +keras/_tf_keras/keras/ops/image/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/ops/linalg/__init__.py,sha256=S5FbsvccCzV39KKSuRiHi4NstVhHpQBiTJRF-I6H6Y8,595 +keras/_tf_keras/keras/ops/linalg/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/ops/nn/__init__.py,sha256=7t_xXtIzXauLbbghLVwdoJcM2n-ZP2hy9I0vJ2OZXwM,1937 +keras/_tf_keras/keras/ops/nn/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=f5bKYDOsWCg_2kcv9h64niH_b6gGPiAtNA3jk9WJgeY,6426 +keras/_tf_keras/keras/ops/numpy/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/optimizers/__init__.py,sha256=vFg0VYhMqrF46b8DnZJPECQGTSLo2_JHIk_N88IESpk,1008 +keras/_tf_keras/keras/optimizers/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516 +keras/_tf_keras/keras/optimizers/legacy/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/optimizers/schedules/__init__.py,sha256=Wj5RdkBgCZlb83cmMFLvXPMy3bWfi65DT-n6mc_jEm8,918 +keras/_tf_keras/keras/optimizers/schedules/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/preprocessing/__init__.py,sha256=S7kyp2DIP5zEHLu109zfMetURJ3Tphmp86i6v5dzv_Q,530 +keras/_tf_keras/keras/preprocessing/__pycache__/__init__.cpython-310.pyc,, 
+keras/_tf_keras/keras/preprocessing/image/__init__.py,sha256=HxDuQEt6Fvk8LPHdU3ZIHK6rPI4HBnrO1iWWPv2vlLg,1240 +keras/_tf_keras/keras/preprocessing/image/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/preprocessing/sequence/__init__.py,sha256=SuRkLzfPAxJE9mJrOJUHIS1F49wHjKRRMTigoGzHnuw,385 +keras/_tf_keras/keras/preprocessing/sequence/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/preprocessing/text/__init__.py,sha256=ENE088kHs8mA7vSTt8OVn8EbRNmJB8pH_mHuyYLNSEI,436 +keras/_tf_keras/keras/preprocessing/text/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/quantizers/__init__.py,sha256=1uMzyYRCEZmbKf35VvtF7HPmqooNhHgxNgll--Ot21E,627 +keras/_tf_keras/keras/quantizers/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/random/__init__.py,sha256=Vp0WMSatNORPtXBd9PgL9czZCDJj-3EpS_vzDGBaq7U,628 +keras/_tf_keras/keras/random/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/regularizers/__init__.py,sha256=Dlz92XwBnM5yXdcWsrRYCLij3iHtmeVLrkWOF6G_9Sk,819 +keras/_tf_keras/keras/regularizers/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/saving/__init__.py,sha256=T-ae1TghLi2qL3ws-qCNVBSCriVJp6Obp3b1lXZcKZ8,980 +keras/_tf_keras/keras/saving/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/tree/__init__.py,sha256=LeyEGEQky2QkIpz2gaBdWmX0a_XbI1hVx-1N6T4SlPA,738 +keras/_tf_keras/keras/tree/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/utils/__init__.py,sha256=YhX48UKVikdCxjvy8ncVTbafMWMgZ7yo-KSJB1I-1po,2737 +keras/_tf_keras/keras/utils/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/utils/bounding_boxes/__init__.py,sha256=Cg2G9tvv1gf8QfmqAnPDFGUSp_qFx2HGCAl2l9j74dg,1119 +keras/_tf_keras/keras/utils/bounding_boxes/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/utils/legacy/__init__.py,sha256=Lo-SrioJr4I2Lwg7BtCBUrXhehEUi3PjA6XKC1RUTD0,270 +keras/_tf_keras/keras/utils/legacy/__pycache__/__init__.cpython-310.pyc,, 
+keras/_tf_keras/keras/visualization/__init__.py,sha256=25wRQnlYw_E6GYMOUs-62PYrFko3VeYOy-QeoOKEFoQ,569 +keras/_tf_keras/keras/visualization/__pycache__/__init__.cpython-310.pyc,, +keras/_tf_keras/keras/wrappers/__init__.py,sha256=Uytau8DAb7y3iDA2Hrf3LyOcA6HuhSk6CZZs0YkDnfo,317 +keras/_tf_keras/keras/wrappers/__pycache__/__init__.cpython-310.pyc,, +keras/api/__init__.py,sha256=_j2pS8vM-cp0jlc6d4xv6gN7dqEEPkiQNi4s0vFQsFI,2167 +keras/api/__pycache__/__init__.cpython-310.pyc,, +keras/api/activations/__init__.py,sha256=4iq05YrT-fwYhwmVot_RntIXQw_0W0-t-G1hwXFFmDg,1963 +keras/api/activations/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/__init__.py,sha256=OYAgbbrtjh7qLKOpcPHhC069N8lzyOVUvfx7SbTUPac,3295 +keras/api/applications/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/convnext/__init__.py,sha256=CJtQ7VaafEq_qCfXqg803Wwgp7j63rIQl2KnVeWPnAI,535 +keras/api/applications/convnext/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/densenet/__init__.py,sha256=P2LYU-mrO7j2cD0USIaDHAclR7WFSRte1A3Egw9ZNp0,414 +keras/api/applications/densenet/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/efficientnet/__init__.py,sha256=8yc3DWUG029LT0SSLUS52fCqlKiIaJKWuLBFR9kRyKw,758 +keras/api/applications/efficientnet/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/efficientnet_v2/__init__.py,sha256=vkOu63V-Dx3onrbE1ildBWdTuIMm6YP5YBXmKIwt8OE,733 +keras/api/applications/efficientnet_v2/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/imagenet_utils/__init__.py,sha256=7UiZ1k9p_1KKJ3WhpzTw5IEzNKCA_HkjyA9HUT-P56c,258 +keras/api/applications/imagenet_utils/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/inception_resnet_v2/__init__.py,sha256=z3b_vdJA3cgiXTC1YHOh1XECxr7gx5cTA9p7EHX57-o,341 +keras/api/applications/inception_resnet_v2/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/inception_v3/__init__.py,sha256=UqUJ30KDEAKEoMjOX_JvuF_ZFdGUF3hZ1HxTnaJQi7Y,314 
+keras/api/applications/inception_v3/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/mobilenet/__init__.py,sha256=aZ-UclrXa5y18flDccJcXoQU_uiEqe36QFITVJKISZg,303 +keras/api/applications/mobilenet/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/mobilenet_v2/__init__.py,sha256=QqEsqsY0XS7l7y16K8tDLnxTKDPztPg9Lquu8aOBtqk,314 +keras/api/applications/mobilenet_v2/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/mobilenet_v3/__init__.py,sha256=dGfqu2jDH072Irmv9XhByY1hLGJSJnZPFjuSzzXtP3M,254 +keras/api/applications/mobilenet_v3/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/nasnet/__init__.py,sha256=kKSKE7oLUQBlafjx-yZqltzqBVK3ZzSRUe_pdM5yMJI,351 +keras/api/applications/nasnet/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/resnet/__init__.py,sha256=Jb8J5nfhmlM5OoXbeS8te-e9WcdcVThauyDgYjO41hI,397 +keras/api/applications/resnet/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/resnet50/__init__.py,sha256=FDSlA76kxMfGbengnUhswy9wrXOcooytKwqVQlAiHCU,293 +keras/api/applications/resnet50/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/resnet_v2/__init__.py,sha256=ZpnMiE2sXOmGHP4dh5coXw6Bw-gAG9Q4AFk3QeXNJAs,418 +keras/api/applications/resnet_v2/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/vgg16/__init__.py,sha256=5zCCcsQpFp3_n0pxixssCtqt1J05ijsVOLeHL3nN_BA,287 +keras/api/applications/vgg16/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/vgg19/__init__.py,sha256=EcXy9vHifqPxWA9FtBsFVyBE4-cGJk0kuYWqpN16VsA,287 +keras/api/applications/vgg19/__pycache__/__init__.cpython-310.pyc,, +keras/api/applications/xception/__init__.py,sha256=7rCbsyBRr4q75NEAJXha_afJ9XQlVThvjiq1z1rUCMc,299 +keras/api/applications/xception/__pycache__/__init__.cpython-310.pyc,, +keras/api/backend/__init__.py,sha256=Iz8yqN_VWI6G8SzGW351lqwpY2aMEC_1-BazSReN_s4,883 +keras/api/backend/__pycache__/__init__.cpython-310.pyc,, 
+keras/api/callbacks/__init__.py,sha256=3hQGFKvDGo_GxxKAg7962P8sOjkAK3gYLAMoApk6NYQ,1044 +keras/api/callbacks/__pycache__/__init__.cpython-310.pyc,, +keras/api/config/__init__.py,sha256=wdYtkLTOd4vDa3pWuTrWF6Je-xvPh_myrnzapx2gs20,1328 +keras/api/config/__pycache__/__init__.cpython-310.pyc,, +keras/api/constraints/__init__.py,sha256=IwUc3HQMwy8RS0ylJwjNLSYEV6CCN5cy5vVL2OEouTY,797 +keras/api/constraints/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/__init__.py,sha256=TMj3G88kHwCFfFg0IP4OxTD1gCMOfZNscm97YJnX9_0,454 +keras/api/datasets/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/boston_housing/__init__.py,sha256=m-JFgF4Wg83j9kUIwG4WwwYxJuqx2X20su0cms-4AvQ,178 +keras/api/datasets/boston_housing/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/california_housing/__init__.py,sha256=ZTdBD-p_s7NUQ72-YvVu6zhpFbhogz7Dx2TC_F0wT6o,182 +keras/api/datasets/california_housing/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/cifar10/__init__.py,sha256=zE6qAroVmT1N-graOKMme7pMKd3pa-gXoE2YiA71G-k,171 +keras/api/datasets/cifar10/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/cifar100/__init__.py,sha256=ry24rVuxL-fMGlvTm_69E8BoWqs5RA4PBXc18q9_3nE,172 +keras/api/datasets/cifar100/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/fashion_mnist/__init__.py,sha256=XdTBzHTGNyjnROyxHbTnsPvM4aah7pW7OS7fdA7NDK4,177 +keras/api/datasets/fashion_mnist/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/imdb/__init__.py,sha256=UbWIDX0g49ou0oKn52cX9XO3GoxaSoAY8yTTayMhgBI,219 +keras/api/datasets/imdb/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/mnist/__init__.py,sha256=LtzLQyEHikIwIHBnHQpipXIoIzBBlD4ZymIYHFQsbXM,169 +keras/api/datasets/mnist/__pycache__/__init__.cpython-310.pyc,, +keras/api/datasets/reuters/__init__.py,sha256=nveC8af7Nf1U11DgSGOXud9OG0OTMLmOzLKj_5meuv8,280 +keras/api/datasets/reuters/__pycache__/__init__.cpython-310.pyc,, 
+keras/api/distribution/__init__.py,sha256=nMMN_Whe_0UBUxBi7hrgAmF58dKLRTJdHPNovets_TU,775 +keras/api/distribution/__pycache__/__init__.cpython-310.pyc,, +keras/api/dtype_policies/__init__.py,sha256=5hre38f1WKuUCs0fVSrLdnWVmFGl9hjeSHMDcYGsXC8,605 +keras/api/dtype_policies/__pycache__/__init__.cpython-310.pyc,, +keras/api/export/__init__.py,sha256=fDKQSQMFuL-o1L82NJ6oj4_EVZlERXIH1xUG6Z2NPq8,177 +keras/api/export/__pycache__/__init__.cpython-310.pyc,, +keras/api/initializers/__init__.py,sha256=AC2ADbB6GI106OtDS-rkoyQISqDSANBkEv74ipc3PVo,3026 +keras/api/initializers/__pycache__/__init__.cpython-310.pyc,, +keras/api/layers/__init__.py,sha256=9wIEB_qtIVXxBZGjKUCo9CXZHghSGx-QIT-QFTXkR_8,11781 +keras/api/layers/__pycache__/__init__.cpython-310.pyc,, +keras/api/legacy/__init__.py,sha256=jnXr7nfdi2bWIIPHkecb4V8kJgWHlbLLGONQNtVIyoE,158 +keras/api/legacy/__pycache__/__init__.cpython-310.pyc,, +keras/api/legacy/saving/__init__.py,sha256=Lo-SrioJr4I2Lwg7BtCBUrXhehEUi3PjA6XKC1RUTD0,270 +keras/api/legacy/saving/__pycache__/__init__.cpython-310.pyc,, +keras/api/losses/__init__.py,sha256=h0DLNCOHhzkXHFJIss9LjjAJ5wSaajI255xfSaPkzhY,2467 +keras/api/losses/__pycache__/__init__.cpython-310.pyc,, +keras/api/metrics/__init__.py,sha256=_4b3MjwnTgp3am78le7zd7sJVmUaPdzs8PT18GqUMXU,4525 +keras/api/metrics/__pycache__/__init__.cpython-310.pyc,, +keras/api/mixed_precision/__init__.py,sha256=5SAdEHsp61WWzpzR7LDAEVwEXDc10iSfICJ5X4fBOU4,636 +keras/api/mixed_precision/__pycache__/__init__.cpython-310.pyc,, +keras/api/models/__init__.py,sha256=CoOZmRsB75JTFWmaAG2ozUkPod3tqKINHwZfz1vGc74,416 +keras/api/models/__pycache__/__init__.cpython-310.pyc,, +keras/api/ops/__init__.py,sha256=o3d-0VDc_ROhfmylT2txfYM5kuIKSMu9ih4HF7xB5i0,10452 +keras/api/ops/__pycache__/__init__.cpython-310.pyc,, +keras/api/ops/image/__init__.py,sha256=sR-AgA7J4SoQ9A2uasL2eVATbJF7tA93L4Bjc90djC0,528 +keras/api/ops/image/__pycache__/__init__.cpython-310.pyc,, 
+keras/api/ops/linalg/__init__.py,sha256=S5FbsvccCzV39KKSuRiHi4NstVhHpQBiTJRF-I6H6Y8,595 +keras/api/ops/linalg/__pycache__/__init__.cpython-310.pyc,, +keras/api/ops/nn/__init__.py,sha256=7t_xXtIzXauLbbghLVwdoJcM2n-ZP2hy9I0vJ2OZXwM,1937 +keras/api/ops/nn/__pycache__/__init__.cpython-310.pyc,, +keras/api/ops/numpy/__init__.py,sha256=f5bKYDOsWCg_2kcv9h64niH_b6gGPiAtNA3jk9WJgeY,6426 +keras/api/ops/numpy/__pycache__/__init__.cpython-310.pyc,, +keras/api/optimizers/__init__.py,sha256=vFg0VYhMqrF46b8DnZJPECQGTSLo2_JHIk_N88IESpk,1008 +keras/api/optimizers/__pycache__/__init__.cpython-310.pyc,, +keras/api/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516 +keras/api/optimizers/legacy/__pycache__/__init__.cpython-310.pyc,, +keras/api/optimizers/schedules/__init__.py,sha256=Wj5RdkBgCZlb83cmMFLvXPMy3bWfi65DT-n6mc_jEm8,918 +keras/api/optimizers/schedules/__pycache__/__init__.cpython-310.pyc,, +keras/api/preprocessing/__init__.py,sha256=mVbAwXBZ5UxJmrKdUFKQjwdN_DBPf9wNVac_XURBmSI,453 +keras/api/preprocessing/__pycache__/__init__.cpython-310.pyc,, +keras/api/preprocessing/image/__init__.py,sha256=61dPt1CFgoX52QMmaA11D1RHvA3hpSf4I4_sDsS1zuc,379 +keras/api/preprocessing/image/__pycache__/__init__.cpython-310.pyc,, +keras/api/preprocessing/sequence/__init__.py,sha256=nqJbuy_w9GxqAlk5i_gGwzQAodLu0gpPCsYCOXzQYXQ,179 +keras/api/preprocessing/sequence/__pycache__/__init__.cpython-310.pyc,, +keras/api/quantizers/__init__.py,sha256=1uMzyYRCEZmbKf35VvtF7HPmqooNhHgxNgll--Ot21E,627 +keras/api/quantizers/__pycache__/__init__.cpython-310.pyc,, +keras/api/random/__init__.py,sha256=Vp0WMSatNORPtXBd9PgL9czZCDJj-3EpS_vzDGBaq7U,628 +keras/api/random/__pycache__/__init__.cpython-310.pyc,, +keras/api/regularizers/__init__.py,sha256=Dlz92XwBnM5yXdcWsrRYCLij3iHtmeVLrkWOF6G_9Sk,819 +keras/api/regularizers/__pycache__/__init__.cpython-310.pyc,, +keras/api/saving/__init__.py,sha256=T-ae1TghLi2qL3ws-qCNVBSCriVJp6Obp3b1lXZcKZ8,980 
+keras/api/saving/__pycache__/__init__.cpython-310.pyc,, +keras/api/tree/__init__.py,sha256=LeyEGEQky2QkIpz2gaBdWmX0a_XbI1hVx-1N6T4SlPA,738 +keras/api/tree/__pycache__/__init__.cpython-310.pyc,, +keras/api/utils/__init__.py,sha256=YhX48UKVikdCxjvy8ncVTbafMWMgZ7yo-KSJB1I-1po,2737 +keras/api/utils/__pycache__/__init__.cpython-310.pyc,, +keras/api/utils/bounding_boxes/__init__.py,sha256=Cg2G9tvv1gf8QfmqAnPDFGUSp_qFx2HGCAl2l9j74dg,1119 +keras/api/utils/bounding_boxes/__pycache__/__init__.cpython-310.pyc,, +keras/api/utils/legacy/__init__.py,sha256=Lo-SrioJr4I2Lwg7BtCBUrXhehEUi3PjA6XKC1RUTD0,270 +keras/api/utils/legacy/__pycache__/__init__.cpython-310.pyc,, +keras/api/visualization/__init__.py,sha256=25wRQnlYw_E6GYMOUs-62PYrFko3VeYOy-QeoOKEFoQ,569 +keras/api/visualization/__pycache__/__init__.cpython-310.pyc,, +keras/api/wrappers/__init__.py,sha256=Uytau8DAb7y3iDA2Hrf3LyOcA6HuhSk6CZZs0YkDnfo,317 +keras/api/wrappers/__pycache__/__init__.cpython-310.pyc,, +keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684 +keras/src/__pycache__/__init__.cpython-310.pyc,, +keras/src/__pycache__/api_export.cpython-310.pyc,, +keras/src/__pycache__/version.cpython-310.pyc,, +keras/src/activations/__init__.py,sha256=SgIXIccbRm8TFHfo8XNd_ClNOF5BxoXuq4p9sVqsBos,4321 +keras/src/activations/__pycache__/__init__.cpython-310.pyc,, +keras/src/activations/__pycache__/activations.cpython-310.pyc,, +keras/src/activations/activations.py,sha256=F6nckJVrzd4BHGLw-DP1a83m-SUhvFgnR6scUZ73DPY,17114 +keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173 +keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/applications/__pycache__/__init__.cpython-310.pyc,, +keras/src/applications/__pycache__/convnext.cpython-310.pyc,, +keras/src/applications/__pycache__/densenet.cpython-310.pyc,, +keras/src/applications/__pycache__/efficientnet.cpython-310.pyc,, 
+keras/src/applications/__pycache__/efficientnet_v2.cpython-310.pyc,, +keras/src/applications/__pycache__/imagenet_utils.cpython-310.pyc,, +keras/src/applications/__pycache__/inception_resnet_v2.cpython-310.pyc,, +keras/src/applications/__pycache__/inception_v3.cpython-310.pyc,, +keras/src/applications/__pycache__/mobilenet.cpython-310.pyc,, +keras/src/applications/__pycache__/mobilenet_v2.cpython-310.pyc,, +keras/src/applications/__pycache__/mobilenet_v3.cpython-310.pyc,, +keras/src/applications/__pycache__/nasnet.cpython-310.pyc,, +keras/src/applications/__pycache__/resnet.cpython-310.pyc,, +keras/src/applications/__pycache__/resnet_v2.cpython-310.pyc,, +keras/src/applications/__pycache__/vgg16.cpython-310.pyc,, +keras/src/applications/__pycache__/vgg19.cpython-310.pyc,, +keras/src/applications/__pycache__/xception.cpython-310.pyc,, +keras/src/applications/convnext.py,sha256=Eqq8_J-7rHl36ihhVOb3S-rg3tOuS7KJg7F7mF7GgqA,25015 +keras/src/applications/densenet.py,sha256=wE6Kz0KQJaRrJMVO3NSzek5QANqxCVOXrR9Lko6jrYM,17094 +keras/src/applications/efficientnet.py,sha256=4ncUeMVCI4Opqi6ioZJOg6bw62JCcXCHSR4OvSUC3dw,25342 +keras/src/applications/efficientnet_v2.py,sha256=zVhG7ovNXNpqmqOEDqZiFATdXHIlb24SI52Qtz6TAAg,40735 +keras/src/applications/imagenet_utils.py,sha256=4zh4jPOYQPyTbs3vOHrAixqVWeqhbTjM-vkaCDatwVg,16034 +keras/src/applications/inception_resnet_v2.py,sha256=zrwLxezhUigqj2x6ELkHkeKs_KmN0wscs_mlF8EwsVw,14570 +keras/src/applications/inception_v3.py,sha256=Qcr_KFFvyTFsib4NKxUu2HcC61mG2aQeBkdVXT6pz3Q,15581 +keras/src/applications/mobilenet.py,sha256=KQoFt1AL4JLkOsIBwdnSr9tcz1woZdNG9k3eVSX2Ths,17269 +keras/src/applications/mobilenet_v2.py,sha256=Ftmh5-PM9BjNUujAdjxa2Z0LQU9loUksztEOwlkAvM0,18035 +keras/src/applications/mobilenet_v3.py,sha256=iVwPqK66wfsBac-KwOW_p5LO1hS7w7mCIL1PyLj1MKg,23651 +keras/src/applications/nasnet.py,sha256=W_yZZ84O7X2nSTbPAfV4MoyiJKV6jWiu7xGrF8d9ysE,30917 +keras/src/applications/resnet.py,sha256=9QixLDppBqWlDlhzPGut_F_BjJ2rZeHbVnKDAMEVvdg,19521 
+keras/src/applications/resnet_v2.py,sha256=Lkcm5C052RAGJ814Ff_LFbFJ9EMvOGBmmIRcWFSvVs0,6755 +keras/src/applications/vgg16.py,sha256=hQwypxWhnRTjACW29m0eR560MrwPtATXOa7d8q9GQtc,9173 +keras/src/applications/vgg19.py,sha256=MmcoMicENz4_5rrtIBX-7NuzqEAYBsQxePF_P5zPCuI,9494 +keras/src/applications/xception.py,sha256=tsIVYzsc2LJ_NSMXE7xclM44beibDSXGNrR6URucoL4,12786 +keras/src/backend/__init__.py,sha256=b9xUJiQjfk-0_HzuCHpUn26u-_F_TDFHf31RduG2KAc,3088 +keras/src/backend/__pycache__/__init__.cpython-310.pyc,, +keras/src/backend/__pycache__/config.cpython-310.pyc,, +keras/src/backend/common/__init__.py,sha256=q_z_xvW-5LnR7n8cVKPCPWVefEFpHTqTRKnteLYTovk,595 +keras/src/backend/common/__pycache__/__init__.cpython-310.pyc,, +keras/src/backend/common/__pycache__/backend_utils.cpython-310.pyc,, +keras/src/backend/common/__pycache__/dtypes.cpython-310.pyc,, +keras/src/backend/common/__pycache__/global_state.cpython-310.pyc,, +keras/src/backend/common/__pycache__/keras_tensor.cpython-310.pyc,, +keras/src/backend/common/__pycache__/masking.cpython-310.pyc,, +keras/src/backend/common/__pycache__/name_scope.cpython-310.pyc,, +keras/src/backend/common/__pycache__/stateless_scope.cpython-310.pyc,, +keras/src/backend/common/__pycache__/symbolic_scope.cpython-310.pyc,, +keras/src/backend/common/__pycache__/tensor_attributes.cpython-310.pyc,, +keras/src/backend/common/__pycache__/variables.cpython-310.pyc,, +keras/src/backend/common/backend_utils.py,sha256=I_UdvvRl2E4VJvyPd8G8QFnkJpc5VZraN7IqmvN44H0,17509 +keras/src/backend/common/dtypes.py,sha256=SP7UwC0_4Nz00ye4XtnTfD3mbq98OsU-vriOQDfuqUA,10227 +keras/src/backend/common/global_state.py,sha256=0xWtrdgw_VOgtzH3Xl9D0qJJYYeP1AaqE9u2GHXwcu0,3412 +keras/src/backend/common/keras_tensor.py,sha256=pc40I6xqHS_gmwkbsFh2809kkfFbvCf3RLjqBXAZD4s,10537 +keras/src/backend/common/masking.py,sha256=JiC1uvxF_4psCMlaiawfAA_7UQEhF123xxFAnRyNg98,727 +keras/src/backend/common/name_scope.py,sha256=p0kBTcaAhueiQEeOI-5--YJUrVsdInpwyEjTjS43dTQ,2545 
+keras/src/backend/common/stateless_scope.py,sha256=sRZvWOwMM6BWqhaB9v4mqIRwKXdWh2LTBAMFtBUHjes,3667 +keras/src/backend/common/symbolic_scope.py,sha256=RfrfOAv2cbiZai-L6tHwir2WUpJhS6gGj0R2YjxDMVk,683 +keras/src/backend/common/tensor_attributes.py,sha256=X5sYeGDu9YmVBIn8oX31IeE-v-bxjq2ovmIjLrVOa8g,1161 +keras/src/backend/common/variables.py,sha256=FUZ6Ru38JsLsiuaoaIU6jDCN0pbmruYDkP_UUc8pI40,22743 +keras/src/backend/config.py,sha256=katPyLBXsgGmUU8KSCEonKL88j-Mrw7EKgkHBBqBUgs,9526 +keras/src/backend/jax/__init__.py,sha256=VvHxm5iubKY-DnNEl7CUmPvWjbGLXufTCxyk8UJC2lk,1380 +keras/src/backend/jax/__pycache__/__init__.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/core.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/distribution_lib.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/export.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/image.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/layer.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/linalg.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/math.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/nn.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/numpy.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/optimizer.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/random.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/rnn.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/sparse.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/tensorboard.cpython-310.pyc,, +keras/src/backend/jax/__pycache__/trainer.cpython-310.pyc,, +keras/src/backend/jax/core.py,sha256=bwvmkcZ-1zgFyEWHCnoodGI1_i14wPeFClEgONV5EQE,13635 +keras/src/backend/jax/distribution_lib.py,sha256=yOrp3aWzfsjwMaSlIU4Q6srNcJMUcruMpsZQl1bUMBM,9595 +keras/src/backend/jax/export.py,sha256=BZImEaY54YYRQLw0GQa2IGN_x78YO-cNWY-gqoaxBpk,7852 +keras/src/backend/jax/image.py,sha256=TUfpFg25_sPML8Qos52T3XvkvKSpksDRo2TEdfW7NpY,18873 +keras/src/backend/jax/layer.py,sha256=kfxiy810I30GcAgDaxsODxHY3CY4V8yMNKi4pGBTNlg,25 
+keras/src/backend/jax/linalg.py,sha256=F2smqTVuZhDtLUpPLG1aQd89tEhDgt6hWEEUXicNok0,2188 +keras/src/backend/jax/math.py,sha256=1IEDpdoF8e5ltu3D4wbDQuihzvJHhMXz8W9Z_E-eJqU,9391 +keras/src/backend/jax/nn.py,sha256=_IQ4rL-2P75WsozQ9N5Ms8JcMy-Mc_fZ1LTD13YXAx8,37042 +keras/src/backend/jax/numpy.py,sha256=N3XJBD98FlKHua_2mSxQUAV2Nh6fC_3dcQHO0o4wQqY,33472 +keras/src/backend/jax/optimizer.py,sha256=JgYtDGitcRqDXesmGQCOye7za8NiuDnsxnDmBNM8Z0c,4142 +keras/src/backend/jax/random.py,sha256=Uk2huGIk_dlzMrx5eDVrrr2TeCEMitn2vr4yzA0NXjs,3594 +keras/src/backend/jax/rnn.py,sha256=bSnLID-CP2Pr-Xi-a0jT4NJbWwAh0JFcYP5YwiaEZws,7552 +keras/src/backend/jax/sparse.py,sha256=yuxMCxssWj6dn0IC1FMfWZoZ8OkMDIc_uULZ_HR3lPo,13804 +keras/src/backend/jax/tensorboard.py,sha256=48fhQ7hpP8vlL6WJ1-_YQ89VY1cVAmATO9YOqKjSvck,490 +keras/src/backend/jax/trainer.py,sha256=GvNOqNNSL8vWNr9ForbtRMqaa72qWdbS_7n0qrFcSMA,38527 +keras/src/backend/numpy/__init__.py,sha256=MmFlbB7yNLSJmb3KVAtczLhP48PgQ7cldXcXArY2oeQ,1240 +keras/src/backend/numpy/__pycache__/__init__.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/core.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/export.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/image.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/layer.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/linalg.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/math.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/nn.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/numpy.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/random.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/rnn.cpython-310.pyc,, +keras/src/backend/numpy/__pycache__/trainer.cpython-310.pyc,, +keras/src/backend/numpy/core.py,sha256=u9XXXpnNwMZt6MFzjbHXTKJRDll8acwVKprbER6EH_M,13136 +keras/src/backend/numpy/export.py,sha256=mXJ8egC2Rl_I-ggYOTe-NbPeeWiv55od39aWZimUheo,351 +keras/src/backend/numpy/image.py,sha256=9aSFGNqqxsv5rmK-YywzKtliWtrKoBL1t03v6m1wWYA,17250 
+keras/src/backend/numpy/layer.py,sha256=dTk7W7ql7vRgll7JbOXK5PlIhQw5VHdpSjKciHd8vec,27 +keras/src/backend/numpy/linalg.py,sha256=oCeHcCnqm7jJvT2Pt75vlSApFAQi0X85jo5h8hsVP6s,2102 +keras/src/backend/numpy/math.py,sha256=gZ5ozBT5E5SwwY-le1oz5-Rh5emChDdrM9CQ_zqoIaQ,10170 +keras/src/backend/numpy/nn.py,sha256=oupZ8T9WiWUT5YBeQnmrmPX_EUzW3HQkh0zYOahrjAg,36048 +keras/src/backend/numpy/numpy.py,sha256=5FLDT07269ghQtH467DCxgW2G0gLqgWJyC41cmNKv3o,30760 +keras/src/backend/numpy/random.py,sha256=wx2nE75q7L2cBMjtQlQx8yKMj4Ie3puFMDQsbrZO8SA,3961 +keras/src/backend/numpy/rnn.py,sha256=_3QChpBwSdvjSNsSi2zD2ljXsM5vAFBnXuwxwBbA4b4,7652 +keras/src/backend/numpy/trainer.py,sha256=SBvvtYQxmCOKmuUjlKHMNv-DOFMijf9Jf5MakRgeweQ,11139 +keras/src/backend/openvino/__init__.py,sha256=xzkB1NlX2Gy__RUrD45bM1qXJr7pcF1wxFN-SjcwLHA,1255 +keras/src/backend/openvino/__pycache__/__init__.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/core.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/export.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/image.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/layer.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/linalg.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/math.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/nn.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/numpy.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/random.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/rnn.cpython-310.pyc,, +keras/src/backend/openvino/__pycache__/trainer.cpython-310.pyc,, +keras/src/backend/openvino/core.py,sha256=8-0-DylNrE207E025BO5gw8qzzQC1prN5Hn2_INKZB8,19474 +keras/src/backend/openvino/export.py,sha256=eDDZmCTXIyii3YXEPMEDXYVUI_z07BlHJaD0NovEoXE,360 +keras/src/backend/openvino/image.py,sha256=mJzfbUE-WW6xLFZUO5APR8Un4jkSxjChA-YEeV_WC3M,938 +keras/src/backend/openvino/layer.py,sha256=5RdvaH1yOyPAphjKiuQAK1H_yZFYKE1Hp7c5bZ1pkRk,30 
+keras/src/backend/openvino/linalg.py,sha256=7PtMY_-R94bcBE2xgCGZoXqi_6q8AX9805L3yOfXOs4,1326 +keras/src/backend/openvino/math.py,sha256=T0zSJ3lvnS41tPhw_sPyenM1xn0jTSF4n3sp5gYV9BI,3492 +keras/src/backend/openvino/nn.py,sha256=R1g-OMaQvnwYu9b1w_iNyG10JWOBEsac9ph1I8sk1do,14838 +keras/src/backend/openvino/numpy.py,sha256=6qJLgTlRedWyJN53kc9b_zG10VHXAAXtT3-eXSmXQJ4,31837 +keras/src/backend/openvino/random.py,sha256=MviLk8kg0h-DcgyqkjSLjVuQBaT55iQWJR6lr-Acxvo,3651 +keras/src/backend/openvino/rnn.py,sha256=ErmuZLPSgG9qU-NfYPPvBZ6Ysy8k-fA4g19Vhqq7OVQ,866 +keras/src/backend/openvino/trainer.py,sha256=wx2bdW71RMkOzJa9mNk5aLjjqq70qALpnfYE8jTRWbE,9069 +keras/src/backend/tensorflow/__init__.py,sha256=DDqA8UAqSH9tEtx0mz5ib2PqrqVm3neQ1IPiSmjTDKg,1583 +keras/src/backend/tensorflow/__pycache__/__init__.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/core.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/distribution_lib.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/export.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/image.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/layer.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/linalg.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/math.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/nn.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/numpy.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/optimizer.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/random.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/rnn.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/sparse.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/tensorboard.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/trackable.cpython-310.pyc,, +keras/src/backend/tensorflow/__pycache__/trainer.cpython-310.pyc,, +keras/src/backend/tensorflow/core.py,sha256=_tPIg4o-JC3hQ_vMDzVv5_HUwvg7ZPfG7HynhXXWtfY,21828 
+keras/src/backend/tensorflow/distribution_lib.py,sha256=blbl6frgrsdhxZTIXO88rq9drNtaqo_gE5rk7k8Qdzc,2747 +keras/src/backend/tensorflow/export.py,sha256=pyynwLWfGdDBaHFq8Nq4wZ1ihn1Mmp8zAikXhS2ADaI,1326 +keras/src/backend/tensorflow/image.py,sha256=dCCTezxx0_0bJwTM1Kw1lOBUKwqaRbEfTkU4azSGWyM,16997 +keras/src/backend/tensorflow/layer.py,sha256=iE6XYSZENEoTpNhoXrEOm7gnIOHwOjETZd_p9J_16f0,4334 +keras/src/backend/tensorflow/linalg.py,sha256=SOskCo6JJSjEP9xjbCSCh-CCu6_DRbl9QCobXXZ0P-Y,7624 +keras/src/backend/tensorflow/math.py,sha256=eZSrriwIW9AC3R5ZNCMuLOV1uiyxGFwFdjA5T7f6iI0,12368 +keras/src/backend/tensorflow/nn.py,sha256=lVBidWmieuqtFwnQ-vPXGwvorcODDTnxprnSeJR6IvM,34138 +keras/src/backend/tensorflow/numpy.py,sha256=hBxEt21bfOBbIhJEgSA9Pn0JYsXBuVBQ1fWzy6QrPU4,84300 +keras/src/backend/tensorflow/optimizer.py,sha256=kFlyEOnGjEYdLpd8mpwhUeku78__xBfZbbrDWpJrq60,9307 +keras/src/backend/tensorflow/random.py,sha256=iO8V_soaDXZm9ewyAVbjudhsMj08C348c9Bz64nxXC4,6475 +keras/src/backend/tensorflow/rnn.py,sha256=SwKOW9j4CYcSYmrlm1vYK34xU0TcVgBcz52fRUT50aM,34600 +keras/src/backend/tensorflow/sparse.py,sha256=8oriKe2vp1GqbemKo0F4bkVIbb0tIcDTsgq3er1K4mo,32268 +keras/src/backend/tensorflow/tensorboard.py,sha256=e7pXicuMfQjuCmq1wOmixWhWt2EbjLMBo_JPAqCbZRk,504 +keras/src/backend/tensorflow/trackable.py,sha256=QZn0JvpBJ7Kx4e6zM2IVIWz9ADcWDB-dHN6vjoQBa9Q,1993 +keras/src/backend/tensorflow/trainer.py,sha256=Y8VUyzvUoARe_T223Ux839PDkdCVtYBDjnx4jSKdEiM,36087 +keras/src/backend/torch/__init__.py,sha256=NFqFuuDvd9Vq2nQR3oVe48ULQjJbGjFdp4b9jNX5W_g,2066 +keras/src/backend/torch/__pycache__/__init__.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/core.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/export.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/image.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/layer.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/linalg.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/math.cpython-310.pyc,, 
+keras/src/backend/torch/__pycache__/nn.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/numpy.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/random.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/rnn.cpython-310.pyc,, +keras/src/backend/torch/__pycache__/trainer.cpython-310.pyc,, +keras/src/backend/torch/core.py,sha256=dhTAEXD9IUnxef-aaZHp0wmGPTY9baB_WGqFJiLJ_fI,23764 +keras/src/backend/torch/export.py,sha256=XPlZxynbCPY5iSA8StzT560Mra_CmLy7gHoART7V5VU,4855 +keras/src/backend/torch/image.py,sha256=Rn24Z7mRbHWT-57dwsfCuxpwVrmzTrdCEmOEZxb8jWo,17862 +keras/src/backend/torch/layer.py,sha256=vwPiyCMmF0Z_IlauaNkJEuyCY4jG7eirSWi-r2UEUPQ,2205 +keras/src/backend/torch/linalg.py,sha256=5jmtd1oOfTlnf1_qVHVUG_I0QatYoesANETQ6FNZF7s,1875 +keras/src/backend/torch/math.py,sha256=gXYOCjLPF6W1H3fzLEi3RPLfiJgvUApkDCh7Q43UACg,14316 +keras/src/backend/torch/nn.py,sha256=wHpPvcA6uj95g79XBSFCmq2JwFD3UjjRseBo0ZsoLhM,32327 +keras/src/backend/torch/numpy.py,sha256=w7J5ffuvqirekg-gMooM5UHUjOI8Q2uA3SW71s50bXQ,50797 +keras/src/backend/torch/optimizers/__init__.py,sha256=yvqiyKgMEh-nGpacssdpsMySujyYB6lPy-Wil3onXvo,78 +keras/src/backend/torch/optimizers/__pycache__/__init__.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_adadelta.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_adagrad.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_adam.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_adamax.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_adamw.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_lion.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_nadam.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_optimizer.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_parallel_optimizer.cpython-310.pyc,, 
+keras/src/backend/torch/optimizers/__pycache__/torch_rmsprop.cpython-310.pyc,, +keras/src/backend/torch/optimizers/__pycache__/torch_sgd.cpython-310.pyc,, +keras/src/backend/torch/optimizers/torch_adadelta.py,sha256=iPjGHvD7q_VD0WaMNxuNcvz8uIWd0smRyEMzMqryUD4,1672 +keras/src/backend/torch/optimizers/torch_adagrad.py,sha256=Mg0jEGVur0fXFGm9LjPxi55qMQFoaVPfOFtnkliZeXA,1041 +keras/src/backend/torch/optimizers/torch_adam.py,sha256=qwbiK7OZS2OhxRXd-EaS5xJDxShQnVFNAL8OqHLF60E,1889 +keras/src/backend/torch/optimizers/torch_adamax.py,sha256=8nkMw4dYj7agkigmFBpePb6nSNhJKrRVVtIjqLA0J1M,1483 +keras/src/backend/torch/optimizers/torch_adamw.py,sha256=JcAtOdadgNPLH5cAlHkw_OSJ_wkGCyK5pQE3MQNk_Ps,150 +keras/src/backend/torch/optimizers/torch_lion.py,sha256=JMik6y-n4FWgv6Ug5y8rGyl_eCHMQ7OXAFBNE9p5GC8,1041 +keras/src/backend/torch/optimizers/torch_nadam.py,sha256=L7jC1fxvZOcAN7VxA1bi0WYpe_JVyfP5l1bfNKmj62k,2421 +keras/src/backend/torch/optimizers/torch_optimizer.py,sha256=yiCcsZcbRY3HEtiXADDUJxqS74iRmrMwnEFtX5GFh9Q,1803 +keras/src/backend/torch/optimizers/torch_parallel_optimizer.py,sha256=MXlJzuE7GKF_a6A0qspRorM2bQCSBAE2BOfKw9a5mnw,783 +keras/src/backend/torch/optimizers/torch_rmsprop.py,sha256=BkxPLHL_8Qq-rt-CYLp4MO0L8hMjAKfrcKSgfgPA-_E,2053 +keras/src/backend/torch/optimizers/torch_sgd.py,sha256=7BUKY8HtoWG_gdaTk_8SDUM9hR4Tbcld68qSLcFItiQ,1175 +keras/src/backend/torch/random.py,sha256=YhLfC7qkGpzlU_i6gGPVormo3BMSo7OUA3TC3GCehrA,8292 +keras/src/backend/torch/rnn.py,sha256=faunVsKvNOUehdYywLoMMAHXDVDYYLinXRnjA7u5Id0,13704 +keras/src/backend/torch/trainer.py,sha256=-QMfgTrxGUX_AH2misXCc9nLgW1z-Nwksz005zS7rEk,17556 +keras/src/callbacks/__init__.py,sha256=1W0PW4onBURqIZOth1ZU0KWXv-ZJQVcSdjh6fNdpz2A,922 +keras/src/callbacks/__pycache__/__init__.cpython-310.pyc,, +keras/src/callbacks/__pycache__/backup_and_restore.cpython-310.pyc,, +keras/src/callbacks/__pycache__/callback.cpython-310.pyc,, +keras/src/callbacks/__pycache__/callback_list.cpython-310.pyc,, 
+keras/src/callbacks/__pycache__/csv_logger.cpython-310.pyc,, +keras/src/callbacks/__pycache__/early_stopping.cpython-310.pyc,, +keras/src/callbacks/__pycache__/history.cpython-310.pyc,, +keras/src/callbacks/__pycache__/lambda_callback.cpython-310.pyc,, +keras/src/callbacks/__pycache__/learning_rate_scheduler.cpython-310.pyc,, +keras/src/callbacks/__pycache__/model_checkpoint.cpython-310.pyc,, +keras/src/callbacks/__pycache__/progbar_logger.cpython-310.pyc,, +keras/src/callbacks/__pycache__/reduce_lr_on_plateau.cpython-310.pyc,, +keras/src/callbacks/__pycache__/remote_monitor.cpython-310.pyc,, +keras/src/callbacks/__pycache__/swap_ema_weights.cpython-310.pyc,, +keras/src/callbacks/__pycache__/tensorboard.cpython-310.pyc,, +keras/src/callbacks/__pycache__/terminate_on_nan.cpython-310.pyc,, +keras/src/callbacks/backup_and_restore.py,sha256=QsF_8rJIh9s-6g91fsgVYDJBXmCtse5MPqasUf4SXCM,9361 +keras/src/callbacks/callback.py,sha256=SzoISmhmI4OWwslTmh-ROGpNU93fCEcf4JaENGudtt8,10153 +keras/src/callbacks/callback_list.py,sha256=352U3o-oFLiM5TfmOLdgmpWPMuvvx9fkR6foOH2nsZI,8736 +keras/src/callbacks/csv_logger.py,sha256=SX0vUniaMSrlBOVCLCZmiDYD-LM0kGH0fynVBQCom-A,3206 +keras/src/callbacks/early_stopping.py,sha256=tIkxCQGsfs7VEv6MTTiHe1L8dj21_At2WDgF-cp0y0c,8910 +keras/src/callbacks/history.py,sha256=Ed2lKv0Z-JgTZpS4PKKA7vkBP1EFzbLJXmsH_tXZ3_s,1301 +keras/src/callbacks/lambda_callback.py,sha256=UWzsVV5zqPq034SALBg-jpWNIvnmzrXqPmX_9FWbRbs,3441 +keras/src/callbacks/learning_rate_scheduler.py,sha256=II0SLxltUX3omRbGTYffd9KTWLRKtzW57SDRe70_t7E,2965 +keras/src/callbacks/model_checkpoint.py,sha256=QdaTYF2HYiQRPU23IsJ8ch7OmEiOOKXpmTJ9sSb1ARk,18615 +keras/src/callbacks/progbar_logger.py,sha256=BqddKoOyc8vxxtKriq5QD3n5JhVPUxkuWF2u1UlCriQ,3104 +keras/src/callbacks/reduce_lr_on_plateau.py,sha256=IIn633i7saAFKla7Qf1OEdBggNKnYinQ1hW_lp65ITo,5340 +keras/src/callbacks/remote_monitor.py,sha256=VDbNzCdddCDe_ZoeVvwV50oJkwOehhT_IDDYD8LzFOg,2727 
+keras/src/callbacks/swap_ema_weights.py,sha256=JFp0E2BDTBWxVMdsGgVFuArfX3OaNKdtD9pG9wnFV6o,6843 +keras/src/callbacks/tensorboard.py,sha256=SnlWocoHpgTOmW7yrguBAkPHsHf3-UU9jMjhgvRyAsE,26973 +keras/src/callbacks/terminate_on_nan.py,sha256=WWrXVVa927N7-vwzegcORMFAP3rk4eVqPzL8XvfSaHw,669 +keras/src/constraints/__init__.py,sha256=3bDz814Sz2haFYT3puoLzv1Nqm9Uf2AwQqqamgqULPk,1715 +keras/src/constraints/__pycache__/__init__.cpython-310.pyc,, +keras/src/constraints/__pycache__/constraints.cpython-310.pyc,, +keras/src/constraints/constraints.py,sha256=bn9uGKb-GuOoEd3SGJfFqc7SDS0ziGUeggozc5Yna_0,7333 +keras/src/datasets/__init__.py,sha256=ivEFJkqLxwU5BEYqWsWTd66kJ96YMKFKiYQGHm2CX68,383 +keras/src/datasets/__pycache__/__init__.cpython-310.pyc,, +keras/src/datasets/__pycache__/boston_housing.cpython-310.pyc,, +keras/src/datasets/__pycache__/california_housing.cpython-310.pyc,, +keras/src/datasets/__pycache__/cifar.cpython-310.pyc,, +keras/src/datasets/__pycache__/cifar10.cpython-310.pyc,, +keras/src/datasets/__pycache__/cifar100.cpython-310.pyc,, +keras/src/datasets/__pycache__/fashion_mnist.cpython-310.pyc,, +keras/src/datasets/__pycache__/imdb.cpython-310.pyc,, +keras/src/datasets/__pycache__/mnist.cpython-310.pyc,, +keras/src/datasets/__pycache__/reuters.cpython-310.pyc,, +keras/src/datasets/boston_housing.py,sha256=tWTEhV2LHaBaNviUU72ZIa7nr_nAEuSu_bXFh4kvkG0,2644 +keras/src/datasets/california_housing.py,sha256=d7cceyP0hnKDaHYUF_VP5GWLJznxAPEqMuMkhnugVns,3850 +keras/src/datasets/cifar.py,sha256=nnv0GQKypj68qnK8gMEjTY4h6orkO1g70huKQqdJmAQ,704 +keras/src/datasets/cifar10.py,sha256=wnX2QW5UnMYaH931H-YZ6fdijiQQjtjJtj_z5K6MVkA,3189 +keras/src/datasets/cifar100.py,sha256=XbPTtVIiYVsRXWI8sQxksf7nPEB9tMv7qyGMuHTiTLs,2973 +keras/src/datasets/fashion_mnist.py,sha256=iAQoY3e7ln15BZ7nNIEWU4rT7ORsMiltDZdFgvC-dcI,2929 +keras/src/datasets/imdb.py,sha256=0y7AHRu7p-9FyHqo9cjmm1zkRZJrgS716xm5h_zDXDg,7201 +keras/src/datasets/mnist.py,sha256=VjVTM4Q8iucAS2hTXsUtjT6hktGDUHBfaGu4kNUwUYc,2393 
+keras/src/datasets/reuters.py,sha256=q7lveC4NfeBcTJrM0qBYXJTlafpVoonEGyMkLY8GubU,7214 +keras/src/distribution/__init__.py,sha256=pseLHx387oTmXROr95tU7kNWjPL8-JB4kZs8nUHsOiU,718 +keras/src/distribution/__pycache__/__init__.cpython-310.pyc,, +keras/src/distribution/__pycache__/distribution_lib.cpython-310.pyc,, +keras/src/distribution/distribution_lib.py,sha256=_b5ZJejY2LsRuGoEpaRagYhtEgd2bkbhOGTnbZSE69g,31552 +keras/src/dtype_policies/__init__.py,sha256=qYQQC3MvU0BujZcP0IN7_0awcu926rtSRukjcV2TU5w,3545 +keras/src/dtype_policies/__pycache__/__init__.cpython-310.pyc,, +keras/src/dtype_policies/__pycache__/dtype_policy.cpython-310.pyc,, +keras/src/dtype_policies/__pycache__/dtype_policy_map.cpython-310.pyc,, +keras/src/dtype_policies/dtype_policy.py,sha256=RNjKHjdTZeHJpf51crSr2TwLz_fi59YN8p_7k3UabVw,12745 +keras/src/dtype_policies/dtype_policy_map.py,sha256=23Rm2NZlZ4DK8TESGKzQAbr1gwc4jJsyCVc1KBXUt-A,7902 +keras/src/export/__init__.py,sha256=Mhd9QeM1sMbm316M8Gr9bEBInVzhWZZIEy3lFlD66eQ,211 +keras/src/export/__pycache__/__init__.cpython-310.pyc,, +keras/src/export/__pycache__/export_utils.cpython-310.pyc,, +keras/src/export/__pycache__/onnx.cpython-310.pyc,, +keras/src/export/__pycache__/saved_model.cpython-310.pyc,, +keras/src/export/__pycache__/tf2onnx_lib.cpython-310.pyc,, +keras/src/export/__pycache__/tfsm_layer.cpython-310.pyc,, +keras/src/export/export_utils.py,sha256=wxiObhc7R07UqsKHWwK_KlDlN3iBFOaY1zUI3WKz2uw,4041 +keras/src/export/onnx.py,sha256=d8NuR1xipci7wVCubPQac18BRO59Gj4rky5PKsXAGhI,5843 +keras/src/export/saved_model.py,sha256=YpAfbC6RNs8DdxMz9mudAEY3KI40vLDDRP0d5yw5k-o,27334 +keras/src/export/tf2onnx_lib.py,sha256=u3AP1458GHvFHIFNnqyu_yEgTKlgUYhmbYBc9YKQKKE,7210 +keras/src/export/tfsm_layer.py,sha256=5psADcAXkJN_AR1A8nmIxh8kjaTAuAo8u-bGnEkK98c,5701 +keras/src/initializers/__init__.py,sha256=tG7qxC2J0PDhO_L2W95sJXNIduL7F5lqHvUuJ7EIhXE,5662 +keras/src/initializers/__pycache__/__init__.cpython-310.pyc,, 
+keras/src/initializers/__pycache__/constant_initializers.cpython-310.pyc,, +keras/src/initializers/__pycache__/initializer.cpython-310.pyc,, +keras/src/initializers/__pycache__/random_initializers.cpython-310.pyc,, +keras/src/initializers/constant_initializers.py,sha256=celz5tGkp2opqyuORykexWkMIQJe0AenJ9dVcGbf-ZY,9960 +keras/src/initializers/initializer.py,sha256=kNAyRA8CzBdtknT6ZUt5XIO2_Z9NzpN119CId7wT1Vg,2632 +keras/src/initializers/random_initializers.py,sha256=AuUeQ3YZGakDKTCs8njQLhozE6iWYHwP6-VstnEMOaQ,23631 +keras/src/layers/__init__.py,sha256=8yLpcxh_y_AzXGAmsKinZdL5uGoEXOcq_SPFd3-AOPk,10716 +keras/src/layers/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/__pycache__/input_spec.cpython-310.pyc,, +keras/src/layers/__pycache__/layer.cpython-310.pyc,, +keras/src/layers/activations/__init__.py,sha256=MhPBye8WWLSf_iDel3BuuqYk4nx6Sym8s4dZKb1KTqQ,272 +keras/src/layers/activations/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/activations/__pycache__/activation.cpython-310.pyc,, +keras/src/layers/activations/__pycache__/elu.cpython-310.pyc,, +keras/src/layers/activations/__pycache__/leaky_relu.cpython-310.pyc,, +keras/src/layers/activations/__pycache__/prelu.cpython-310.pyc,, +keras/src/layers/activations/__pycache__/relu.cpython-310.pyc,, +keras/src/layers/activations/__pycache__/softmax.cpython-310.pyc,, +keras/src/layers/activations/activation.py,sha256=JOnb1NjMUcaccpxVBslnQkvCBbAKgbwOrJIpkELdNMo,1282 +keras/src/layers/activations/elu.py,sha256=rhRvrQzgWO2d4D4UlwD05g-PwkorYKHefh__cbY4uto,835 +keras/src/layers/activations/leaky_relu.py,sha256=M2l1H1-iYU_Rkhkc9WTnzuwgQxmOXTDD59bvXYZ651k,1926 +keras/src/layers/activations/prelu.py,sha256=39aa5muLXt_eO_aLyE_z0_3aNed94wCKHmtDc7IGzdo,3454 +keras/src/layers/activations/relu.py,sha256=7pWkLOzTbMdbBAboCJPHXMm1GlFJ4sW0AH7RMdmxwWs,2684 +keras/src/layers/activations/softmax.py,sha256=XMbpJdvtRGnOf7QKHGrIH2cnrQMs6kGB2un98Ddm37I,2264 
+keras/src/layers/attention/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/attention/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/attention/__pycache__/additive_attention.cpython-310.pyc,, +keras/src/layers/attention/__pycache__/attention.cpython-310.pyc,, +keras/src/layers/attention/__pycache__/grouped_query_attention.cpython-310.pyc,, +keras/src/layers/attention/__pycache__/multi_head_attention.cpython-310.pyc,, +keras/src/layers/attention/additive_attention.py,sha256=Es5Ca_IV06G67xy3v5T1Kt2sjM70L_P5Fmiy9FfTE28,4335 +keras/src/layers/attention/attention.py,sha256=-P7wDcua_AdHLjWyc8rW9dstYTAjjW9K1XBm9km7y9s,13494 +keras/src/layers/attention/grouped_query_attention.py,sha256=6c-z2qWlkhX52Kjz6KShHfL6No_dMGaqgD76wTKUfRg,21042 +keras/src/layers/attention/multi_head_attention.py,sha256=ph4R_K9Fxcir1iZJFx3AgKuokR_1eQkBF45I_BFIWh0,32006 +keras/src/layers/convolutional/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/convolutional/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/base_conv.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/base_conv_transpose.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/base_depthwise_conv.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/base_separable_conv.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/conv1d.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/conv1d_transpose.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/conv2d.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/conv2d_transpose.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/conv3d.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/conv3d_transpose.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/depthwise_conv1d.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/depthwise_conv2d.cpython-310.pyc,, 
+keras/src/layers/convolutional/__pycache__/separable_conv1d.cpython-310.pyc,, +keras/src/layers/convolutional/__pycache__/separable_conv2d.cpython-310.pyc,, +keras/src/layers/convolutional/base_conv.py,sha256=OJXpjrTfqkYMANwsLYMQ7WKeaePWEaCuI6Bkwbrb9U0,17280 +keras/src/layers/convolutional/base_conv_transpose.py,sha256=Z2CF1hiOu6k1bbThliIeag9vQQeLo89PIrDRaAOVxe8,10712 +keras/src/layers/convolutional/base_depthwise_conv.py,sha256=0LLxZif9xy7jf5UgZpN9y2n1riesdjrQbhw5-X4LgDo,11634 +keras/src/layers/convolutional/base_separable_conv.py,sha256=qoD03-NxthlkpHhd6DVMx57wvPt7tcx8lJFnR9Bg-tA,12660 +keras/src/layers/convolutional/conv1d.py,sha256=2RV1hjQi7A4oj-issZ6_kRoWEA-J9WXqON7N_mbhifA,7321 +keras/src/layers/convolutional/conv1d_transpose.py,sha256=Mg4g5cd-RNf4QTCHMaUUQoZrJDLrzEukcO9UwK2CBN0,5575 +keras/src/layers/convolutional/conv2d.py,sha256=c1VaoYr8YSZj5YCTa-1zKOXDQANryBrJptqWi5sbZOE,5689 +keras/src/layers/convolutional/conv2d_transpose.py,sha256=mlGI66wHkEUIdhExtGgShFN2xPrGm79xkrYoLwQ58k8,5695 +keras/src/layers/convolutional/conv3d.py,sha256=ZVHcutPZBEeGB9fV88B7yEZD21VqlnBIovFP3lvviX8,5918 +keras/src/layers/convolutional/conv3d_transpose.py,sha256=TB3oGatWi9PQHPsLNuD_NChS3UvuWhiYDimbwSntcD4,5901 +keras/src/layers/convolutional/depthwise_conv1d.py,sha256=ekylnBEKTDUgPB3OkoqZx3M7xgrHabzCA-ww_wEqVFY,6003 +keras/src/layers/convolutional/depthwise_conv2d.py,sha256=rnCd_S3UVeNdVotjKW1WloTEZIGY2diNhKuQmmpnjxM,6100 +keras/src/layers/convolutional/separable_conv1d.py,sha256=vL5qzdaSOOTgyn1A6y9IZZbQOEeB6FedPk9JJI5wqSY,6452 +keras/src/layers/convolutional/separable_conv2d.py,sha256=ZkLOnA6l5UV3GuJufwlOHMOm1S-xkt6sdF-qmP4PDjw,6533 +keras/src/layers/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/core/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/core/__pycache__/dense.cpython-310.pyc,, +keras/src/layers/core/__pycache__/einsum_dense.cpython-310.pyc,, +keras/src/layers/core/__pycache__/embedding.cpython-310.pyc,, 
+keras/src/layers/core/__pycache__/identity.cpython-310.pyc,, +keras/src/layers/core/__pycache__/input_layer.cpython-310.pyc,, +keras/src/layers/core/__pycache__/lambda_layer.cpython-310.pyc,, +keras/src/layers/core/__pycache__/masking.cpython-310.pyc,, +keras/src/layers/core/__pycache__/wrapper.cpython-310.pyc,, +keras/src/layers/core/dense.py,sha256=A3VfqlPzQjChSVXzTo1o2OIUM-H1dZWtphCqA_-64Rk,23572 +keras/src/layers/core/einsum_dense.py,sha256=etMN2e-EPWxOPCKCAXBUWtCL8b60hxIjfnf2R6XkvF4,41261 +keras/src/layers/core/embedding.py,sha256=qAE_J9owpNPXMwHbMmj2Gvad5B19guGB4niXwU9AdEA,16183 +keras/src/layers/core/identity.py,sha256=jI9teEM3ZMT8blcC8d_3yCBaj8CbTuMELez5H39gkbM,843 +keras/src/layers/core/input_layer.py,sha256=sW3GdZ1pWdMZtTG5H9458qnf8AMR4ko6tF-qsRMUqW8,7369 +keras/src/layers/core/lambda_layer.py,sha256=wCb8VFqwlO0iWwTAEs2wQIQIJW27l1xfybFfhUbNSzw,9194 +keras/src/layers/core/masking.py,sha256=-EBbTAjeCBw-BPWwg1-imyKzAeRxfO-YYwM2AzAMzGE,2574 +keras/src/layers/core/wrapper.py,sha256=nhgyWdLqHfxWhYDQZ1mU7Fw9lmXZRKHIknBDaywLbeU,1535 +keras/src/layers/input_spec.py,sha256=M52SiBu_4uogdrMYW8BoyeWSElb4ahwa5X04yDkpbs0,9849 +keras/src/layers/layer.py,sha256=iYmZiEAbtrewh0z7kFFHp7mRc2X8bdsT0NOkIi6wmRc,68738 +keras/src/layers/merging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/merging/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/add.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/average.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/base_merge.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/concatenate.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/dot.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/maximum.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/minimum.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/multiply.cpython-310.pyc,, +keras/src/layers/merging/__pycache__/subtract.cpython-310.pyc,, 
+keras/src/layers/merging/add.py,sha256=icbh3RwZ3QUP3bFNCi7GbrHj2hFdKu1Dsv8djSa13co,2150 +keras/src/layers/merging/average.py,sha256=RPW8Lpj0U3ebMdvhyI451Iw_Qn7p6tKAEgdgDds19Co,2214 +keras/src/layers/merging/base_merge.py,sha256=NpkijhQvcCpU_Wq3OOK6bhZZv2HqHAx6WX-Alm3WHgc,10800 +keras/src/layers/merging/concatenate.py,sha256=WZGtrV863hawY3JkTbElPRPkAZA8G3oK6XVcrUEEq5A,6798 +keras/src/layers/merging/dot.py,sha256=XR3KiuhdEF6tatDndYWvfngwJj2MWXHb4NprLZWQWJ0,12807 +keras/src/layers/merging/maximum.py,sha256=5lF8X0raVikM8YimdXJlZlbwT6-BGFD3O61sDsPidcw,2142 +keras/src/layers/merging/minimum.py,sha256=f8RN1O5yYzDqJbXuVTBKC0TKdEw_VU4bC4pZX2zE35A,2140 +keras/src/layers/merging/multiply.py,sha256=WvBX5gOpouqfQYnpioKMw2Tj6HRQQ2LNBuvKsRo_6P0,3185 +keras/src/layers/merging/subtract.py,sha256=ijpJDomo1JSMCw97Rn55LXiVLsI50lcvUxmZiv_HIzo,2684 +keras/src/layers/normalization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/normalization/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/normalization/__pycache__/batch_normalization.cpython-310.pyc,, +keras/src/layers/normalization/__pycache__/group_normalization.cpython-310.pyc,, +keras/src/layers/normalization/__pycache__/layer_normalization.cpython-310.pyc,, +keras/src/layers/normalization/__pycache__/spectral_normalization.cpython-310.pyc,, +keras/src/layers/normalization/__pycache__/unit_normalization.cpython-310.pyc,, +keras/src/layers/normalization/batch_normalization.py,sha256=PzT-Ucj5p-qLGrgN0RxbqL_ICVViaJR15PsFVNVEi60,14161 +keras/src/layers/normalization/group_normalization.py,sha256=S8w40kMCi_aEN079vwDPxaV7K02Ny0HocZJ1ATX4SpA,9367 +keras/src/layers/normalization/layer_normalization.py,sha256=68Al0piqjcxBWWb-9q_AiorPuguXRMapE06CM5mwp8w,10265 +keras/src/layers/normalization/spectral_normalization.py,sha256=HTzypVIzBID26wsB80OSkyvBR0IO48XGRrMSF-u6rdE,4304 +keras/src/layers/normalization/unit_normalization.py,sha256=7YJphfXGpXGrZcaUj6RKYDpgd0EqPUs2tgK3DbybCAI,2059 
+keras/src/layers/pooling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/pooling/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/average_pooling1d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/average_pooling2d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/average_pooling3d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/base_global_pooling.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/base_pooling.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/global_average_pooling1d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/global_average_pooling2d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/global_average_pooling3d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/global_max_pooling1d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/global_max_pooling2d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/global_max_pooling3d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/max_pooling1d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/max_pooling2d.cpython-310.pyc,, +keras/src/layers/pooling/__pycache__/max_pooling3d.cpython-310.pyc,, +keras/src/layers/pooling/average_pooling1d.py,sha256=bFtZmRxvmQCUMA5vLE2SOHc8wQO0acKa7WJu8lTNgfU,3347 +keras/src/layers/pooling/average_pooling2d.py,sha256=SAihWGF5q0YNsxHVX8lYF3RrGt3RWuZesgMIVUqfRDI,4153 +keras/src/layers/pooling/average_pooling3d.py,sha256=ukbXgxotazAuDec_RsuKWj8khmmmCEnZdPQKg8J6SNA,3238 +keras/src/layers/pooling/base_global_pooling.py,sha256=_d1a2c2twJxzLJ0ULAXf5444Prr8SDsFomoTRxs4vwI,1486 +keras/src/layers/pooling/base_pooling.py,sha256=KNyul-L6f3UnIueC_04OQAf-c1JvpL_S1BpwxEZNV4E,2451 +keras/src/layers/pooling/global_average_pooling1d.py,sha256=h9zAVA0Dpxwk_-tn15v1NS-E0YZ_d4YGBS-IqOPxF94,3131 +keras/src/layers/pooling/global_average_pooling2d.py,sha256=hVzDSoG7VLExX1N0YZ_kTAvONRSr5UVsjqpvvCpFZmI,2469 
+keras/src/layers/pooling/global_average_pooling3d.py,sha256=jyL1rQmuoUcynfqhEAxyB1Y83WcTasAZ9pZHoWB8ER8,2603 +keras/src/layers/pooling/global_max_pooling1d.py,sha256=1RpUDPbnvHCltb0DZY38FHqg9_ruWgLT4G-FZUsy4H4,2357 +keras/src/layers/pooling/global_max_pooling2d.py,sha256=9d5ELOYLxeWyxp-PxSBo8AKIOoh0Vcv8FAGs0Xd87k0,2451 +keras/src/layers/pooling/global_max_pooling3d.py,sha256=NfsKoJHgKiEnCd8yMia6VyjRJXQIH1d-WnfIZIYqDRE,2585 +keras/src/layers/pooling/max_pooling1d.py,sha256=tcUlxUaxW-TWSO_XLcc1_ObDHCMNUADDZ993pwYmDAc,3346 +keras/src/layers/pooling/max_pooling2d.py,sha256=c8-EZmzYZRLgwE8TiWb3HRMiJiI_fplOELjrFUH5x2c,4128 +keras/src/layers/pooling/max_pooling3d.py,sha256=xVsJd6KPyu1m9jCVuwT3MZwpwT27TSx0k9cI_PhB2_8,3228 +keras/src/layers/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/preprocessing/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/category_encoding.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/discretization.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/feature_space.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/hashed_crossing.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/hashing.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/index_lookup.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/integer_lookup.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/mel_spectrogram.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/normalization.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/pipeline.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/rescaling.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/stft_spectrogram.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/string_lookup.cpython-310.pyc,, +keras/src/layers/preprocessing/__pycache__/text_vectorization.cpython-310.pyc,, 
+keras/src/layers/preprocessing/__pycache__/tf_data_layer.cpython-310.pyc,, +keras/src/layers/preprocessing/category_encoding.py,sha256=_8VZN-AoH07m0wOCmDB_Bf2xuQT7bZjUtVhI-MULU6o,6922 +keras/src/layers/preprocessing/discretization.py,sha256=2KvXZ2NSTaUP3IBMDydCANK7RNa3EwxvW9S5s4kIPsM,13080 +keras/src/layers/preprocessing/feature_space.py,sha256=-uA-gxQpCccwXuoC6LLcQIShoa_2DymnUspl89zRPTg,30196 +keras/src/layers/preprocessing/hashed_crossing.py,sha256=4ajEp1MHtLc0UKTbpO6f4wFGAZZIMjdPMCYm6qFZJA4,8488 +keras/src/layers/preprocessing/hashing.py,sha256=CtVKFmvr11tRTslGZ2q8PsHVrfK94BoVzlq_Z1keQyw,11189 +keras/src/layers/preprocessing/image_preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/preprocessing/image_preprocessing/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/auto_contrast.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/base_image_preprocessing_layer.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/center_crop.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/equalization.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/max_num_bounding_box.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/mix_up.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/rand_augment.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_brightness.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_degeneration.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_jitter.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_contrast.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_crop.cpython-310.pyc,, 
+keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_flip.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_grayscale.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_hue.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_posterization.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_rotation.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_saturation.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_sharpness.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_shear.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_translation.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_zoom.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/resizing.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/__pycache__/solarization.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py,sha256=GF23qTCPknYYTJzYeC136vRosZls432YbJe8S_YdpPg,3799 +keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py,sha256=vK9tLyfuxXYVfoH8tTMmN_tH5joToe1ctqfv7jZLpB8,13860 +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/bounding_box.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/converters.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/formats.cpython-310.pyc,, 
+keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/iou.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/validation.cpython-310.pyc,, +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/bounding_box.py,sha256=aI9u1OvCcnEPBUqMFHS-49xwFLZnhuKgFgB1b3NCAUQ,16270 +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py,sha256=opXlmX5SRiWEU2_M1PqkiBVi8LRNfIIDMPMfMY_2Yp0,15999 +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py,sha256=b4v7nskUauUvk7Ub4rgImPUysJrDl4m5oBTGD1MEnTI,3377 +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/iou.py,sha256=dJPC9FL3ViQSItURsiFKu6IZ5oelUQ91KFguWdBu8qA,10144 +keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation.py,sha256=aNeC8VBG2xY7qvNAcbTpb824SDyOf88iGMNIohwsjQk,7189 +keras/src/layers/preprocessing/image_preprocessing/center_crop.py,sha256=67il9tcOnt69j55lA4hYDrJJboppFCgnQptBL2dvF38,10022 +keras/src/layers/preprocessing/image_preprocessing/equalization.py,sha256=V5flpM63vc--L7lMMKfEDXQZ8hu-no0qXNudkUTeCms,8648 +keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py,sha256=VJ3eq2GwSeFUWJgQLqe2lHqrVn1qJyoJLUycOv4Xqjo,3304 +keras/src/layers/preprocessing/image_preprocessing/mix_up.py,sha256=wtT7wvlKaVRWu7XbpToESNVNI0KDNmuUCdM8RRTnSms,6520 +keras/src/layers/preprocessing/image_preprocessing/rand_augment.py,sha256=zQRxI_F_9GvKoOQ6LRFyLZ2nuzeRDIEelbNjaHyQi2I,7625 +keras/src/layers/preprocessing/image_preprocessing/random_brightness.py,sha256=-I9ovcx_0Ok0XS9NdtY4Q0MBo-izSpChAVqy8rCWlkE,6072 +keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py,sha256=JAItpxwEXaZibMCjna56fj8aciCcmmz95zT3HeVFSu0,4765 +keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py,sha256=dqgQiCVXLYKIlS4mf3krrdYiZMzDrLqsz60vex0bi6I,9333 
+keras/src/layers/preprocessing/image_preprocessing/random_contrast.py,sha256=cMVHg9LC0NGHDk1PYL7UMDyZpIbTixX4Rmm32Wr3_7g,5463 +keras/src/layers/preprocessing/image_preprocessing/random_crop.py,sha256=3DPcHVyYj3g8SZ8hV6eTJlVt41Nv6pMGVjLf_Wca5kc,10542 +keras/src/layers/preprocessing/image_preprocessing/random_flip.py,sha256=fQurnkSGbehqFhfyVHQCxpfidU-hnq3mx9nhBEK21Eg,8046 +keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py,sha256=STScLoNMZtwr_54jj8rcQcmVU_SLUBUXSQna85hVvAU,4260 +keras/src/layers/preprocessing/image_preprocessing/random_hue.py,sha256=XdlmKw81K9z6YUFmEKgu9kYQtFtdT1zJelXX_TxtN_c,6335 +keras/src/layers/preprocessing/image_preprocessing/random_posterization.py,sha256=w_MxVjDPqVjQgFiPmbRWzGDZg_BvZEYV5-KPA6Dg9Ik,5036 +keras/src/layers/preprocessing/image_preprocessing/random_rotation.py,sha256=uhZiuLac4VHbrUO9Uyk0bt4yfulw-LZ8Z177z-6yfXM,9624 +keras/src/layers/preprocessing/image_preprocessing/random_saturation.py,sha256=3CqJ5-kvWaENMrsiTL8TbImL3ToOiGRD3C5gfoz3J24,5924 +keras/src/layers/preprocessing/image_preprocessing/random_sharpness.py,sha256=qczPlJ_wl5636T1oqrlp_dE9jSn4xpfXNROOhmF3zKI,6021 +keras/src/layers/preprocessing/image_preprocessing/random_shear.py,sha256=fS33rneM_r4UAqfdtGXi9Tki6ooUh9Il_qXqIwxkRXI,14889 +keras/src/layers/preprocessing/image_preprocessing/random_translation.py,sha256=4hOk8oynwzTNPNIHGlGjOey7yaQQ8xn1Ac8R9Wbd3nI,14921 +keras/src/layers/preprocessing/image_preprocessing/random_zoom.py,sha256=g62ZOZkf_Vv3g9eC92X4Vk8NBnh0pQ1HP6Qu6Xrl1RY,16462 +keras/src/layers/preprocessing/image_preprocessing/resizing.py,sha256=VjVOiLolCfZ-i2OTaLuiPndPGKskMrqTM6FxtWfUn2Q,11812 +keras/src/layers/preprocessing/image_preprocessing/solarization.py,sha256=xfOlqPf_CA2t4dP1rFw1tcUP1mhDyliJTipCajYR5u0,7884 +keras/src/layers/preprocessing/index_lookup.py,sha256=DCf_TKmJx8wftMfjJ_ETpKz6Tq3RsDUXR7gbwIhcvT8,41996 +keras/src/layers/preprocessing/integer_lookup.py,sha256=4rlZ03HLx3g-t7r9u0K9gymKYo1-iDw8NYRjkQmL23o,18458 
+keras/src/layers/preprocessing/mel_spectrogram.py,sha256=siDkgfjItBQlq0ZxDwuyVFWUEWfxK-_4OV-ePVDvINU,14572 +keras/src/layers/preprocessing/normalization.py,sha256=qtJAzfr6JH2fsigGGydbV_tuY-JVlffqB45cPOgxNgc,14973 +keras/src/layers/preprocessing/pipeline.py,sha256=D6dd1LQTW9m9jUaeorTn29rY19gRmkSXXaUxj02kUxc,2533 +keras/src/layers/preprocessing/rescaling.py,sha256=OkjATRt1n3ncO2FL26zM2kj8NC3bu3fJGORT4nAyG8I,2798 +keras/src/layers/preprocessing/stft_spectrogram.py,sha256=r02Qko8raSF1vQrlL_SNlXW7Rjt8UiZOF-Y68-WkmGU,15059 +keras/src/layers/preprocessing/string_lookup.py,sha256=a5r6C7Y39M58JCkMd2851HmQYjagKdAltve2NExsawU,17745 +keras/src/layers/preprocessing/text_vectorization.py,sha256=kqCXKpRBkqrrX5SZPBrwwruw7ByOxew-xQ6OZoREuRA,27816 +keras/src/layers/preprocessing/tf_data_layer.py,sha256=ps0Az4BbFcxdwdZ2dYzOPFQQ8tYTOzKyiNSpu5dwAFU,2628 +keras/src/layers/regularization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/regularization/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/regularization/__pycache__/activity_regularization.cpython-310.pyc,, +keras/src/layers/regularization/__pycache__/alpha_dropout.cpython-310.pyc,, +keras/src/layers/regularization/__pycache__/dropout.cpython-310.pyc,, +keras/src/layers/regularization/__pycache__/gaussian_dropout.cpython-310.pyc,, +keras/src/layers/regularization/__pycache__/gaussian_noise.cpython-310.pyc,, +keras/src/layers/regularization/__pycache__/spatial_dropout.cpython-310.pyc,, +keras/src/layers/regularization/activity_regularization.py,sha256=m7E0xA2dqT0m-qLj1LBNAitszaaqtLlCuScc587-BpA,1278 +keras/src/layers/regularization/alpha_dropout.py,sha256=KSJOFE249x0XUrdXmotAPjwEHrFvw7o6Q6X6D6Eg2OQ,3620 +keras/src/layers/regularization/dropout.py,sha256=j4ludUTtz804NxscjvavNb2aTQbdNjIMsFcIYDtJzWY,3004 +keras/src/layers/regularization/gaussian_dropout.py,sha256=_iTmmmSK0qCKXdtHYRIK6zSE3G9DRfGH67zT3EAx9D4,2067 
+keras/src/layers/regularization/gaussian_noise.py,sha256=KQ0Z8MWzVb5iuM8eTtTxOUF5TcYX2rAcqy4S55s6klY,2115 +keras/src/layers/regularization/spatial_dropout.py,sha256=8SORBywkWwdM-id_xnFquDCrRKhiLqNrMtXlyll-AR0,7300 +keras/src/layers/reshaping/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/reshaping/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/cropping1d.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/cropping2d.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/cropping3d.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/flatten.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/permute.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/repeat_vector.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/reshape.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/up_sampling1d.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/up_sampling2d.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/up_sampling3d.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/zero_padding1d.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/zero_padding2d.cpython-310.pyc,, +keras/src/layers/reshaping/__pycache__/zero_padding3d.cpython-310.pyc,, +keras/src/layers/reshaping/cropping1d.py,sha256=jrSIsn5Zvwe8R73YyC1fhF3mDZTOC5ymhvkGKH2M75g,2760 +keras/src/layers/reshaping/cropping2d.py,sha256=N7r1-tuAkhC9QWH0Tt005iZnHimWT6cQBMbbWR5-tUQ,9044 +keras/src/layers/reshaping/cropping3d.py,sha256=Hm176o-duFkIXiAYjvjRAY6mWypY_vSEmGpQU1Eh8yU,11265 +keras/src/layers/reshaping/flatten.py,sha256=La8OFnWq0UisPjTsMMGNyFuzxJlnpqGCYX9kLgLg92Q,3059 +keras/src/layers/reshaping/permute.py,sha256=F3BxIPmPBnQGSmK2CxW4udFRRAuGKuZaomt-C2luUTs,2090 +keras/src/layers/reshaping/repeat_vector.py,sha256=Gv8DRO145ooHBriDLvzitmKQJtx-ek0o7EPStPx_Pac,1335 +keras/src/layers/reshaping/reshape.py,sha256=aAgYnt-rs_rqu2SppXZW6KkyBkCX2w1amBG9PhGDavY,2322 
+keras/src/layers/reshaping/up_sampling1d.py,sha256=xJUqfpYUyc9x461UV_TMPDaCcy1_whKAknIHLkCcbhI,1591 +keras/src/layers/reshaping/up_sampling2d.py,sha256=exYZP8lo_lLVLsIgdlbyRVv_h8N9NHOXQ6SkY6nOSVQ,6035 +keras/src/layers/reshaping/up_sampling3d.py,sha256=nlK1wE5UCuTUsCGJKYkZixOGvxVE20f-H26hTnCyUU4,4910 +keras/src/layers/reshaping/zero_padding1d.py,sha256=t_WxXso0weqfouc-3Ij06YPi3r-9WYDLly_JPfIcHBM,3362 +keras/src/layers/reshaping/zero_padding2d.py,sha256=tDz2m1cfQaxvak2XbOWw7YDkOzUmM5SsaejDOBSMvt4,4646 +keras/src/layers/reshaping/zero_padding3d.py,sha256=XaorgfwHCjgaVtdiQWW6wrwHpoz-c2nkjWW5Ww6nTfE,5060 +keras/src/layers/rnn/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/layers/rnn/__pycache__/__init__.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/bidirectional.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/conv_lstm.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/conv_lstm1d.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/conv_lstm2d.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/conv_lstm3d.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/dropout_rnn_cell.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/gru.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/lstm.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/rnn.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/simple_rnn.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/stacked_rnn_cells.cpython-310.pyc,, +keras/src/layers/rnn/__pycache__/time_distributed.cpython-310.pyc,, +keras/src/layers/rnn/bidirectional.py,sha256=Jbce73SzJteMd3NNCrjwrymz_lWF03Qr1ejrAtzERrQ,13235 +keras/src/layers/rnn/conv_lstm.py,sha256=Tc6hjC_Z2WwQzZNB0XyZ2SU-gwylNP1OhDMdHN1-lTA,27621 +keras/src/layers/rnn/conv_lstm1d.py,sha256=7Al9iXoc5CbdywW8O4CIP_HeRQD4fTZ0Ph_3a_lx4So,8296 +keras/src/layers/rnn/conv_lstm2d.py,sha256=N9qTryL8AgNZxOhbqt8YgFYXeb88qGn0CTgKICXlRpw,8381 +keras/src/layers/rnn/conv_lstm3d.py,sha256=khYSWkfVqI3RGrQuthK93TqlWX13itKCjpi0I6CPKkU,8289 
+keras/src/layers/rnn/dropout_rnn_cell.py,sha256=S9TM2G9n1I9xsOSoS3ZKHhPbq_-0xh2P__sBNfYE98E,2524 +keras/src/layers/rnn/gru.py,sha256=Isofd5zrFOvzP341MQ2ZbYXMWkY82hAqtJYQ_PsxwWU,28798 +keras/src/layers/rnn/lstm.py,sha256=tHSDDprfhyZbczEbRIxRKKD3eS3d7QrlPtBrHgQ87jw,27686 +keras/src/layers/rnn/rnn.py,sha256=-U1H8rFM6TMCPBKCdyI1NOUtjYO__4EMILv5C6OI1uU,18984 +keras/src/layers/rnn/simple_rnn.py,sha256=w8veFLz2qsZbFJpZyyDrWfeRu4wjWnmRAD6-Im9rXTo,17542 +keras/src/layers/rnn/stacked_rnn_cells.py,sha256=RQU16cJjGZcyUTh5GqEJUUxmydNNXsR06K5kycrks5Y,4943 +keras/src/layers/rnn/time_distributed.py,sha256=BUYeXP_RslRhq_k-VZ6t65n2bQKq_pQImXFTh4d4emc,4800 +keras/src/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/legacy/__pycache__/__init__.cpython-310.pyc,, +keras/src/legacy/__pycache__/backend.cpython-310.pyc,, +keras/src/legacy/__pycache__/layers.cpython-310.pyc,, +keras/src/legacy/__pycache__/losses.cpython-310.pyc,, +keras/src/legacy/backend.py,sha256=9EJkBgzhUvSXZPN9vX9i58g3AOTtGIqutYVC_SwLo_A,70277 +keras/src/legacy/layers.py,sha256=oOaFtRtroSZpKL0z4tDWOpUbsrJhmuef6twESrSOmx8,8396 +keras/src/legacy/losses.py,sha256=pprb6guwHwBv-5zo2qZhLkji4z-L0plE5k6CoS7tsr8,523 +keras/src/legacy/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/legacy/preprocessing/__pycache__/__init__.cpython-310.pyc,, +keras/src/legacy/preprocessing/__pycache__/image.cpython-310.pyc,, +keras/src/legacy/preprocessing/__pycache__/sequence.cpython-310.pyc,, +keras/src/legacy/preprocessing/__pycache__/text.cpython-310.pyc,, +keras/src/legacy/preprocessing/image.py,sha256=zxY_utToHOHn4RYaX_qGB-BcLnnWr5o6nrK-nHJhuGk,65545 +keras/src/legacy/preprocessing/sequence.py,sha256=jyot2KR3652vRxuzmLkWjRd5MivMysH_3jZ1HgGvF80,11172 +keras/src/legacy/preprocessing/text.py,sha256=1NCgRIVZhZoWPSv0GKPGZ2r0D6SvcnHQsLpvFSnVals,11103 +keras/src/legacy/saving/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+keras/src/legacy/saving/__pycache__/__init__.cpython-310.pyc,, +keras/src/legacy/saving/__pycache__/json_utils.cpython-310.pyc,, +keras/src/legacy/saving/__pycache__/legacy_h5_format.cpython-310.pyc,, +keras/src/legacy/saving/__pycache__/saving_options.cpython-310.pyc,, +keras/src/legacy/saving/__pycache__/saving_utils.cpython-310.pyc,, +keras/src/legacy/saving/__pycache__/serialization.cpython-310.pyc,, +keras/src/legacy/saving/json_utils.py,sha256=JIGZu1OJylkP71N6h3IBLoG_e9qnCQAC9H4GdDdUIOc,7296 +keras/src/legacy/saving/legacy_h5_format.py,sha256=Vcw71ftgO9e_0e14XG5opxVwGquKUjVQW_3asiWkdOI,22605 +keras/src/legacy/saving/saving_options.py,sha256=ZUyOHYsTf0rBLBAOlSaeqVNv9tGjWA9LsNyPk5WTXRI,485 +keras/src/legacy/saving/saving_utils.py,sha256=Mk4wGzXa4B_9CrjRdPFQkWuQGm5ySg5aKybXnLzsj1c,9275 +keras/src/legacy/saving/serialization.py,sha256=s4qrdywzIRnMccfXRmxbSqqfquQyohIIf7TdjRQCsBc,21808 +keras/src/losses/__init__.py,sha256=rt63Ye0f7YdAR0eV0EOj2J61DI6xNdp2ojonx6rB3wE,6595 +keras/src/losses/__pycache__/__init__.cpython-310.pyc,, +keras/src/losses/__pycache__/loss.cpython-310.pyc,, +keras/src/losses/__pycache__/losses.cpython-310.pyc,, +keras/src/losses/loss.py,sha256=BjtYoghA3jfpJ4_bG7c3NRK3rk7omzMSCuK9ZNlaYGs,8787 +keras/src/losses/losses.py,sha256=vrN_LMRIWwOJQ-zc96YzgcDaAiKxLYytYc_lun-8EIA,93333 +keras/src/metrics/__init__.py,sha256=CydJsY38PR2lRN4irhO_wnlvgruTEAgSHp8eUYE0lwY,7410 +keras/src/metrics/__pycache__/__init__.cpython-310.pyc,, +keras/src/metrics/__pycache__/accuracy_metrics.cpython-310.pyc,, +keras/src/metrics/__pycache__/confusion_metrics.cpython-310.pyc,, +keras/src/metrics/__pycache__/correlation_metrics.cpython-310.pyc,, +keras/src/metrics/__pycache__/f_score_metrics.cpython-310.pyc,, +keras/src/metrics/__pycache__/hinge_metrics.cpython-310.pyc,, +keras/src/metrics/__pycache__/iou_metrics.cpython-310.pyc,, +keras/src/metrics/__pycache__/metric.cpython-310.pyc,, +keras/src/metrics/__pycache__/metrics_utils.cpython-310.pyc,, 
+keras/src/metrics/__pycache__/probabilistic_metrics.cpython-310.pyc,, +keras/src/metrics/__pycache__/reduction_metrics.cpython-310.pyc,, +keras/src/metrics/__pycache__/regression_metrics.cpython-310.pyc,, +keras/src/metrics/accuracy_metrics.py,sha256=cDHR0jyFjtFz_oY20JMxXSpbKGnJq2lkZJt3N3NDG7g,18283 +keras/src/metrics/confusion_metrics.py,sha256=zVBnnk1n0rFuE7XiXp5mB24aO4pY5JIQCgMqyp6Epfw,61530 +keras/src/metrics/correlation_metrics.py,sha256=AKLlFGiByNSM_Dd4CIsQrjKpxPX53CGl6fbsvz3DY7A,6905 +keras/src/metrics/f_score_metrics.py,sha256=B6SBXpXikgayvre6yQJSEsbIpWlvUveSicEKdeGkaUs,11743 +keras/src/metrics/hinge_metrics.py,sha256=hmlZY6wijxvW3RpOt4RUA1Kn3US5mR7h98o-jIZsbcs,3255 +keras/src/metrics/iou_metrics.py,sha256=pk0Bskqdh3HdQDXMwxpCUj-2Re6a4sH1e6wQlHiTm40,27572 +keras/src/metrics/metric.py,sha256=tBcGhhWUebMD0c78algCXwMYwuaSt3lLZOZ-DtRe_IQ,8720 +keras/src/metrics/metrics_utils.py,sha256=NFrVJxNBBRGS62dOGONpcLI1zJS7w5X8Z7FzrMajKLQ,26616 +keras/src/metrics/probabilistic_metrics.py,sha256=cyDuxohv3eqbVjGhTljwo507wzriuXG20OVsCXd0Fo8,10640 +keras/src/metrics/reduction_metrics.py,sha256=-imgCBWg9Kdfx_k4Shq81h07feoHDquB_J704NgFQ1g,7345 +keras/src/metrics/regression_metrics.py,sha256=eLacV_8CKtzA26BJDJuncUDATuL1x8O6SRHqLA9eSFc,19756 +keras/src/models/__init__.py,sha256=DPbBPSfIGgsufTfJH5U5xJOeN_Ef4FMadT7KKYg3Kjg,143 +keras/src/models/__pycache__/__init__.cpython-310.pyc,, +keras/src/models/__pycache__/cloning.cpython-310.pyc,, +keras/src/models/__pycache__/functional.cpython-310.pyc,, +keras/src/models/__pycache__/model.cpython-310.pyc,, +keras/src/models/__pycache__/sequential.cpython-310.pyc,, +keras/src/models/__pycache__/variable_mapping.cpython-310.pyc,, +keras/src/models/cloning.py,sha256=BFFiu9lYRkNSL7EmzP-o8hx0kBsV-gBnTtT2-7C-ZWM,15413 +keras/src/models/functional.py,sha256=VQyiBKQOWCAOl91PIxZhS5zHYwqc5waKKCd-2At46Ps,33505 +keras/src/models/model.py,sha256=HQIagTsARHPhuea9YBXvJjubJ58hYLRzpr94eye-l_A,32035 
+keras/src/models/sequential.py,sha256=S0APRXF1iTvostQG6DD6ofF6b-uf0f1zusXimzdNxVg,13826 +keras/src/models/variable_mapping.py,sha256=FVtcgjBRqOxtvkzOE6kjG9SpcB9keDg2gS5LOTlXvG0,2181 +keras/src/ops/__init__.py,sha256=aORlvnrqY_eQl0EFLWdpHsXHnQ6JLSw1qhwJMr-VXJ0,644 +keras/src/ops/__pycache__/__init__.cpython-310.pyc,, +keras/src/ops/__pycache__/core.cpython-310.pyc,, +keras/src/ops/__pycache__/function.cpython-310.pyc,, +keras/src/ops/__pycache__/image.cpython-310.pyc,, +keras/src/ops/__pycache__/linalg.cpython-310.pyc,, +keras/src/ops/__pycache__/math.cpython-310.pyc,, +keras/src/ops/__pycache__/nn.cpython-310.pyc,, +keras/src/ops/__pycache__/node.cpython-310.pyc,, +keras/src/ops/__pycache__/numpy.cpython-310.pyc,, +keras/src/ops/__pycache__/operation.cpython-310.pyc,, +keras/src/ops/__pycache__/operation_utils.cpython-310.pyc,, +keras/src/ops/__pycache__/symbolic_arguments.cpython-310.pyc,, +keras/src/ops/core.py,sha256=7WMuT86C5eIq_wPushi-Y2XJFGZc1Qf7orVmc35DbbQ,39591 +keras/src/ops/function.py,sha256=H2HDbfS5Y4-zkV8WpBj38xZeFkIBizHYZhbItbb7EJk,16285 +keras/src/ops/image.py,sha256=w0bHwOIxEYl96gVg2L31ftfO1HzgJeLralUjlEQMWQA,43372 +keras/src/ops/linalg.py,sha256=_yLcKA5xvCKsZSoldYcDPQ5MCw8d7YetMKrg1us6NtA,21251 +keras/src/ops/math.py,sha256=QeIgeWM5KZ9R9O1sSIuafHL3qgfs2pQfHvRNod3Pdcw,34519 +keras/src/ops/nn.py,sha256=XiozjlYU3hxynUFvikMoNwB1Vqq_4bvfr0niz2btp7U,82056 +keras/src/ops/node.py,sha256=aJgn9D-GkteE--Bbt2cZ9JjVxb2W2uS1OWEKoeLsl3Y,5583 +keras/src/ops/numpy.py,sha256=SfXR2RvvemVJg7oaNo3nAhnkrDkILvVnRQ3a8IVil2c,216928 +keras/src/ops/operation.py,sha256=2YHXy1bhcTxhvJM0CUWGzY-7EpbHDeNlYLrSbmb6Bck,11903 +keras/src/ops/operation_utils.py,sha256=McVlxvb-iD826m6Rpm_1UvnImhaLZLs3tzlCZE6S8Xo,14402 +keras/src/ops/symbolic_arguments.py,sha256=MKwXxZYkyouD9BPmQ1uUNxILdcwPvTayAqXaUV3P3o4,1628 +keras/src/optimizers/__init__.py,sha256=obSfcJtrRgVj1rCOxrNyeDGPS0_m16tDZzUphEy3iR4,3931 +keras/src/optimizers/__pycache__/__init__.cpython-310.pyc,, 
+keras/src/optimizers/__pycache__/adadelta.cpython-310.pyc,, +keras/src/optimizers/__pycache__/adafactor.cpython-310.pyc,, +keras/src/optimizers/__pycache__/adagrad.cpython-310.pyc,, +keras/src/optimizers/__pycache__/adam.cpython-310.pyc,, +keras/src/optimizers/__pycache__/adamax.cpython-310.pyc,, +keras/src/optimizers/__pycache__/adamw.cpython-310.pyc,, +keras/src/optimizers/__pycache__/base_optimizer.cpython-310.pyc,, +keras/src/optimizers/__pycache__/ftrl.cpython-310.pyc,, +keras/src/optimizers/__pycache__/lamb.cpython-310.pyc,, +keras/src/optimizers/__pycache__/lion.cpython-310.pyc,, +keras/src/optimizers/__pycache__/loss_scale_optimizer.cpython-310.pyc,, +keras/src/optimizers/__pycache__/nadam.cpython-310.pyc,, +keras/src/optimizers/__pycache__/optimizer.cpython-310.pyc,, +keras/src/optimizers/__pycache__/rmsprop.cpython-310.pyc,, +keras/src/optimizers/__pycache__/sgd.cpython-310.pyc,, +keras/src/optimizers/adadelta.py,sha256=nRWBuAJGBrofDN2fUb-vNvGz5nudZIjlBx7OBWSRXuM,4759 +keras/src/optimizers/adafactor.py,sha256=BAKcQ7ptahNHfzd6X_p5XMIV4TYr7FH-28DtpCUEMoU,7637 +keras/src/optimizers/adagrad.py,sha256=wv7cGmH4I0cB7nabSDmGrC4aqwz-j1CfXlQZKyvDLQc,3918 +keras/src/optimizers/adam.py,sha256=nzzVTAaalAbYcUDStCfK4BZw2FV3uPedAjRdmkIpBF0,5909 +keras/src/optimizers/adamax.py,sha256=d31aAVPkJ9GVPq0hTjEPd0I_gB63DsD-IS7yUI1GRmI,5082 +keras/src/optimizers/adamw.py,sha256=TVnjn1JQMwy_cghTbFi9WGnLco45Oq9YZ8qME8ej3r4,3785 +keras/src/optimizers/base_optimizer.py,sha256=pUhxkquSRnulTfyM4k7WwRamaDlQ2GggRnjBWboCSro,45232 +keras/src/optimizers/ftrl.py,sha256=cnfneb2m7nGiIZjGbR0cOOZbqXHBixrzyLnrcU6VchY,9099 +keras/src/optimizers/lamb.py,sha256=5_PWBd6uWKOVRk89h_j4tOMSowLvsq7Va2QLGTfJP_w,5276 +keras/src/optimizers/lion.py,sha256=15ML1_C7XGCFMgML90GqjYlXq_wRm2T9xR1WbwGus9A,4969 +keras/src/optimizers/loss_scale_optimizer.py,sha256=Kj-NSrfWr22uwF8VlYvn8Aao_7TOsEuerzlny6qJqco,11619 +keras/src/optimizers/nadam.py,sha256=tsRouI2vO5uU2Gy106YSgrSlRg9nSF9sbp7alqcVOhI,5926 
+keras/src/optimizers/optimizer.py,sha256=cZtZwu42plSGjZBqoS6KThwJvWjEcPz9g97nZCSrwOA,870 +keras/src/optimizers/rmsprop.py,sha256=-uklCRqdptFxUlkK0_J6Ww7PptVhpsw7ywJj_L54jWM,6003 +keras/src/optimizers/schedules/__init__.py,sha256=vuUuHNTev8sD2-swsuq7zqyYbmaOhDyiIE6F3dGGSZU,546 +keras/src/optimizers/schedules/__pycache__/__init__.cpython-310.pyc,, +keras/src/optimizers/schedules/__pycache__/learning_rate_schedule.cpython-310.pyc,, +keras/src/optimizers/schedules/learning_rate_schedule.py,sha256=Oe3zk_IjeIN9TFNz1895RTN2rCk9uZY8iYbqFb9E06c,35507 +keras/src/optimizers/sgd.py,sha256=T-JFtmCVnLLAvN3S3qtWoKWci53AmxH2xBMKzeC11N4,4556 +keras/src/quantizers/__init__.py,sha256=Ssm4dFHi_pZh_erToRAiFHt4gyoftPS9CepipyhMStY,1784 +keras/src/quantizers/__pycache__/__init__.cpython-310.pyc,, +keras/src/quantizers/__pycache__/quantizers.cpython-310.pyc,, +keras/src/quantizers/quantizers.py,sha256=O-6FO6pKwOJup4quT3_WIZ5Kuwlt1X9PBDC1IfL-KJQ,5689 +keras/src/random/__init__.py,sha256=BmXVYPzxbhADohoLtAEEzB3cesP7YBFDsp1qc6BWWlg,420 +keras/src/random/__pycache__/__init__.cpython-310.pyc,, +keras/src/random/__pycache__/random.cpython-310.pyc,, +keras/src/random/__pycache__/seed_generator.cpython-310.pyc,, +keras/src/random/random.py,sha256=bUADZIVDuCghwIWTk0qBxXTxUdiNGWIdsRi8QJ3ePg4,17581 +keras/src/random/seed_generator.py,sha256=XYukdqfFArfWDwzBzip8hmoC1Ta2AAK-bF1eUy2cM3I,5593 +keras/src/regularizers/__init__.py,sha256=GzK9FTKL2Xxd5H55GfG9gxDqt4eZoVHFWICgb2VW8qM,1731 +keras/src/regularizers/__pycache__/__init__.cpython-310.pyc,, +keras/src/regularizers/__pycache__/regularizers.cpython-310.pyc,, +keras/src/regularizers/regularizers.py,sha256=urXNmMGuqHT7lOmS-yQPl3At3Ny-37Xlo389ErCg84A,11799 +keras/src/saving/__init__.py,sha256=vnrtfvnzW7Gwtxe5COhaMoEnVYB5iDe2YlqJ-DvqFIk,614 +keras/src/saving/__pycache__/__init__.cpython-310.pyc,, +keras/src/saving/__pycache__/file_editor.cpython-310.pyc,, +keras/src/saving/__pycache__/keras_saveable.cpython-310.pyc,, 
+keras/src/saving/__pycache__/object_registration.cpython-310.pyc,, +keras/src/saving/__pycache__/saving_api.cpython-310.pyc,, +keras/src/saving/__pycache__/saving_lib.cpython-310.pyc,, +keras/src/saving/__pycache__/serialization_lib.cpython-310.pyc,, +keras/src/saving/file_editor.py,sha256=XAl9O3XK2VO2IuAB-Mm40z-WpdRw9aQDS6sKJawAE1A,28980 +keras/src/saving/keras_saveable.py,sha256=aGIt1ajtsaamfUq18LM6ql8JEoQzi3HwzJEuwQ9bmKE,1285 +keras/src/saving/object_registration.py,sha256=aZmmFrJP5GjjNpLNmq4k6D-PqdAH8PMBGk7BXI7eogE,7358 +keras/src/saving/saving_api.py,sha256=UWdusfIT2tamCZD_LuDyAZxi-9jdfpgWbm5_XzObWaU,10419 +keras/src/saving/saving_lib.py,sha256=nCAu7h1zphpic0XqN-kZuRa2qT2XRhl2oPrzjDUozJs,40458 +keras/src/saving/serialization_lib.py,sha256=PjD60iyHE42G6wtv37JzC5ikRD981OWD_4V5Q4bgUFk,28759 +keras/src/testing/__init__.py,sha256=xOZf-VBOf3wrXu47PgII2TNfXgxUse60HCinBryHiK8,266 +keras/src/testing/__pycache__/__init__.cpython-310.pyc,, +keras/src/testing/__pycache__/test_case.cpython-310.pyc,, +keras/src/testing/__pycache__/test_utils.cpython-310.pyc,, +keras/src/testing/test_case.py,sha256=-S-acWAfOOc9SQTuBuroQ_-hqvtc8e0V96JTCp27Yw4,31514 +keras/src/testing/test_utils.py,sha256=6Vb8tJIyjU1ay63w3jvXNNhh7sSNrosQll4ii1NXELQ,6197 +keras/src/trainers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +keras/src/trainers/__pycache__/__init__.cpython-310.pyc,, +keras/src/trainers/__pycache__/compile_utils.cpython-310.pyc,, +keras/src/trainers/__pycache__/epoch_iterator.cpython-310.pyc,, +keras/src/trainers/__pycache__/trainer.cpython-310.pyc,, +keras/src/trainers/compile_utils.py,sha256=1xfQpgjiHRvueieijoJYVAMPgAs89CC-2AANC_dspE4,30253 +keras/src/trainers/data_adapters/__init__.py,sha256=GZa9Y4uzIEu_VEZnVPHAS0s_Jd4Z1sIn71-Vk4zj-wY,5934 +keras/src/trainers/data_adapters/__pycache__/__init__.cpython-310.pyc,, +keras/src/trainers/data_adapters/__pycache__/array_data_adapter.cpython-310.pyc,, 
+keras/src/trainers/data_adapters/__pycache__/array_slicing.cpython-310.pyc,, +keras/src/trainers/data_adapters/__pycache__/data_adapter.cpython-310.pyc,, +keras/src/trainers/data_adapters/__pycache__/data_adapter_utils.cpython-310.pyc,, +keras/src/trainers/data_adapters/__pycache__/generator_data_adapter.cpython-310.pyc,, +keras/src/trainers/data_adapters/__pycache__/py_dataset_adapter.cpython-310.pyc,, +keras/src/trainers/data_adapters/__pycache__/tf_dataset_adapter.cpython-310.pyc,, +keras/src/trainers/data_adapters/__pycache__/torch_data_loader_adapter.cpython-310.pyc,, +keras/src/trainers/data_adapters/array_data_adapter.py,sha256=T7_AmjlxGPxK0-sWqKzgFMwp8W-d8zzWirBxeO2Frxc,14219 +keras/src/trainers/data_adapters/array_slicing.py,sha256=y79A04eUEjtyQ3XAzwjMJyG-rk_aiCW4EQNFsQYKjoU,17315 +keras/src/trainers/data_adapters/data_adapter.py,sha256=NGBEr2cAFfmrtRl7a8f7iCEaaCN2k8bw3l-3QxlEzRM,3261 +keras/src/trainers/data_adapters/data_adapter_utils.py,sha256=Tdk-5a2Qhi3BDhye8DYOTub4gmV2ymirASj52LuE570,10520 +keras/src/trainers/data_adapters/generator_data_adapter.py,sha256=-bqQwJT-Gu-ec4aK0ejPb1FQQGVmlkbxHA4_11TFRPc,3118 +keras/src/trainers/data_adapters/py_dataset_adapter.py,sha256=HngaKe2jU2YOld2LpKx8yeBY5iT1OIxupjMskhDPqQ8,23580 +keras/src/trainers/data_adapters/tf_dataset_adapter.py,sha256=BUwA_o1nLu7JMfXCXiY0Q8t4z3a9CVzPGGId8Jyn4bQ,4950 +keras/src/trainers/data_adapters/torch_data_loader_adapter.py,sha256=RvM3n5-l3k5TMDVtemv4cQoSTrEtB8q1glmS7s1dKVM,2544 +keras/src/trainers/epoch_iterator.py,sha256=kCn6-j5qIv0LTvR1uMhINUU9GPbpiwX812zumtWvUds,4916 +keras/src/trainers/trainer.py,sha256=3S9ULMmxkVZUuQIoKfGJeLvDzXv59Qf0snUdrqNU_3U,51768 +keras/src/tree/__init__.py,sha256=GFevGbI_JtGccMAcA-382UO6ATdJap_YkpI50smCrv4,629 +keras/src/tree/__pycache__/__init__.cpython-310.pyc,, +keras/src/tree/__pycache__/dmtree_impl.cpython-310.pyc,, +keras/src/tree/__pycache__/optree_impl.cpython-310.pyc,, +keras/src/tree/__pycache__/tree_api.cpython-310.pyc,, 
+keras/src/tree/dmtree_impl.py,sha256=f5iwagX0Fejin-w-5_J6x3OyjTvekESCKcpmYQQkXug,13537 +keras/src/tree/optree_impl.py,sha256=XGWkj7TQ5CcUwRLgH8-vUnWKNZudALU1mfQEMlp87mQ,5760 +keras/src/tree/tree_api.py,sha256=cSOp6EMOe8p0DUIbbvELrzIjABTIYX0Fw7CBfqi8pcY,14093 +keras/src/utils/__init__.py,sha256=WSmTldk6M-XV0X84XR5vryg0BTR8KsTfxNIyRaNkqq0,1423 +keras/src/utils/__pycache__/__init__.cpython-310.pyc,, +keras/src/utils/__pycache__/argument_validation.cpython-310.pyc,, +keras/src/utils/__pycache__/audio_dataset_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/backend_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/code_stats.cpython-310.pyc,, +keras/src/utils/__pycache__/config.cpython-310.pyc,, +keras/src/utils/__pycache__/dataset_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/dtype_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/file_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/image_dataset_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/image_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/io_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/jax_layer.cpython-310.pyc,, +keras/src/utils/__pycache__/jax_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/model_visualization.cpython-310.pyc,, +keras/src/utils/__pycache__/module_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/naming.cpython-310.pyc,, +keras/src/utils/__pycache__/numerical_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/progbar.cpython-310.pyc,, +keras/src/utils/__pycache__/python_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/rng_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/sequence_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/summary_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/text_dataset_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/tf_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/timeseries_dataset_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/torch_utils.cpython-310.pyc,, 
+keras/src/utils/__pycache__/traceback_utils.cpython-310.pyc,, +keras/src/utils/__pycache__/tracking.cpython-310.pyc,, +keras/src/utils/argument_validation.py,sha256=uRFoLNJu3L2J8CM8L7uXGqhYi7ji8whh0H8nSHuRUXg,2876 +keras/src/utils/audio_dataset_utils.py,sha256=pxg3jOHgZMFhEkuJmCjI-dcrFyv7OlHyWW-49eedKN0,15114 +keras/src/utils/backend_utils.py,sha256=wp9i8Bie9mpkf6qdOAhZZ35-7tjSpgThWrlHcbRT8Xg,4618 +keras/src/utils/code_stats.py,sha256=1h4ifpAH5Jezm8BVrKM_WyzcG9uxrUiyzP1kcS4uqlo,1442 +keras/src/utils/config.py,sha256=3VhENVcng0DeazR-5rvjSnW_sovvOw-skEP-t3xWCEY,4643 +keras/src/utils/dataset_utils.py,sha256=IgVqIdnRf3sYHrLqJa5cigCIZUG1WxwollhNmr_4zDc,28195 +keras/src/utils/dtype_utils.py,sha256=wL_WaWYoDzDDmQW6EQGdpBb9O5QJ9OaEJsvY0Mir4uc,1483 +keras/src/utils/file_utils.py,sha256=fLlsHbcqkLr1xMgA7MmRrmoZwY_tLPUGOSdEzk_VE7w,17267 +keras/src/utils/image_dataset_utils.py,sha256=doL8q0q4DciFnlO-IyKN1v2Emh_gP4sI2rDhgeKL5qs,16964 +keras/src/utils/image_utils.py,sha256=HUI7Zcgqvsmm8a1xwfMwr7pOhnG4lsChP8Owv-xlCTM,16703 +keras/src/utils/io_utils.py,sha256=SreGeSMF3TGts4jaTMbf4yoSBhAgeJw-g2cOMyuePYA,4172 +keras/src/utils/jax_layer.py,sha256=zVHKEh4t4Rr8YseQlY-2EUxAjjVhVrQ2ngOetWIRv8w,26571 +keras/src/utils/jax_utils.py,sha256=vY3P4S9mfWEjdirLd81ocKqeCm-UVfgQ1yTi6UHdBiM,322 +keras/src/utils/model_visualization.py,sha256=JmATPQMI7nrIc32o2jCwm20M0XTivQnQZMFLes3-g00,16331 +keras/src/utils/module_utils.py,sha256=cKwmZaFoy0H0Q5e3WwlmK3X36DHburdBiEqmHkKBoAc,1988 +keras/src/utils/naming.py,sha256=bPowKBlgiVP_6XtVlNVHxrxheKuJy2c0e-oEM8ocZQY,1776 +keras/src/utils/numerical_utils.py,sha256=7XmtN-AFIYhbioLsbOTiHHiJsTrEPpiJpNJpG6GvnDg,7228 +keras/src/utils/progbar.py,sha256=Hud-bqGoixlyilD9NZnmcSOe3fT686Cv9GAUO9gPpvs,10349 +keras/src/utils/python_utils.py,sha256=sOjnW2s5WOkWBVEGgAQDKqhuV8YeOMESjH4VF6zOIio,4697 +keras/src/utils/rng_utils.py,sha256=XCokkeBtb0xDjLkvKsvJoTLoalM3c_tJHfTbysqpNvo,1677 +keras/src/utils/sequence_utils.py,sha256=CveyJ5VM5KJ4pFlo6LWT9omzd_xDeMRjTgczIKekP3Y,4716 
+keras/src/utils/summary_utils.py,sha256=jjbTB6NTqMniSWXPKeNY6dvpn-U37WJdwqdfl8uX5nI,15447 +keras/src/utils/text_dataset_utils.py,sha256=JUqDauTec6uRZs71SbKeVjxHx_CNqqOWkoXQ1Q7ldRs,10701 +keras/src/utils/tf_utils.py,sha256=PC6SCcXouR5WjZ_e_MzAgWj1x9-bW4bQBiph6bOKf0c,4931 +keras/src/utils/timeseries_dataset_utils.py,sha256=rVxSuqlYLpzw_dVo8Ym5HSE2jFmndS8MAv4Uewycojo,9842 +keras/src/utils/torch_utils.py,sha256=QQNDA4hw_JVYlXXZH089s0Ev6JBqVGfxSeA7JL3ncFU,5226 +keras/src/utils/traceback_utils.py,sha256=VI8VJ8QjTDc3-cx3xfR9H7g68D2KVH7VknHi_JrVMuU,8997 +keras/src/utils/tracking.py,sha256=mVig-TS5LZbModoyAOnN3msazudKggW62hxUq4XzT2I,8844 +keras/src/version.py,sha256=e0Jbyh9_5DxOOWI-5lPqEq75DpaN7-F_BBuP3wJfygQ,189 +keras/src/visualization/__init__.py,sha256=bDdV3eLKeLKoUwUDBFuZxMO560OyFZND0zBn8vaG6rg,111 +keras/src/visualization/__pycache__/__init__.cpython-310.pyc,, +keras/src/visualization/__pycache__/draw_bounding_boxes.cpython-310.pyc,, +keras/src/visualization/__pycache__/draw_segmentation_masks.cpython-310.pyc,, +keras/src/visualization/__pycache__/plot_bounding_box_gallery.cpython-310.pyc,, +keras/src/visualization/__pycache__/plot_image_gallery.cpython-310.pyc,, +keras/src/visualization/__pycache__/plot_segmentation_mask_gallery.cpython-310.pyc,, +keras/src/visualization/draw_bounding_boxes.py,sha256=Gs7gNburpgwXr8CahiyQgZWhBD5ffVeoUG7kzIFL92g,6649 +keras/src/visualization/draw_segmentation_masks.py,sha256=C9zPIcHgQK8DKPhTvyiE13LTVU11zvIKK6q-YR249Tg,4746 +keras/src/visualization/plot_bounding_box_gallery.py,sha256=RBuNOnXHi0D6HiL7WmBfD1YeUsYunB1cHsusxmPct_s,6355 +keras/src/visualization/plot_image_gallery.py,sha256=JI75R1CquqtfHxWO-s2eHDT1dJi_w-V3lwqLE_PnsRU,5582 +keras/src/visualization/plot_segmentation_mask_gallery.py,sha256=gJnp5VowF7gIyPFuOzU3EBamQpDfpbS6ElqmgWDi4Y8,4335 +keras/src/wrappers/__init__.py,sha256=6QhlmdgtjERTkrI6uxtq9yTyHazeMOCPJVP6XEFskaw,270 +keras/src/wrappers/__pycache__/__init__.cpython-310.pyc,, 
+keras/src/wrappers/__pycache__/fixes.cpython-310.pyc,, +keras/src/wrappers/__pycache__/sklearn_wrapper.cpython-310.pyc,, +keras/src/wrappers/__pycache__/utils.cpython-310.pyc,, +keras/src/wrappers/fixes.py,sha256=iWAf_DHsvQAvmMXw0fVNECAomZs7wlGL8ckAARh8SsI,2591 +keras/src/wrappers/sklearn_wrapper.py,sha256=dlJp61cvLrY4UQYYvJs8lsChCZBrdO7JYyluroOBfN8,17479 +keras/src/wrappers/utils.py,sha256=UuRxqJhIOMtaTNX8J3FFmPZOTjn9rNyBO5IP9qEB5Qc,2383 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/WHEEL b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..1e3dcb16c30886471a686180684c9274bf795aad --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.7.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/top_level.txt b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..14348698da9f972e3043cef442ec02cef48ffdd8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/top_level.txt @@ -0,0 +1 @@ +keras diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4df10c6c84a5429cf36989bfdc4a2ba708090c64 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__init__.py @@ -0,0 +1,78 @@ +# DO NOT EDIT. 
Generated by api_gen.sh +from keras.api import DTypePolicy +from keras.api import FloatDTypePolicy +from keras.api import Function +from keras.api import Initializer +from keras.api import Input +from keras.api import InputSpec +from keras.api import KerasTensor +from keras.api import Layer +from keras.api import Loss +from keras.api import Metric +from keras.api import Model +from keras.api import Operation +from keras.api import Optimizer +from keras.api import Quantizer +from keras.api import Regularizer +from keras.api import Sequential +from keras.api import StatelessScope +from keras.api import SymbolicScope +from keras.api import Variable +from keras.api import __version__ +from keras.api import activations +from keras.api import applications +from keras.api import backend +from keras.api import callbacks +from keras.api import config +from keras.api import constraints +from keras.api import datasets +from keras.api import device +from keras.api import distribution +from keras.api import dtype_policies +from keras.api import export +from keras.api import initializers +from keras.api import layers +from keras.api import legacy +from keras.api import losses +from keras.api import metrics +from keras.api import mixed_precision +from keras.api import models +from keras.api import name_scope +from keras.api import ops +from keras.api import optimizers +from keras.api import preprocessing +from keras.api import quantizers +from keras.api import random +from keras.api import regularizers +from keras.api import saving +from keras.api import tree +from keras.api import utils +from keras.api import version +from keras.api import visualization +from keras.api import wrappers + +# END DO NOT EDIT. + +import os # isort: skip + +# Add everything in /api/ to the module search path. +__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405 + +# Don't pollute namespace. +del os + + +# Never autocomplete `.src` or `.api` on an imported keras object. 
+def __dir__(): + keys = dict.fromkeys((globals().keys())) + keys.pop("src") + keys.pop("api") + return list(keys) + + +# Don't import `.src` or `.api` during `from keras import *`. +__all__ = [ + name + for name in globals().keys() + if not (name.startswith("_") or name in ("src", "api")) +] diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfaebfd7895e3d92aada2d98af58a17987da012b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9778bcd4d63a55c5cd0f82566a7a9e7041e6e086 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__init__.py @@ -0,0 +1,20 @@ +from keras.src import activations +from keras.src import applications +from keras.src import backend +from keras.src import constraints +from keras.src import datasets +from keras.src import initializers +from keras.src import layers +from keras.src import models +from keras.src import ops +from keras.src import optimizers +from keras.src import regularizers +from keras.src import utils +from keras.src import visualization +from keras.src.backend import KerasTensor +from keras.src.layers import Input +from keras.src.layers import Layer +from keras.src.models import Functional +from keras.src.models import Model +from keras.src.models import Sequential +from keras.src.version import __version__ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/__init__.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f82023c026e263b2606b596a9eb00aa3cedbf6f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/api_export.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/api_export.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0533006068c224f2a22d0f13d54fbb9399fd140e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/api_export.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/version.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9e3c9663e2657f3ab15f1179e2cb2f10efeef59 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/version.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/api_export.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/api_export.py new file mode 100644 index 0000000000000000000000000000000000000000..76d007dc2af0aaae1f93bf8c684d43c2b782ed19 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/api_export.py @@ -0,0 +1,49 @@ +try: + import namex +except ImportError: + namex = None + + +# These dicts reference "canonical names" only +# (i.e. the first name an object was registered with). 
+REGISTERED_NAMES_TO_OBJS = {} +REGISTERED_OBJS_TO_NAMES = {} + + +def register_internal_serializable(path, symbol): + global REGISTERED_NAMES_TO_OBJS + if isinstance(path, (list, tuple)): + name = path[0] + else: + name = path + REGISTERED_NAMES_TO_OBJS[name] = symbol + REGISTERED_OBJS_TO_NAMES[symbol] = name + + +def get_symbol_from_name(name): + return REGISTERED_NAMES_TO_OBJS.get(name, None) + + +def get_name_from_symbol(symbol): + return REGISTERED_OBJS_TO_NAMES.get(symbol, None) + + +if namex: + + class keras_export(namex.export): + def __init__(self, path): + super().__init__(package="keras", path=path) + + def __call__(self, symbol): + register_internal_serializable(self.path, symbol) + return super().__call__(symbol) + +else: + + class keras_export: + def __init__(self, path): + self.path = path + + def __call__(self, symbol): + register_internal_serializable(self.path, symbol) + return symbol diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/backend_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/backend_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..05e637d444be06760c52c943866674eb6a36c517 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/backend_utils.py @@ -0,0 +1,140 @@ +import copy +import importlib +import os +import sys + +from keras.src import backend as backend_module +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state + + +def in_tf_graph(): + if global_state.get_global_attribute("in_tf_graph_scope", False): + return True + + if "tensorflow" in sys.modules: + from keras.src.utils.module_utils import tensorflow as tf + + return not tf.executing_eagerly() + return False + + +def convert_tf_tensor(outputs, dtype=None): + if backend_module.backend() != "tensorflow" and not in_tf_graph(): + outputs = backend_module.convert_to_tensor(outputs, 
dtype=dtype) + return outputs + + +class TFGraphScope: + def __init__(self): + self._original_value = global_state.get_global_attribute( + "in_tf_graph_scope", False + ) + + def __enter__(self): + global_state.set_global_attribute("in_tf_graph_scope", True) + + def __exit__(self, *args, **kwargs): + global_state.set_global_attribute( + "in_tf_graph_scope", self._original_value + ) + + +class DynamicBackend: + """A class that can be used to switch from one backend to another. + + Example: + + ```python + backend = DynamicBackend("tensorflow") + y = backend.square(tf.constant(...)) + backend.set_backend("jax") + y = backend.square(jax.numpy.array(...)) + ``` + + Args: + backend: Initial backend to use (string). + """ + + def __init__(self, backend=None): + self._backend = backend or backend_module.backend() + + def set_backend(self, backend): + if backend not in ("tensorflow", "jax", "torch", "numpy", "openvino"): + raise ValueError( + "Available backends are ('tensorflow', 'jax', 'torch', " + f"'numpy' and 'openvino'). Received: backend={backend}" + ) + self._backend = backend + + def reset(self): + self._backend = backend_module.backend() + + @property + def name(self): + return self._backend + + def __getattr__(self, name): + if self._backend == "tensorflow": + module = importlib.import_module("keras.src.backend.tensorflow") + return getattr(module, name) + if self._backend == "jax": + module = importlib.import_module("keras.src.backend.jax") + return getattr(module, name) + if self._backend == "torch": + module = importlib.import_module("keras.src.backend.torch") + return getattr(module, name) + if self._backend == "numpy": + if backend_module.backend() == "numpy": + return getattr(backend_module, name) + else: + raise NotImplementedError( + "Currently, we cannot dynamically import the numpy backend " + "because it would disrupt the namespace of the import." 
+ ) + if self._backend == "openvino": + module = importlib.import_module("keras.src.backend.openvino") + return getattr(module, name) + + +@keras_export("keras.config.set_backend") +def set_backend(backend): + """Reload the backend (and the Keras package). + + Example: + + ```python + keras.config.set_backend("jax") + ``` + + ⚠️ WARNING ⚠️: Using this function is dangerous and should be done + carefully. Changing the backend will **NOT** convert + the type of any already-instantiated objects. + Thus, any layers / tensors / etc. already created will no + longer be usable without errors. It is strongly recommended **not** + to keep around **any** Keras-originated objects instances created + before calling `set_backend()`. + + This includes any function or class instance that uses any Keras + functionality. All such code needs to be re-executed after calling + `set_backend()`. + """ + os.environ["KERAS_BACKEND"] = backend + # Clear module cache. + loaded_modules = [ + key for key in sys.modules.keys() if key.startswith("keras") + ] + for key in loaded_modules: + del sys.modules[key] + # Reimport Keras with the new backend (set via KERAS_BACKEND). + import keras + + # Finally: refresh all imported Keras submodules. 
+ globs = copy.copy(globals()) + for key, value in globs.items(): + if value.__class__ == keras.__class__: + if str(value).startswith("" + + def __iter__(self): + keys = sorted(self._config.keys()) + for k in keys: + yield k + + def __len__(self): + return len(self._config) + + def __delitem__(self, key): + self._raise_if_frozen() + del self._config[key] + + def __contains__(self, item): + return item in self._config diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dataset_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3eccb6a02eced58fb1250af9fb005211b9d5edc6 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dataset_utils.py @@ -0,0 +1,763 @@ +import os +import random +import time +import warnings +from multiprocessing.pool import ThreadPool + +import numpy as np + +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.utils import io_utils +from keras.src.utils.module_utils import tensorflow as tf + + +@keras_export("keras.utils.split_dataset") +def split_dataset( + dataset, left_size=None, right_size=None, shuffle=False, seed=None +): + """Splits a dataset into a left half and a right half (e.g. train / test). + + Args: + dataset: + A `tf.data.Dataset`, a `torch.utils.data.Dataset` object, + or a list/tuple of arrays with the same length. + left_size: If float (in the range `[0, 1]`), it signifies + the fraction of the data to pack in the left dataset. If integer, it + signifies the number of samples to pack in the left dataset. If + `None`, defaults to the complement to `right_size`. + Defaults to `None`. + right_size: If float (in the range `[0, 1]`), it signifies + the fraction of the data to pack in the right dataset. + If integer, it signifies the number of samples to pack + in the right dataset. 
+ If `None`, defaults to the complement to `left_size`. + Defaults to `None`. + shuffle: Boolean, whether to shuffle the data before splitting it. + seed: A random seed for shuffling. + + Returns: + A tuple of two `tf.data.Dataset` objects: + the left and right splits. + + Example: + + >>> data = np.random.random(size=(1000, 4)) + >>> left_ds, right_ds = keras.utils.split_dataset(data, left_size=0.8) + >>> int(left_ds.cardinality()) + 800 + >>> int(right_ds.cardinality()) + 200 + """ + dataset_type_spec = _get_type_spec(dataset) + + if dataset_type_spec is None: + raise TypeError( + "The `dataset` argument must be either" + "a `tf.data.Dataset`, a `torch.utils.data.Dataset`" + "object, or a list/tuple of arrays. " + f"Received: dataset={dataset} of type {type(dataset)}" + ) + + if right_size is None and left_size is None: + raise ValueError( + "At least one of the `left_size` or `right_size` " + "must be specified. Received: left_size=None and " + "right_size=None" + ) + + dataset_as_list = _convert_dataset_to_list(dataset, dataset_type_spec) + + if shuffle: + if seed is None: + seed = random.randint(0, int(1e6)) + random.seed(seed) + random.shuffle(dataset_as_list) + + total_length = len(dataset_as_list) + + left_size, right_size = _rescale_dataset_split_sizes( + left_size, right_size, total_length + ) + left_split = list(dataset_as_list[:left_size]) + right_split = list(dataset_as_list[-right_size:]) + + left_split = _restore_dataset_from_list( + left_split, dataset_type_spec, dataset + ) + right_split = _restore_dataset_from_list( + right_split, dataset_type_spec, dataset + ) + + left_split = tf.data.Dataset.from_tensor_slices(left_split) + right_split = tf.data.Dataset.from_tensor_slices(right_split) + + # apply batching to the splits if the dataset is batched + if dataset_type_spec is tf.data.Dataset and is_batched(dataset): + batch_size = get_batch_size(dataset) + if batch_size is not None: + left_split = left_split.batch(batch_size) + right_split = 
right_split.batch(batch_size) + + left_split = left_split.prefetch(tf.data.AUTOTUNE) + right_split = right_split.prefetch(tf.data.AUTOTUNE) + return left_split, right_split + + +def _convert_dataset_to_list( + dataset, + dataset_type_spec, + data_size_warning_flag=True, + ensure_shape_similarity=True, +): + """Convert `dataset` object to a list of samples. + + Args: + dataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object, + or a list/tuple of arrays. + dataset_type_spec: the type of the dataset. + data_size_warning_flag: If set to `True`, a warning will + be issued if the dataset takes longer than 10 seconds to iterate. + Defaults to `True`. + ensure_shape_similarity: If set to `True`, the shape of + the first sample will be used to validate the shape of rest of the + samples. Defaults to `True`. + + Returns: + List: A list of samples. + """ + dataset_iterator = _get_data_iterator_from_dataset( + dataset, dataset_type_spec + ) + dataset_as_list = [] + + start_time = time.time() + for sample in _get_next_sample( + dataset_iterator, + ensure_shape_similarity, + data_size_warning_flag, + start_time, + ): + dataset_as_list.append(sample) + + return dataset_as_list + + +def _get_data_iterator_from_dataset(dataset, dataset_type_spec): + """Get the iterator from a dataset. + + Args: + dataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object, + or a list/tuple of arrays. + dataset_type_spec: The type of the dataset. + + Returns: + iterator: An `iterator` object. + """ + if dataset_type_spec is list: + if len(dataset) == 0: + raise ValueError( + "Received an empty list dataset. " + "Please provide a non-empty list of arrays." + ) + + expected_shape = None + for i, element in enumerate(dataset): + if not isinstance(element, np.ndarray): + raise ValueError( + "Expected a list of `numpy.ndarray` objects," + f"Received: {type(element)} at index {i}." 
+ ) + if expected_shape is None: + expected_shape = element.shape + elif element.shape[0] != expected_shape[0]: + raise ValueError( + "Received a list of NumPy arrays with different lengths." + f"Mismatch found at index {i}, " + f"Expected shape={expected_shape} " + f"Received shape={np.array(element).shape}." + "Please provide a list of NumPy arrays of the same length." + ) + + return iter(zip(*dataset)) + elif dataset_type_spec is tuple: + if len(dataset) == 0: + raise ValueError( + "Received an empty list dataset." + "Please provide a non-empty tuple of arrays." + ) + + expected_shape = None + for i, element in enumerate(dataset): + if not isinstance(element, np.ndarray): + raise ValueError( + "Expected a tuple of `numpy.ndarray` objects," + f"Received: {type(element)} at index {i}." + ) + if expected_shape is None: + expected_shape = element.shape + elif element.shape[0] != expected_shape[0]: + raise ValueError( + "Received a tuple of NumPy arrays with different lengths." + f"Mismatch found at index {i}, " + f"Expected shape={expected_shape} " + f"Received shape={np.array(element).shape}." + "Please provide a tuple of NumPy arrays of the same length." + ) + + return iter(zip(*dataset)) + elif dataset_type_spec is tf.data.Dataset: + if is_batched(dataset): + dataset = dataset.unbatch() + return iter(dataset) + + elif is_torch_dataset(dataset): + return iter(dataset) + elif dataset_type_spec is np.ndarray: + return iter(dataset) + raise ValueError(f"Invalid dataset_type_spec: {dataset_type_spec}") + + +def _get_next_sample( + dataset_iterator, + ensure_shape_similarity, + data_size_warning_flag, + start_time, +): + """Yield data samples from the `dataset_iterator`. + + Args: + dataset_iterator: An `iterator` object. + ensure_shape_similarity: If set to `True`, the shape of + the first sample will be used to validate the shape of rest of the + samples. Defaults to `True`. 
+ data_size_warning_flag: If set to `True`, a warning will + be issued if the dataset takes longer than 10 seconds to iterate. + Defaults to `True`. + start_time (float): the start time of the dataset iteration. this is + used only if `data_size_warning_flag` is set to true. + + Yields: + data_sample: The next sample. + """ + from keras.src.trainers.data_adapters.data_adapter_utils import ( + is_torch_tensor, + ) + + try: + dataset_iterator = iter(dataset_iterator) + first_sample = next(dataset_iterator) + if isinstance(first_sample, (tf.Tensor, np.ndarray)) or is_torch_tensor( + first_sample + ): + first_sample_shape = np.array(first_sample).shape + else: + first_sample_shape = None + ensure_shape_similarity = False + yield first_sample + except StopIteration: + raise ValueError( + "Received an empty dataset. Argument `dataset` must " + "be a non-empty list/tuple of `numpy.ndarray` objects " + "or `tf.data.Dataset` objects." + ) + + for i, sample in enumerate(dataset_iterator): + if ensure_shape_similarity: + if first_sample_shape != np.array(sample).shape: + raise ValueError( + "All `dataset` samples must have same shape, " + f"Expected shape: {np.array(first_sample).shape} " + f"Received shape: {np.array(sample).shape} at index " + f"{i}." + ) + if data_size_warning_flag: + if i % 10 == 0: + cur_time = time.time() + # warns user if the dataset is too large to iterate within 10s + if int(cur_time - start_time) > 10 and data_size_warning_flag: + warnings.warn( + "The dataset is taking longer than 10 seconds to " + "iterate over. This may be due to the size of the " + "dataset. Keep in mind that the `split_dataset` " + "utility is only for small in-memory dataset " + "(e.g. 
< 10,000 samples).", + category=ResourceWarning, + source="split_dataset", + ) + data_size_warning_flag = False + yield sample + + +def is_torch_dataset(dataset): + if hasattr(dataset, "__class__"): + for parent in dataset.__class__.__mro__: + if parent.__name__ == "Dataset" and str( + parent.__module__ + ).startswith("torch.utils.data"): + return True + return False + + +def _rescale_dataset_split_sizes(left_size, right_size, total_length): + """Rescale the dataset split sizes. + + We want to ensure that the sum of + the split sizes is equal to the total length of the dataset. + + Args: + left_size: The size of the left dataset split. + right_size: The size of the right dataset split. + total_length: The total length of the dataset. + + Returns: + tuple: A tuple of rescaled `left_size` and `right_size` integers. + """ + left_size_type = type(left_size) + right_size_type = type(right_size) + + # check both left_size and right_size are integers or floats + if (left_size is not None and left_size_type not in [int, float]) and ( + right_size is not None and right_size_type not in [int, float] + ): + raise TypeError( + "Invalid `left_size` and `right_size` Types. Expected: " + "integer or float or None, Received: type(left_size)=" + f"{left_size_type} and type(right_size)={right_size_type}" + ) + + # check left_size is a integer or float + if left_size is not None and left_size_type not in [int, float]: + raise TypeError( + "Invalid `left_size` Type. Expected: int or float or None, " + f"Received: type(left_size)={left_size_type}. " + ) + + # check right_size is a integer or float + if right_size is not None and right_size_type not in [int, float]: + raise TypeError( + "Invalid `right_size` Type. " + "Expected: int or float or None," + f"Received: type(right_size)={right_size_type}." + ) + + # check left_size and right_size are non-zero + if left_size == 0 and right_size == 0: + raise ValueError( + "Both `left_size` and `right_size` are zero. 
" + "At least one of the split sizes must be non-zero." + ) + + # check left_size is non-negative and less than 1 and less than total_length + if ( + left_size_type is int + and (left_size <= 0 or left_size >= total_length) + or left_size_type is float + and (left_size <= 0 or left_size >= 1) + ): + raise ValueError( + "`left_size` should be either a positive integer " + f"smaller than {total_length}, or a float " + "within the range `[0, 1]`. Received: left_size=" + f"{left_size}" + ) + + # check right_size is non-negative and less than 1 and less than + # total_length + if ( + right_size_type is int + and (right_size <= 0 or right_size >= total_length) + or right_size_type is float + and (right_size <= 0 or right_size >= 1) + ): + raise ValueError( + "`right_size` should be either a positive integer " + f"and smaller than {total_length} or a float " + "within the range `[0, 1]`. Received: right_size=" + f"{right_size}" + ) + + # check sum of left_size and right_size is less than or equal to + # total_length + if ( + right_size_type is left_size_type is float + and right_size + left_size > 1 + ): + raise ValueError( + "The sum of `left_size` and `right_size` is greater " + "than 1. It must be less than or equal to 1." + ) + + if left_size_type is float: + left_size = round(left_size * total_length) + elif left_size_type is int: + left_size = float(left_size) + + if right_size_type is float: + right_size = round(right_size * total_length) + elif right_size_type is int: + right_size = float(right_size) + + if left_size is None: + left_size = total_length - right_size + elif right_size is None: + right_size = total_length - left_size + + if left_size + right_size > total_length: + raise ValueError( + "The sum of `left_size` and `right_size` should " + "be smaller than the {total_length}. 
" + f"Received: left_size + right_size = {left_size+right_size}" + f"and total_length = {total_length}" + ) + + for split, side in [(left_size, "left"), (right_size, "right")]: + if split == 0: + raise ValueError( + f"With `dataset` of length={total_length}, `left_size`=" + f"{left_size} and `right_size`={right_size}." + f"Resulting {side} side dataset split will be empty. " + "Adjust any of the aforementioned parameters" + ) + + left_size, right_size = int(left_size), int(right_size) + return left_size, right_size + + +def _restore_dataset_from_list( + dataset_as_list, dataset_type_spec, original_dataset +): + """Restore the dataset from the list of arrays.""" + if dataset_type_spec in [tuple, list, tf.data.Dataset] or is_torch_dataset( + original_dataset + ): + # Save structure by taking the first element. + element_spec = dataset_as_list[0] + # Flatten each element. + dataset_as_list = [tree.flatten(sample) for sample in dataset_as_list] + # Combine respective elements at all indices. + dataset_as_list = [np.array(sample) for sample in zip(*dataset_as_list)] + # Recreate the original structure of elements. + dataset_as_list = tree.pack_sequence_as(element_spec, dataset_as_list) + # Turn lists to tuples as tf.data will fail on lists. 
+ return tree.traverse( + lambda x: tuple(x) if isinstance(x, list) else x, + dataset_as_list, + top_down=False, + ) + + return dataset_as_list + + +def is_batched(dataset): + """Check if the `tf.data.Dataset` is batched.""" + return hasattr(dataset, "_batch_size") + + +def get_batch_size(dataset): + """Get the batch size of the dataset.""" + if is_batched(dataset): + return dataset._batch_size + else: + return None + + +def _get_type_spec(dataset): + """Get the type spec of the dataset.""" + if isinstance(dataset, tuple): + return tuple + elif isinstance(dataset, list): + return list + elif isinstance(dataset, np.ndarray): + return np.ndarray + elif isinstance(dataset, tf.data.Dataset): + return tf.data.Dataset + elif is_torch_dataset(dataset): + from torch.utils.data import Dataset as TorchDataset + + return TorchDataset + else: + return None + + +def index_directory( + directory, + labels, + formats, + class_names=None, + shuffle=True, + seed=None, + follow_links=False, + verbose=True, +): + """List all files in `directory`, with their labels. + + Args: + directory: Directory where the data is located. + If `labels` is `"inferred"`, it should contain + subdirectories, each containing files for a class. + Otherwise, the directory structure is ignored. + labels: Either `"inferred"` + (labels are generated from the directory structure), + `None` (no labels), + or a list/tuple of integer labels of the same size as the number + of valid files found in the directory. + Labels should be sorted according + to the alphanumeric order of the image file paths + (obtained via `os.walk(directory)` in Python). + formats: Allowlist of file extensions to index + (e.g. `".jpg"`, `".txt"`). + class_names: Only valid if `labels="inferred"`. This is the explicit + list of class names (must match names of subdirectories). Used + to control the order of the classes + (otherwise alphanumerical order is used). + shuffle: Whether to shuffle the data. Defaults to `True`. 
+ If set to `False`, sorts the data in alphanumeric order. + seed: Optional random seed for shuffling. + follow_links: Whether to visits subdirectories pointed to by symlinks. + verbose: Whether the function prints number of files found and classes. + Defaults to `True`. + + Returns: + tuple (file_paths, labels, class_names). + - file_paths: list of file paths (strings). + - labels: list of matching integer labels (same length as file_paths) + - class_names: names of the classes corresponding to these labels, in + order. + """ + if labels == "inferred": + subdirs = [] + for subdir in sorted(tf.io.gfile.listdir(directory)): + if tf.io.gfile.isdir(tf.io.gfile.join(directory, subdir)): + if not subdir.startswith("."): + if subdir.endswith("/"): + subdir = subdir[:-1] + subdirs.append(subdir) + if class_names is not None: + if not set(class_names).issubset(set(subdirs)): + raise ValueError( + "The `class_names` passed did not match the " + "names of the subdirectories of the target directory. " + f"Expected: {subdirs} (or a subset of it), " + f"but received: class_names={class_names}" + ) + subdirs = class_names # Keep provided order. + else: + # In the explicit/no-label cases, index from the parent directory down. + subdirs = [""] + if class_names is not None: + if labels is None: + raise ValueError( + "When `labels=None` (no labels), argument `class_names` " + "cannot be specified." + ) + else: + raise ValueError( + "When argument `labels` is specified, argument " + "`class_names` cannot be specified (the `class_names` " + "will be the sorted list of labels)." + ) + class_names = subdirs + class_indices = dict(zip(class_names, range(len(class_names)))) + + # Build an index of the files + # in the different class subfolders. 
+ pool = ThreadPool() + results = [] + filenames = [] + + for dirpath in (tf.io.gfile.join(directory, subdir) for subdir in subdirs): + results.append( + pool.apply_async( + index_subdirectory, + (dirpath, class_indices, follow_links, formats), + ) + ) + labels_list = [] + for res in results: + partial_filenames, partial_labels = res.get() + labels_list.append(partial_labels) + filenames += partial_filenames + + if labels == "inferred": + # Inferred labels. + i = 0 + labels = np.zeros((len(filenames),), dtype="int32") + for partial_labels in labels_list: + labels[i : i + len(partial_labels)] = partial_labels + i += len(partial_labels) + elif labels is None: + class_names = None + else: + # Manual labels. + if len(labels) != len(filenames): + raise ValueError( + "Expected the lengths of `labels` to match the number " + "of files in the target directory. len(labels) is " + f"{len(labels)} while we found {len(filenames)} files " + f"in directory {directory}." + ) + class_names = [str(label) for label in sorted(set(labels))] + if verbose: + if labels is None: + io_utils.print_msg(f"Found {len(filenames)} files.") + else: + io_utils.print_msg( + f"Found {len(filenames)} files belonging " + f"to {len(class_names)} classes." 
+ ) + pool.close() + pool.join() + file_paths = [tf.io.gfile.join(directory, fname) for fname in filenames] + + if shuffle: + # Shuffle globally to erase macro-structure + if seed is None: + seed = np.random.randint(1e6) + rng = np.random.RandomState(seed) + rng.shuffle(file_paths) + if labels is not None: + rng = np.random.RandomState(seed) + rng.shuffle(labels) + return file_paths, labels, class_names + + +def iter_valid_files(directory, follow_links, formats): + if not follow_links: + walk = tf.io.gfile.walk(directory) + else: + walk = os.walk(directory, followlinks=follow_links) + for root, _, files in sorted(walk, key=lambda x: x[0]): + for fname in sorted(files): + if fname.lower().endswith(formats): + yield root, fname + + +def index_subdirectory(directory, class_indices, follow_links, formats): + """Recursively walks directory and list image paths and their class index. + + Args: + directory: string, target directory. + class_indices: dict mapping class names to their index. + follow_links: boolean, whether to recursively follow subdirectories + (if False, we only list top-level images in `directory`). + formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt"). + + Returns: + tuple `(filenames, labels)`. `filenames` is a list of relative file + paths, and `labels` is a list of integer labels corresponding + to these files. + """ + dirname = os.path.basename(directory) + valid_files = iter_valid_files(directory, follow_links, formats) + labels = [] + filenames = [] + for root, fname in valid_files: + labels.append(class_indices[dirname]) + absolute_path = tf.io.gfile.join(root, fname) + relative_path = tf.io.gfile.join( + dirname, os.path.relpath(absolute_path, directory) + ) + filenames.append(relative_path) + return filenames, labels + + +def get_training_or_validation_split(samples, labels, validation_split, subset): + """Potentially restrict samples & labels to a training or validation split. + + Args: + samples: List of elements. 
+ labels: List of corresponding labels. + validation_split: Float, fraction of data to reserve for validation. + subset: Subset of the data to return. + Either `"training"`, `"validation"`, or `None`. + If `None`, we return all of the data. + + Returns: + tuple (samples, labels), potentially restricted to the specified subset. + """ + if not validation_split: + return samples, labels + + num_val_samples = int(validation_split * len(samples)) + if subset == "training": + io_utils.print_msg( + f"Using {len(samples) - num_val_samples} " f"files for training." + ) + samples = samples[:-num_val_samples] + if labels is not None: + labels = labels[:-num_val_samples] + elif subset == "validation": + io_utils.print_msg(f"Using {num_val_samples} files for validation.") + samples = samples[-num_val_samples:] + if labels is not None: + labels = labels[-num_val_samples:] + else: + raise ValueError( + '`subset` must be either "training" ' + f'or "validation", received: {subset}' + ) + return samples, labels + + +def labels_to_dataset(labels, label_mode, num_classes): + """Create a `tf.data.Dataset` from the list/tuple of labels. + + Args: + labels: list/tuple of labels to be converted into a `tf.data.Dataset`. + label_mode: String describing the encoding of `labels`. Options are: + - `"binary"` indicates that the labels (there can be only 2) are encoded + as `float32` scalars with values 0 or 1 + (e.g. for `binary_crossentropy`). + - `"categorical"` means that the labels are mapped into a categorical + vector. (e.g. for `categorical_crossentropy` loss). + num_classes: number of classes of labels. + + Returns: + A `tf.data.Dataset` instance. 
+ """ + label_ds = tf.data.Dataset.from_tensor_slices(labels) + if label_mode == "binary": + label_ds = label_ds.map( + lambda x: tf.expand_dims(tf.cast(x, "float32"), axis=-1), + num_parallel_calls=tf.data.AUTOTUNE, + ) + elif label_mode == "categorical": + label_ds = label_ds.map( + lambda x: tf.one_hot(x, num_classes), + num_parallel_calls=tf.data.AUTOTUNE, + ) + return label_ds + + +def check_validation_split_arg(validation_split, subset, shuffle, seed): + """Raise errors in case of invalid argument values. + + Args: + validation_split: float between 0 and 1, fraction of data to reserve for + validation. + subset: One of `"training"`, `"validation"`, or `"both"`. Only used if + `validation_split` is set. + shuffle: Whether to shuffle the data. Either `True` or `False`. + seed: random seed for shuffling and transformations. + """ + if validation_split and not 0 < validation_split < 1: + raise ValueError( + "`validation_split` must be between 0 and 1, " + f"received: {validation_split}" + ) + if (validation_split or subset) and not (validation_split and subset): + raise ValueError( + "If `subset` is set, `validation_split` must be set, and inversely." + ) + if subset not in ("training", "validation", "both", None): + raise ValueError( + '`subset` must be either "training", ' + f'"validation" or "both", received: {subset}' + ) + if validation_split and shuffle and seed is None: + raise ValueError( + "If using `validation_split` and shuffling the data, you must " + "provide a `seed` argument, to make sure that there is no " + "overlap between the training and validation subset." 
+ ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dtype_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dtype_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..44ac7d4f65a3f0a75c04a9a245ac6ce1dabc61fc --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dtype_utils.py @@ -0,0 +1,51 @@ +from keras.src import backend +from keras.src import ops + +DTYPE_TO_SIZE = { + **{f"float{i}": i for i in (16, 32, 64)}, + **{f"int{i}": i for i in (8, 16, 32, 64)}, + **{f"uint{i}": i for i in (8, 16, 32, 64)}, + "bfloat16": 16, + "bool": 1, +} + + +def dtype_size(dtype): + size = DTYPE_TO_SIZE.get(dtype, None) + if size is None: + raise ValueError(f"Invalid dtype: {dtype}") + return size + + +def is_float(dtype): + return "float" in dtype + + +def cast_to_common_dtype(tensors): + """Cast a list of tensors to a common dtype. + + If any tensor is floating-point, they will all be casted to the most-precise + floating-point dtype. Otherwise the tensors are not casted. + + Args: + tensors: A list of tensors. + + Returns: + Same list, casted to a common dtype. 
+ """ + highest_float = None + highest_float_size = ( + -1 + ) # Initially set to an impossible value for comparison + for x in tensors: + dtype = backend.standardize_dtype(x.dtype) + if is_float(dtype): + if highest_float is None or dtype_size(dtype) > highest_float_size: + highest_float = dtype + highest_float_size = dtype_size(dtype) + elif dtype == "float16" and highest_float == "bfloat16": + highest_float = "float32" + highest_float_size = dtype_size(highest_float) + if highest_float: + tensors = [ops.cast(x, highest_float) for x in tensors] + return tensors diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/file_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/file_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7725c19c7ccaa617815b4ee4ceb1c843e58403b7 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/file_utils.py @@ -0,0 +1,518 @@ +import hashlib +import os +import re +import shutil +import tarfile +import urllib +import warnings +import zipfile +from urllib.request import urlretrieve + +from keras.src.api_export import keras_export +from keras.src.backend import config +from keras.src.utils import io_utils +from keras.src.utils.module_utils import gfile +from keras.src.utils.progbar import Progbar + + +def path_to_string(path): + """Convert `PathLike` objects to their string representation. + + If given a non-string typed path object, converts it to its string + representation. + + If the object passed to `path` is not among the above, then it is + returned unchanged. This allows e.g. passthrough of file objects + through this function. + + Args: + path: `PathLike` object that represents a path + + Returns: + A string representation of the path argument, if Python support exists. 
+ """ + if isinstance(path, os.PathLike): + return os.fspath(path) + return path + + +def resolve_path(path): + return os.path.realpath(os.path.abspath(path)) + + +def is_path_in_dir(path, base_dir): + return resolve_path(os.path.join(base_dir, path)).startswith(base_dir) + + +def is_link_in_dir(info, base): + tip = resolve_path(os.path.join(base, os.path.dirname(info.name))) + return is_path_in_dir(info.linkname, base_dir=tip) + + +def filter_safe_paths(members): + base_dir = resolve_path(".") + for finfo in members: + valid_path = False + if is_path_in_dir(finfo.name, base_dir): + valid_path = True + yield finfo + elif finfo.issym() or finfo.islnk(): + if is_link_in_dir(finfo, base_dir): + valid_path = True + yield finfo + if not valid_path: + warnings.warn( + "Skipping invalid path during archive extraction: " + f"'{finfo.name}'.", + stacklevel=2, + ) + + +def extract_archive(file_path, path=".", archive_format="auto"): + """Extracts an archive if it matches a support format. + + Supports `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats. + + Args: + file_path: Path to the archive file. + path: Where to extract the archive file. + archive_format: Archive format to try for extracting the file. + Options are `"auto"`, `"tar"`, `"zip"`, and `None`. + `"tar"` includes `.tar`, `.tar.gz`, and `.tar.bz` files. + The default `"auto"` uses `["tar", "zip"]`. + `None` or an empty list will return no matches found. + + Returns: + `True` if a match was found and an archive extraction was completed, + `False` otherwise. 
+ """ + if archive_format is None: + return False + if archive_format == "auto": + archive_format = ["tar", "zip"] + if isinstance(archive_format, str): + archive_format = [archive_format] + + file_path = path_to_string(file_path) + path = path_to_string(path) + + for archive_type in archive_format: + if archive_type == "tar": + open_fn = tarfile.open + is_match_fn = tarfile.is_tarfile + elif archive_type == "zip": + open_fn = zipfile.ZipFile + is_match_fn = zipfile.is_zipfile + else: + raise NotImplementedError(archive_type) + + if is_match_fn(file_path): + with open_fn(file_path) as archive: + try: + if zipfile.is_zipfile(file_path): + # Zip archive. + archive.extractall(path) + else: + # Tar archive, perhaps unsafe. Filter paths. + archive.extractall( + path, members=filter_safe_paths(archive) + ) + except (tarfile.TarError, RuntimeError, KeyboardInterrupt): + if os.path.exists(path): + if os.path.isfile(path): + os.remove(path) + else: + shutil.rmtree(path) + raise + return True + return False + + +@keras_export("keras.utils.get_file") +def get_file( + fname=None, + origin=None, + untar=False, + md5_hash=None, + file_hash=None, + cache_subdir="datasets", + hash_algorithm="auto", + extract=False, + archive_format="auto", + cache_dir=None, + force_download=False, +): + """Downloads a file from a URL if it not already in the cache. + + By default the file at the url `origin` is downloaded to the + cache_dir `~/.keras`, placed in the cache_subdir `datasets`, + and given the filename `fname`. The final location of a file + `example.txt` would therefore be `~/.keras/datasets/example.txt`. + Files in `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats can + also be extracted. + + Passing a hash will verify the file after download. The command line + programs `shasum` and `sha256sum` can compute the hash. 
+ + Example: + + ```python + path_to_downloaded_file = get_file( + origin="https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz", + extract=True, + ) + ``` + + Args: + fname: If the target is a single file, this is your desired + local name for the file. + If `None`, the name of the file at `origin` will be used. + If downloading and extracting a directory archive, + the provided `fname` will be used as extraction directory + name (only if it doesn't have an extension). + origin: Original URL of the file. + untar: Deprecated in favor of `extract` argument. + Boolean, whether the file is a tar archive that should + be extracted. + md5_hash: Deprecated in favor of `file_hash` argument. + md5 hash of the file for file integrity verification. + file_hash: The expected hash string of the file after download. + The sha256 and md5 hash algorithms are both supported. + cache_subdir: Subdirectory under the Keras cache dir where the file is + saved. If an absolute path, e.g. `"/path/to/folder"` is + specified, the file will be saved at that location. + hash_algorithm: Select the hash algorithm to verify the file. + options are `"md5'`, `"sha256'`, and `"auto'`. + The default 'auto' detects the hash algorithm in use. + extract: If `True`, extracts the archive. Only applicable to compressed + archive files like tar or zip. + archive_format: Archive format to try for extracting the file. + Options are `"auto'`, `"tar'`, `"zip'`, and `None`. + `"tar"` includes tar, tar.gz, and tar.bz files. + The default `"auto"` corresponds to `["tar", "zip"]`. + None or an empty list will return no matches found. + cache_dir: Location to store cached files, when None it + defaults ether `$KERAS_HOME` if the `KERAS_HOME` environment + variable is set or `~/.keras/`. + force_download: If `True`, the file will always be re-downloaded + regardless of the cache state. + + Returns: + Path to the downloaded file. 
+ + **⚠️ Warning on malicious downloads ⚠️** + + Downloading something from the Internet carries a risk. + NEVER download a file/archive if you do not trust the source. + We recommend that you specify the `file_hash` argument + (if the hash of the source file is known) to make sure that the file you + are getting is the one you expect. + """ + if origin is None: + raise ValueError( + 'Please specify the "origin" argument (URL of the file ' + "to download)." + ) + + if cache_dir is None: + cache_dir = config.keras_home() + if md5_hash is not None and file_hash is None: + file_hash = md5_hash + hash_algorithm = "md5" + datadir_base = os.path.expanduser(cache_dir) + if not os.access(datadir_base, os.W_OK): + datadir_base = os.path.join("/tmp", ".keras") + datadir = os.path.join(datadir_base, cache_subdir) + os.makedirs(datadir, exist_ok=True) + + provided_fname = fname + fname = path_to_string(fname) + + if not fname: + fname = os.path.basename(urllib.parse.urlsplit(origin).path) + if not fname: + raise ValueError( + "Can't parse the file name from the origin provided: " + f"'{origin}'." + "Please specify the `fname` argument." + ) + else: + if os.sep in fname: + raise ValueError( + "Paths are no longer accepted as the `fname` argument. " + "To specify the file's parent directory, use " + f"the `cache_dir` argument. Received: fname={fname}" + ) + + if extract or untar: + if provided_fname: + if "." in fname: + download_target = os.path.join(datadir, fname) + fname = fname[: fname.find(".")] + extraction_dir = os.path.join(datadir, fname + "_extracted") + else: + extraction_dir = os.path.join(datadir, fname) + download_target = os.path.join(datadir, fname + "_archive") + else: + extraction_dir = os.path.join(datadir, fname) + download_target = os.path.join(datadir, fname + "_archive") + else: + download_target = os.path.join(datadir, fname) + + if force_download: + download = True + elif os.path.exists(download_target): + # File found in cache. 
+ download = False + # Verify integrity if a hash was provided. + if file_hash is not None: + if not validate_file( + download_target, file_hash, algorithm=hash_algorithm + ): + io_utils.print_msg( + "A local file was found, but it seems to be " + f"incomplete or outdated because the {hash_algorithm} " + "file hash does not match the original value of " + f"{file_hash} so we will re-download the data." + ) + download = True + else: + download = True + + if download: + io_utils.print_msg(f"Downloading data from {origin}") + + class DLProgbar: + """Manage progress bar state for use in urlretrieve.""" + + def __init__(self): + self.progbar = None + self.finished = False + + def __call__(self, block_num, block_size, total_size): + if total_size == -1: + total_size = None + if not self.progbar: + self.progbar = Progbar(total_size) + current = block_num * block_size + + if total_size is None: + self.progbar.update(current) + else: + if current < total_size: + self.progbar.update(current) + elif not self.finished: + self.progbar.update(self.progbar.target) + self.finished = True + + error_msg = "URL fetch failure on {}: {} -- {}" + try: + try: + urlretrieve(origin, download_target, DLProgbar()) + except urllib.error.HTTPError as e: + raise Exception(error_msg.format(origin, e.code, e.msg)) + except urllib.error.URLError as e: + raise Exception(error_msg.format(origin, e.errno, e.reason)) + except (Exception, KeyboardInterrupt): + if os.path.exists(download_target): + os.remove(download_target) + raise + + # Validate download if succeeded and user provided an expected hash + # Security conscious users would get the hash of the file from a + # separate channel and pass it to this API to prevent MITM / corruption: + if os.path.exists(download_target) and file_hash is not None: + if not validate_file( + download_target, file_hash, algorithm=hash_algorithm + ): + raise ValueError( + "Incomplete or corrupted file detected. 
" + f"The {hash_algorithm} " + "file hash does not match the provided value " + f"of {file_hash}." + ) + + if extract or untar: + if untar: + archive_format = "tar" + + status = extract_archive( + download_target, extraction_dir, archive_format + ) + if not status: + warnings.warn("Could not extract archive.", stacklevel=2) + return extraction_dir + + return download_target + + +def resolve_hasher(algorithm, file_hash=None): + """Returns hash algorithm as hashlib function.""" + if algorithm == "sha256": + return hashlib.sha256() + + if algorithm == "auto" and file_hash is not None and len(file_hash) == 64: + return hashlib.sha256() + + # This is used only for legacy purposes. + return hashlib.md5() + + +def hash_file(fpath, algorithm="sha256", chunk_size=65535): + """Calculates a file sha256 or md5 hash. + + Example: + + >>> hash_file('/path/to/file.zip') + 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + + Args: + fpath: Path to the file being validated. + algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`. + The default `"auto"` detects the hash algorithm in use. + chunk_size: Bytes to read at a time, important for large files. + + Returns: + The file hash. + """ + if isinstance(algorithm, str): + hasher = resolve_hasher(algorithm) + else: + hasher = algorithm + + with open(fpath, "rb") as fpath_file: + for chunk in iter(lambda: fpath_file.read(chunk_size), b""): + hasher.update(chunk) + + return hasher.hexdigest() + + +def validate_file(fpath, file_hash, algorithm="auto", chunk_size=65535): + """Validates a file against a sha256 or md5 hash. + + Args: + fpath: path to the file being validated + file_hash: The expected hash string of the file. + The sha256 and md5 hash algorithms are both supported. + algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`. + The default `"auto"` detects the hash algorithm in use. + chunk_size: Bytes to read at a time, important for large files. 
+ + Returns: + Boolean, whether the file is valid. + """ + hasher = resolve_hasher(algorithm, file_hash) + + if str(hash_file(fpath, hasher, chunk_size)) == str(file_hash): + return True + else: + return False + + +def is_remote_path(filepath): + """ + Determines if a given filepath indicates a remote location. + + This function checks if the filepath represents a known remote pattern + such as GCS (`/gcs`), CNS (`/cns`), CFS (`/cfs`), HDFS (`/hdfs`) + + Args: + filepath (str): The path to be checked. + + Returns: + bool: True if the filepath is a recognized remote path, otherwise False + """ + if re.match(r"^(/cns|/cfs|/gcs|/hdfs|/readahead|.*://).*$", str(filepath)): + return True + return False + + +# Below are gfile-replacement utils. + + +def _raise_if_no_gfile(path): + raise ValueError( + "Handling remote paths requires installing TensorFlow " + f"(in order to use gfile). Received path: {path}" + ) + + +def exists(path): + if is_remote_path(path): + if gfile.available: + return gfile.exists(path) + else: + _raise_if_no_gfile(path) + return os.path.exists(path) + + +def File(path, mode="r"): + if is_remote_path(path): + if gfile.available: + return gfile.GFile(path, mode=mode) + else: + _raise_if_no_gfile(path) + return open(path, mode=mode) + + +def join(path, *paths): + if is_remote_path(path): + if gfile.available: + return gfile.join(path, *paths) + else: + _raise_if_no_gfile(path) + return os.path.join(path, *paths) + + +def isdir(path): + if is_remote_path(path): + if gfile.available: + return gfile.isdir(path) + else: + _raise_if_no_gfile(path) + return os.path.isdir(path) + + +def remove(path): + if is_remote_path(path): + if gfile.available: + return gfile.remove(path) + else: + _raise_if_no_gfile(path) + return os.remove(path) + + +def rmtree(path): + if is_remote_path(path): + if gfile.available: + return gfile.rmtree(path) + else: + _raise_if_no_gfile(path) + return shutil.rmtree(path) + + +def listdir(path): + if is_remote_path(path): + if 
gfile.available: + return gfile.listdir(path) + else: + _raise_if_no_gfile(path) + return os.listdir(path) + + +def copy(src, dst): + if is_remote_path(src) or is_remote_path(dst): + if gfile.available: + return gfile.copy(src, dst, overwrite=True) + else: + _raise_if_no_gfile(f"src={src} dst={dst}") + return shutil.copy(src, dst) + + +def makedirs(path): + if is_remote_path(path): + if gfile.available: + return gfile.makedirs(path) + else: + _raise_if_no_gfile(path) + return os.makedirs(path) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/image_dataset_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/image_dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c1918be73eeff6db5b8affdc5113357000250c6f --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/image_dataset_utils.py @@ -0,0 +1,459 @@ +import numpy as np + +from keras.src.api_export import keras_export +from keras.src.backend.config import standardize_data_format +from keras.src.utils import dataset_utils +from keras.src.utils import image_utils +from keras.src.utils.module_utils import tensorflow as tf + +ALLOWLIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png") + + +@keras_export( + [ + "keras.utils.image_dataset_from_directory", + "keras.preprocessing.image_dataset_from_directory", + ] +) +def image_dataset_from_directory( + directory, + labels="inferred", + label_mode="int", + class_names=None, + color_mode="rgb", + batch_size=32, + image_size=(256, 256), + shuffle=True, + seed=None, + validation_split=None, + subset=None, + interpolation="bilinear", + follow_links=False, + crop_to_aspect_ratio=False, + pad_to_aspect_ratio=False, + data_format=None, + verbose=True, +): + """Generates a `tf.data.Dataset` from image files in a directory. 
+ + If your directory structure is: + + ``` + main_directory/ + ...class_a/ + ......a_image_1.jpg + ......a_image_2.jpg + ...class_b/ + ......b_image_1.jpg + ......b_image_2.jpg + ``` + + Then calling `image_dataset_from_directory(main_directory, + labels='inferred')` will return a `tf.data.Dataset` that yields batches of + images from the subdirectories `class_a` and `class_b`, together with labels + 0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`). + + Supported image formats: `.jpeg`, `.jpg`, `.png`, `.bmp`, `.gif`. + Animated gifs are truncated to the first frame. + + Args: + directory: Directory where the data is located. + If `labels` is `"inferred"`, it should contain + subdirectories, each containing images for a class. + Otherwise, the directory structure is ignored. + labels: Either `"inferred"` + (labels are generated from the directory structure), + `None` (no labels), + or a list/tuple of integer labels of the same size as the number of + image files found in the directory. Labels should be sorted + according to the alphanumeric order of the image file paths + (obtained via `os.walk(directory)` in Python). + label_mode: String describing the encoding of `labels`. Options are: + - `"int"`: means that the labels are encoded as integers + (e.g. for `sparse_categorical_crossentropy` loss). + - `"categorical"` means that the labels are + encoded as a categorical vector + (e.g. for `categorical_crossentropy` loss). + - `"binary"` means that the labels (there can be only 2) + are encoded as `float32` scalars with values 0 or 1 + (e.g. for `binary_crossentropy`). + - `None` (no labels). + class_names: Only valid if `labels` is `"inferred"`. + This is the explicit list of class names + (must match names of subdirectories). Used to control the order + of the classes (otherwise alphanumerical order is used). + color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`. + Whether the images will be converted to + have 1, 3, or 4 channels. 
Defaults to `"rgb"`. + batch_size: Size of the batches of data. Defaults to 32. + If `None`, the data will not be batched + (the dataset will yield individual samples). + image_size: Size to resize images to after they are read from disk, + specified as `(height, width)`. + Since the pipeline processes batches of images that must all have + the same size, this must be provided. Defaults to `(256, 256)`. + shuffle: Whether to shuffle the data. Defaults to `True`. + If set to `False`, sorts the data in alphanumeric order. + seed: Optional random seed for shuffling and transformations. + validation_split: Optional float between 0 and 1, + fraction of data to reserve for validation. + subset: Subset of the data to return. + One of `"training"`, `"validation"`, or `"both"`. + Only used if `validation_split` is set. + When `subset="both"`, the utility returns a tuple of two datasets + (the training and validation datasets respectively). + interpolation: String, the interpolation method used when + resizing images. + Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`, + `"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`. + Defaults to `"bilinear"`. + follow_links: Whether to visit subdirectories pointed to by symlinks. + Defaults to `False`. + crop_to_aspect_ratio: If `True`, resize the images without aspect + ratio distortion. When the original aspect ratio differs from the + target aspect ratio, the output image will be cropped so as to + return the largest possible window in the image + (of size `image_size`) that matches the target aspect ratio. By + default (`crop_to_aspect_ratio=False`), aspect ratio may not be + preserved. + pad_to_aspect_ratio: If `True`, resize the images without aspect + ratio distortion. When the original aspect ratio differs from the + target aspect ratio, the output image will be padded so as to + return the largest possible window in the image + (of size `image_size`) that matches the target aspect ratio. 
By + default (`pad_to_aspect_ratio=False`), aspect ratio may not be + preserved. + data_format: If None uses keras.config.image_data_format() + otherwise either 'channel_last' or 'channel_first'. + verbose: Whether to display number information on classes and + number of files found. Defaults to `True`. + + Returns: + + A `tf.data.Dataset` object. + + - If `label_mode` is `None`, it yields `float32` tensors of shape + `(batch_size, image_size[0], image_size[1], num_channels)`, + encoding images (see below for rules regarding `num_channels`). + - Otherwise, it yields a tuple `(images, labels)`, where `images` has + shape `(batch_size, image_size[0], image_size[1], num_channels)`, + and `labels` follows the format described below. + + Rules regarding labels format: + + - if `label_mode` is `"int"`, the labels are an `int32` tensor of shape + `(batch_size,)`. + - if `label_mode` is `"binary"`, the labels are a `float32` tensor of + 1s and 0s of shape `(batch_size, 1)`. + - if `label_mode` is `"categorical"`, the labels are a `float32` tensor + of shape `(batch_size, num_classes)`, representing a one-hot + encoding of the class index. + + Rules regarding number of channels in the yielded images: + + - if `color_mode` is `"grayscale"`, + there's 1 channel in the image tensors. + - if `color_mode` is `"rgb"`, + there are 3 channels in the image tensors. + - if `color_mode` is `"rgba"`, + there are 4 channels in the image tensors. + """ + + if labels not in ("inferred", None): + if not isinstance(labels, (list, tuple)): + raise ValueError( + "`labels` argument should be a list/tuple of integer labels, " + "of the same size as the number of image files in the target " + "directory. If you wish to infer the labels from the " + "subdirectory " + 'names in the target directory, pass `labels="inferred"`. ' + "If you wish to get a dataset that only contains images " + f"(no labels), pass `labels=None`. 
Received: labels={labels}" + ) + if class_names: + raise ValueError( + "You can only pass `class_names` if " + f'`labels="inferred"`. Received: labels={labels}, and ' + f"class_names={class_names}" + ) + if label_mode not in {"int", "categorical", "binary", None}: + raise ValueError( + '`label_mode` argument must be one of "int", ' + '"categorical", "binary", ' + f"or None. Received: label_mode={label_mode}" + ) + if labels is None or label_mode is None: + labels = None + label_mode = None + if color_mode == "rgb": + num_channels = 3 + elif color_mode == "rgba": + num_channels = 4 + elif color_mode == "grayscale": + num_channels = 1 + else: + raise ValueError( + '`color_mode` must be one of {"rgb", "rgba", "grayscale"}. ' + f"Received: color_mode={color_mode}" + ) + + if isinstance(image_size, int): + image_size = (image_size, image_size) + elif not isinstance(image_size, (list, tuple)) or not len(image_size) == 2: + raise ValueError( + "Invalid `image_size` value. Expected a tuple of 2 integers. " + f"Received: image_size={image_size}" + ) + + interpolation = interpolation.lower() + supported_interpolations = ( + "bilinear", + "nearest", + "bicubic", + "area", + "lanczos3", + "lanczos5", + "gaussian", + "mitchellcubic", + ) + if interpolation not in supported_interpolations: + raise ValueError( + "Argument `interpolation` should be one of " + f"{supported_interpolations}. " + f"Received: interpolation={interpolation}" + ) + + dataset_utils.check_validation_split_arg( + validation_split, subset, shuffle, seed + ) + + if seed is None: + seed = np.random.randint(1e6) + image_paths, labels, class_names = dataset_utils.index_directory( + directory, + labels, + formats=ALLOWLIST_FORMATS, + class_names=class_names, + shuffle=shuffle, + seed=seed, + follow_links=follow_links, + verbose=verbose, + ) + + if label_mode == "binary" and len(class_names) != 2: + raise ValueError( + 'When passing `label_mode="binary"`, there must be exactly 2 ' + f"class_names. 
Received: class_names={class_names}" + ) + + data_format = standardize_data_format(data_format=data_format) + if batch_size is not None: + shuffle_buffer_size = batch_size * 8 + else: + shuffle_buffer_size = 1024 + + if subset == "both": + ( + image_paths_train, + labels_train, + ) = dataset_utils.get_training_or_validation_split( + image_paths, labels, validation_split, "training" + ) + ( + image_paths_val, + labels_val, + ) = dataset_utils.get_training_or_validation_split( + image_paths, labels, validation_split, "validation" + ) + if not image_paths_train: + raise ValueError( + f"No training images found in directory {directory}. " + f"Allowed formats: {ALLOWLIST_FORMATS}" + ) + if not image_paths_val: + raise ValueError( + f"No validation images found in directory {directory}. " + f"Allowed formats: {ALLOWLIST_FORMATS}" + ) + train_dataset = paths_and_labels_to_dataset( + image_paths=image_paths_train, + image_size=image_size, + num_channels=num_channels, + labels=labels_train, + label_mode=label_mode, + num_classes=len(class_names) if class_names else 0, + interpolation=interpolation, + crop_to_aspect_ratio=crop_to_aspect_ratio, + pad_to_aspect_ratio=pad_to_aspect_ratio, + data_format=data_format, + shuffle=shuffle, + shuffle_buffer_size=shuffle_buffer_size, + seed=seed, + ) + + val_dataset = paths_and_labels_to_dataset( + image_paths=image_paths_val, + image_size=image_size, + num_channels=num_channels, + labels=labels_val, + label_mode=label_mode, + num_classes=len(class_names) if class_names else 0, + interpolation=interpolation, + crop_to_aspect_ratio=crop_to_aspect_ratio, + pad_to_aspect_ratio=pad_to_aspect_ratio, + data_format=data_format, + shuffle=False, + ) + + if batch_size is not None: + train_dataset = train_dataset.batch(batch_size) + val_dataset = val_dataset.batch(batch_size) + + train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE) + val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE) + + # Users may need to reference `class_names`. 
+ train_dataset.class_names = class_names + val_dataset.class_names = class_names + + # Include file paths for images as attribute. + train_dataset.file_paths = image_paths_train + val_dataset.file_paths = image_paths_val + + dataset = [train_dataset, val_dataset] + else: + image_paths, labels = dataset_utils.get_training_or_validation_split( + image_paths, labels, validation_split, subset + ) + if not image_paths: + raise ValueError( + f"No images found in directory {directory}. " + f"Allowed formats: {ALLOWLIST_FORMATS}" + ) + + dataset = paths_and_labels_to_dataset( + image_paths=image_paths, + image_size=image_size, + num_channels=num_channels, + labels=labels, + label_mode=label_mode, + num_classes=len(class_names) if class_names else 0, + interpolation=interpolation, + crop_to_aspect_ratio=crop_to_aspect_ratio, + pad_to_aspect_ratio=pad_to_aspect_ratio, + data_format=data_format, + shuffle=shuffle, + shuffle_buffer_size=shuffle_buffer_size, + seed=seed, + ) + + if batch_size is not None: + dataset = dataset.batch(batch_size) + + dataset = dataset.prefetch(tf.data.AUTOTUNE) + # Users may need to reference `class_names`. + dataset.class_names = class_names + + # Include file paths for images as attribute. 
def paths_and_labels_to_dataset(
    image_paths,
    image_size,
    num_channels,
    labels,
    label_mode,
    num_classes,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Build a `tf.data.Dataset` of decoded images, optionally with labels.

    Args:
        image_paths: List of image file paths.
        image_size: Target `(height, width)` for every decoded image.
        num_channels: Number of color channels to decode.
        labels: Label values aligned with `image_paths`.
        label_mode: One of `"int"`, `"categorical"`, `"binary"`, or `None`.
            When falsy, the dataset yields images only.
        num_classes: Number of classes (used for one-hot encoding).
        interpolation: Interpolation method forwarded to `load_image`.
        data_format: `"channels_last"` or `"channels_first"`.
        crop_to_aspect_ratio: Crop instead of distorting when resizing.
        pad_to_aspect_ratio: Pad instead of distorting when resizing.
        shuffle: Whether to shuffle element order.
        shuffle_buffer_size: Shuffle buffer size; defaults to 1024.
        seed: Random seed for shuffling.

    Returns:
        A `tf.data.Dataset` yielding images, or `(image, label)` tuples.
    """
    ds = tf.data.Dataset.from_tensor_slices(image_paths)
    has_labels = bool(label_mode)
    if has_labels:
        label_ds = dataset_utils.labels_to_dataset(
            labels, label_mode, num_classes
        )
        ds = tf.data.Dataset.zip((ds, label_ds))

    if shuffle:
        # Fall back to a default buffer when no explicit size was given.
        buffer_size = shuffle_buffer_size if shuffle_buffer_size else 1024
        ds = ds.shuffle(buffer_size=buffer_size, seed=seed)

    def _decode(path):
        # Read + decode + resize one image; runs inside the tf.data graph.
        return load_image(
            path,
            image_size,
            num_channels,
            interpolation,
            data_format,
            crop_to_aspect_ratio,
            pad_to_aspect_ratio,
        )

    if has_labels:
        ds = ds.map(
            lambda path, label: (_decode(path), label),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    else:
        ds = ds.map(_decode, num_parallel_calls=tf.data.AUTOTUNE)
    return ds
def load_image(
    path,
    image_size,
    num_channels,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
):
    """Load an image from a path and resize it.

    Args:
        path: Path (or string tensor) of the image file to read.
        image_size: Target `(height, width)` after resizing.
        num_channels: Number of channels to decode (1, 3 or 4).
        interpolation: Interpolation method for resizing.
        data_format: `"channels_last"` or `"channels_first"` layout of the
            returned tensor.
        crop_to_aspect_ratio: If `True`, center-crop to the target aspect
            ratio before resizing (no distortion).
        pad_to_aspect_ratio: If `True`, pad to the target aspect ratio
            before resizing (no distortion). Mutually exclusive with
            `crop_to_aspect_ratio`.

    Returns:
        An image tensor with static shape
        `(image_size[0], image_size[1], num_channels)` (or the
        channels-first equivalent).

    Raises:
        ValueError: If both `pad_to_aspect_ratio` and
            `crop_to_aspect_ratio` are `True`.
    """
    # Validate the mutually exclusive flags *before* any file I/O so the
    # error surfaces immediately, instead of after reading and decoding the
    # file (this function runs once per element inside tf.data pipelines).
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio`, `crop_to_aspect_ratio`"
            " can be set to `True`."
        )

    img = tf.io.read_file(path)
    # `expand_animations=False` guarantees a rank-3 tensor even for GIFs.
    img = tf.image.decode_image(
        img, channels=num_channels, expand_animations=False
    )

    if crop_to_aspect_ratio:
        from keras.src.backend import tensorflow as tf_backend

        # `smart_resize` understands channels_first directly, so move the
        # channel axis up front before cropping/resizing.
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))
        img = image_utils.smart_resize(
            img,
            image_size,
            interpolation=interpolation,
            data_format=data_format,
            backend_module=tf_backend,
        )
    elif pad_to_aspect_ratio:
        # `resize_with_pad` expects channels_last input; transpose after.
        img = tf.image.resize_with_pad(
            img, image_size[0], image_size[1], method=interpolation
        )
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))
    else:
        img = tf.image.resize(img, image_size, method=interpolation)
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))

    # Static shapes are lost through tf.data maps; restore them so
    # downstream layers see a fully-defined shape.
    if data_format == "channels_last":
        img.set_shape((image_size[0], image_size[1], num_channels))
    else:
        img.set_shape((num_channels, image_size[0], image_size[1]))
    return img
@keras_export(
    [
        "keras.utils.array_to_img",
        "keras.preprocessing.image.array_to_img",
    ]
)
def array_to_img(x, data_format=None, scale=True, dtype=None):
    """Converts a 3D NumPy array to a PIL Image instance.

    Example:

    ```python
    from PIL import Image
    img = np.random.random(size=(100, 100, 3))
    pil_img = keras.utils.array_to_img(img)
    ```

    Args:
        x: Input data, anything convertible to a NumPy array.
        data_format: `"channels_first"` or `"channels_last"`. When `None`,
            the global `keras.backend.image_data_format()` is used.
        scale: If `True` (default), rescale values so the minimum maps to 0
            and the maximum to 255.
        dtype: Dtype used for the intermediate array. When `None`, the
            global `keras.backend.floatx()` is used.

    Returns:
        A PIL Image instance.

    Raises:
        ImportError: If PIL is not available.
        ValueError: If `x` is not rank 3 or has an unsupported number of
            channels.
    """
    data_format = backend.standardize_data_format(data_format)
    if dtype is None:
        dtype = backend.floatx()
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. "
            "The use of `array_to_img` requires PIL."
        )
    x = np.asarray(x, dtype=dtype)
    if x.ndim != 3:
        raise ValueError(
            "Expected image array to have rank 3 (single image). "
            f"Got array with shape: {x.shape}"
        )

    # PIL wants channels last; move the channel axis if necessary.
    if data_format == "channels_first":
        x = x.transpose(1, 2, 0)

    if scale:
        # Shift to a zero minimum, then normalize into [0, 255].
        x = x - np.min(x)
        peak = np.max(x)
        if peak != 0:
            x /= peak
        x *= 255

    n_channels = x.shape[2]
    if n_channels == 4:
        # RGBA
        return pil_image.fromarray(x.astype("uint8"), "RGBA")
    if n_channels == 3:
        # RGB
        return pil_image.fromarray(x.astype("uint8"), "RGB")
    if n_channels == 1:
        if np.max(x) > 255:
            # 32-bit signed integer grayscale image: PIL mode "I".
            return pil_image.fromarray(x[:, :, 0].astype("int32"), "I")
        # 8-bit grayscale: PIL mode "L".
        return pil_image.fromarray(x[:, :, 0].astype("uint8"), "L")
    raise ValueError(f"Unsupported channel number: {x.shape[2]}")
+ """ + + data_format = backend.standardize_data_format(data_format) + if dtype is None: + dtype = backend.floatx() + # NumPy array x has format (height, width, channel) + # or (channel, height, width) + # but original PIL image has format (width, height, channel) + x = np.asarray(img, dtype=dtype) + if len(x.shape) == 3: + if data_format == "channels_first": + x = x.transpose(2, 0, 1) + elif len(x.shape) == 2: + if data_format == "channels_first": + x = x.reshape((1, x.shape[0], x.shape[1])) + else: + x = x.reshape((x.shape[0], x.shape[1], 1)) + else: + raise ValueError(f"Unsupported image shape: {x.shape}") + return x + + +@keras_export(["keras.utils.save_img", "keras.preprocessing.image.save_img"]) +def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): + """Saves an image stored as a NumPy array to a path or file object. + + Args: + path: Path or file object. + x: NumPy array. + data_format: Image data format, either `"channels_first"` or + `"channels_last"`. + file_format: Optional file format override. If omitted, the format to + use is determined from the filename extension. If a file object was + used instead of a filename, this parameter should always be used. + scale: Whether to rescale image values to be within `[0, 255]`. + **kwargs: Additional keyword arguments passed to `PIL.Image.save()`. + """ + data_format = backend.standardize_data_format(data_format) + img = array_to_img(x, data_format=data_format, scale=scale) + if img.mode == "RGBA" and (file_format == "jpg" or file_format == "jpeg"): + warnings.warn( + "The JPG format does not support RGBA images, converting to RGB." + ) + img = img.convert("RGB") + img.save(path, format=file_format, **kwargs) + + +@keras_export(["keras.utils.load_img", "keras.preprocessing.image.load_img"]) +def load_img( + path, + color_mode="rgb", + target_size=None, + interpolation="nearest", + keep_aspect_ratio=False, +): + """Loads an image into PIL format. 
@keras_export(["keras.utils.load_img", "keras.preprocessing.image.load_img"])
def load_img(
    path,
    color_mode="rgb",
    target_size=None,
    interpolation="nearest",
    keep_aspect_ratio=False,
):
    """Loads an image into PIL format.

    Example:

    ```python
    image = keras.utils.load_img(image_path)
    input_arr = keras.utils.img_to_array(image)
    input_arr = np.array([input_arr])  # Convert single image to a batch.
    predictions = model.predict(input_arr)
    ```

    Args:
        path: Path to the image file (str, `pathlib.Path`, bytes, or an
            `io.BytesIO` object).
        color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`.
            Default: `"rgb"`.
        target_size: `None` (keep original size) or `(height, width)`.
        interpolation: Resampling method used when `target_size` differs
            from the loaded image's size. Supported methods depend on the
            installed PIL version (see `PIL_INTERPOLATION_METHODS`).
        keep_aspect_ratio: If `True`, center-crop to the target aspect
            ratio before resizing so the image is not distorted.

    Returns:
        A PIL Image instance.

    Raises:
        ImportError: If PIL is not available.
        TypeError: If `path` is not a supported type.
        ValueError: If `color_mode` or `interpolation` is invalid.
    """
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. The use of `load_img` requires PIL."
        )
    if isinstance(path, io.BytesIO):
        img = pil_image.open(path)
    elif isinstance(path, (pathlib.Path, bytes, str)):
        if isinstance(path, pathlib.Path):
            path = str(path.resolve())
        # Read the whole file up front so the file handle is closed
        # immediately; PIL otherwise keeps it open lazily.
        with open(path, "rb") as f:
            img = pil_image.open(io.BytesIO(f.read()))
    else:
        raise TypeError(
            f"path should be path-like or io.BytesIO, not {type(path)}"
        )

    if color_mode == "grayscale":
        # Keep existing 8/16/32-bit grayscale modes; otherwise convert to
        # 8-bit grayscale.
        if img.mode not in ("L", "I;16", "I"):
            img = img.convert("L")
    elif color_mode == "rgba":
        if img.mode != "RGBA":
            img = img.convert("RGBA")
    elif color_mode == "rgb":
        if img.mode != "RGB":
            img = img.convert("RGB")
    else:
        raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')

    if target_size is None:
        return img

    # PIL sizes are (width, height) while Keras uses (height, width).
    target_wh = (target_size[1], target_size[0])
    if img.size == target_wh:
        return img

    if interpolation not in PIL_INTERPOLATION_METHODS:
        raise ValueError(
            f"Invalid interpolation method {interpolation} specified. "
            f"Supported methods are "
            f"{', '.join(PIL_INTERPOLATION_METHODS.keys())}"
        )
    resample_method = PIL_INTERPOLATION_METHODS[interpolation]

    if not keep_aspect_ratio:
        return img.resize(target_wh, resample_method)

    # Compute the largest centered crop matching the target aspect ratio.
    width, height = img.size
    target_width, target_height = target_wh

    crop_height = (width * target_height) // target_width
    crop_width = (height * target_width) // target_height

    # Never crop larger than the source image.
    crop_height = min(height, crop_height)
    crop_width = min(width, crop_width)

    crop_box_hstart = (height - crop_height) // 2
    crop_box_wstart = (width - crop_width) // 2
    crop_box = [
        crop_box_wstart,
        crop_box_hstart,
        crop_box_wstart + crop_width,
        crop_box_hstart + crop_height,
    ]
    return img.resize(target_wh, resample_method, box=crop_box)
@keras_export("keras.preprocessing.image.smart_resize")
def smart_resize(
    x,
    size,
    interpolation="bilinear",
    data_format="channels_last",
    backend_module=None,
):
    """Resize images to a target size without aspect ratio distortion.

    Image datasets typically yield images that have each a different
    size. However, these images need to be batched before they can be
    processed by Keras layers. To be batched, images need to share the same
    height and width.

    You could simply do, in TF (or JAX equivalent):

    ```python
    size = (200, 200)
    ds = ds.map(lambda img: resize(img, size))
    ```

    However, if you do this, you distort the aspect ratio of your images,
    since in general they do not all have the same aspect ratio as `size`.
    This is fine in many cases, but not always (e.g. for image generation
    models this can be a problem).

    Note that passing the argument `preserve_aspect_ratio=True` to `resize`
    will preserve the aspect ratio, but at the cost of no longer respecting
    the provided target size.

    This calls for:

    ```python
    size = (200, 200)
    ds = ds.map(lambda img: smart_resize(img, size))
    ```

    Your output images will actually be `(200, 200)`, and will not be
    distorted. Instead, the parts of the image that do not fit within the
    target size get cropped out.

    The resizing process is:

    1. Take the largest centered crop of the image that has the same aspect
    ratio as the target size. For instance, if `size=(200, 200)` and the
    input image has size `(340, 500)`, we take a crop of `(340, 340)`
    centered along the width.
    2. Resize the cropped image to the target size. In the example above,
    we resize the `(340, 340)` crop to `(200, 200)`.

    Args:
        x: Input image or batch of images (as a tensor or NumPy array).
            Must be in format `(height, width, channels)`
            or `(batch_size, height, width, channels)`.
        size: Tuple of `(height, width)` integer. Target size.
        interpolation: String, interpolation to use for resizing.
            Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
            `"lanczos3"`, `"lanczos5"`.
            Defaults to `"bilinear"`.
        data_format: `"channels_last"` or `"channels_first"`.
        backend_module: Backend module to use (if different from the default
            backend).

    Returns:
        Array with shape `(size[0], size[1], channels)`.
        If the input image was a NumPy array, the output is a NumPy array,
        and if it was a backend-native tensor,
        the output is a backend-native tensor.
    """
    # Fall back to the globally-selected Keras backend when the caller does
    # not supply one explicitly.
    backend_module = backend_module or backend
    if len(size) != 2:
        raise ValueError(
            f"Expected `size` to be a tuple of 2 integers, but got: {size}."
        )
    img = backend_module.convert_to_tensor(x)
    # NOTE(review): `len(...)` always returns an int, so this guard is
    # always true; it was presumably meant as `img.shape is not None`
    # (i.e. "rank is known") — confirm against upstream before changing.
    if len(img.shape) is not None:
        if len(img.shape) < 3 or len(img.shape) > 4:
            raise ValueError(
                "Expected an image array with shape `(height, width, "
                "channels)`, or `(batch_size, height, width, channels)`, but "
                f"got input with incorrect rank, of shape {img.shape}."
            )
    shape = backend_module.shape(img)
    # Pick the (height, width) axes according to the layout; negative
    # indices handle both rank-3 and rank-4 inputs.
    if data_format == "channels_last":
        height, width = shape[-3], shape[-2]
    else:
        height, width = shape[-2], shape[-1]
    target_height, target_width = size

    # Set back to input height / width if crop_height / crop_width is not
    # smaller.
    if isinstance(height, int) and isinstance(width, int):
        # Static-shape path: for JAX, slice indices must be static Python
        # integers, so compute the crop box with plain int arithmetic.
        crop_height = int(float(width * target_height) / target_width)
        crop_height = max(min(height, crop_height), 1)
        crop_width = int(float(height * target_width) / target_height)
        crop_width = max(min(width, crop_width), 1)
        crop_box_hstart = int(float(height - crop_height) / 2)
        crop_box_wstart = int(float(width - crop_width) / 2)
    else:
        # Dynamic-shape path: mirror the same arithmetic with backend ops
        # (cast to float32 for the division, clamp to [1, dim], back to
        # int32 for slicing).
        crop_height = backend_module.cast(
            backend_module.cast(width * target_height, "float32")
            / target_width,
            "int32",
        )
        crop_height = backend_module.numpy.minimum(height, crop_height)
        crop_height = backend_module.numpy.maximum(crop_height, 1)
        crop_height = backend_module.cast(crop_height, "int32")

        crop_width = backend_module.cast(
            backend_module.cast(height * target_width, "float32")
            / target_height,
            "int32",
        )
        crop_width = backend_module.numpy.minimum(width, crop_width)
        crop_width = backend_module.numpy.maximum(crop_width, 1)
        crop_width = backend_module.cast(crop_width, "int32")

        crop_box_hstart = backend_module.cast(
            backend_module.cast(height - crop_height, "float32") / 2, "int32"
        )
        crop_box_wstart = backend_module.cast(
            backend_module.cast(width - crop_width, "float32") / 2, "int32"
        )

    # Take the centered crop; the four branches only differ in which axes
    # carry (height, width) and whether a batch dimension is present.
    if data_format == "channels_last":
        if len(img.shape) == 4:
            img = img[
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
        else:
            img = img[
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
    else:
        if len(img.shape) == 4:
            img = img[
                :,
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
            ]
        else:
            img = img[
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
            ]

    img = backend_module.image.resize(
        img, size=size, interpolation=interpolation, data_format=data_format
    )

    # Preserve the caller's container type: NumPy in -> NumPy out.
    if isinstance(x, np.ndarray):
        return np.array(img)
    return img
+ """ + global_state.set_global_attribute("interactive_logging", True) + + +@keras_export( + [ + "keras.config.disable_interactive_logging", + "keras.utils.disable_interactive_logging", + ] +) +def disable_interactive_logging(): + """Turn off interactive logging. + + When interactive logging is disabled, Keras sends logs to `absl.logging`. + This is the best option when using Keras in a non-interactive + way, such as running a training or inference job on a server. + """ + global_state.set_global_attribute("interactive_logging", False) + + +@keras_export( + [ + "keras.config.is_interactive_logging_enabled", + "keras.utils.is_interactive_logging_enabled", + ] +) +def is_interactive_logging_enabled(): + """Check if interactive logging is enabled. + + To switch between writing logs to stdout and `absl.logging`, you may use + `keras.config.enable_interactive_logging()` and + `keras.config.disable_interactive_logging()`. + + Returns: + Boolean, `True` if interactive logging is enabled, + and `False` otherwise. + """ + return global_state.get_global_attribute("interactive_logging", True) + + +def set_logging_verbosity(level): + """Sets the verbosity level for logging. + + Supported log levels are as follows: + + - `"FATAL"` (least verbose) + - `"ERROR"` + - `"WARNING"` + - `"INFO"` + - `"DEBUG"` (most verbose) + + Args: + level: A string corresponding to the level of verbosity for logging. + """ + valid_levels = { + "FATAL": logging.FATAL, + "ERROR": logging.ERROR, + "WARNING": logging.WARNING, + "INFO": logging.INFO, + "DEBUG": logging.DEBUG, + } + verbosity = valid_levels.get(level) + if verbosity is None: + raise ValueError( + "Please pass a valid level for logging verbosity. " + f"Expected one of: {set(valid_levels.keys())}. 
" + f"Received: {level}" + ) + logging.set_verbosity(verbosity) + + +def print_msg(message, line_break=True): + """Print the message to absl logging or stdout.""" + message = str(message) + if is_interactive_logging_enabled(): + message = message + "\n" if line_break else message + try: + sys.stdout.write(message) + except UnicodeEncodeError: + # If the encoding differs from UTF-8, `sys.stdout.write` may fail. + # To address this, replace special unicode characters in the + # message, and then encode and decode using the target encoding. + message = _replace_special_unicode_character(message) + message_bytes = message.encode(sys.stdout.encoding, errors="ignore") + message = message_bytes.decode(sys.stdout.encoding) + sys.stdout.write(message) + sys.stdout.flush() + else: + logging.info(message) + + +def ask_to_proceed_with_overwrite(filepath): + """Produces a prompt asking about overwriting a file. + + Args: + filepath: the path to the file to be overwritten. + + Returns: + True if we can proceed with overwrite, False otherwise. + """ + overwrite = ( + input(f"[WARNING] {filepath} already exists - overwrite? [y/n]") + .strip() + .lower() + ) + while overwrite not in ("y", "n"): + overwrite = ( + input('Enter "y" (overwrite) or "n" (cancel).').strip().lower() + ) + if overwrite == "n": + return False + print_msg("[TIP] Next time specify overwrite=True!") + return True + + +def _replace_special_unicode_character(message): + message = str(message).replace("━", "=") # Fall back to Keras2 behavior. 
+ return message diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_layer.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..8fd69d1f5bf8419da045b6cc7d0f33b47995ab06 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_layer.py @@ -0,0 +1,677 @@ +import inspect + +import numpy as np + +from keras.src import backend +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.layers.layer import Layer +from keras.src.saving import serialization_lib +from keras.src.utils import jax_utils +from keras.src.utils import tracking +from keras.src.utils.module_utils import jax + + +@keras_export("keras.layers.JaxLayer") +class JaxLayer(Layer): + """Keras Layer that wraps a JAX model. + + This layer enables the use of JAX components within Keras when using JAX as + the backend for Keras. + + ## Model function + + This layer accepts JAX models in the form of a function, `call_fn`, which + must take the following arguments with these exact names: + + - `params`: trainable parameters of the model. + - `state` (*optional*): non-trainable state of the model. Can be omitted if + the model has no non-trainable state. + - `rng` (*optional*): a `jax.random.PRNGKey` instance. Can be omitted if the + model does not need RNGs, neither during training nor during inference. + - `inputs`: inputs to the model, a JAX array or a `PyTree` of arrays. + - `training` (*optional*): an argument specifying if we're in training mode + or inference mode, `True` is passed in training mode. Can be omitted if + the model behaves the same in training mode and inference mode. + + The `inputs` argument is mandatory. Inputs to the model must be provided via + a single argument. 
If the JAX model takes multiple inputs as separate + arguments, they must be combined into a single structure, for instance in a + `tuple` or a `dict`. + + ## Model weights initialization + + The initialization of the `params` and `state` of the model can be handled + by this layer, in which case the `init_fn` argument must be provided. This + allows the model to be initialized dynamically with the right shape. + Alternatively, and if the shape is known, the `params` argument and + optionally the `state` argument can be used to create an already initialized + model. + + The `init_fn` function, if provided, must take the following arguments with + these exact names: + + - `rng`: a `jax.random.PRNGKey` instance. + - `inputs`: a JAX array or a `PyTree` of arrays with placeholder values to + provide the shape of the inputs. + - `training` (*optional*): an argument specifying if we're in training mode + or inference mode. `True` is always passed to `init_fn`. Can be omitted + regardless of whether `call_fn` has a `training` argument. + + ## Models with non-trainable state + + For JAX models that have non-trainable state: + + - `call_fn` must have a `state` argument + - `call_fn` must return a `tuple` containing the outputs of the model and + the new non-trainable state of the model + - `init_fn` must return a `tuple` containing the initial trainable params of + the model and the initial non-trainable state of the model. + + This code shows a possible combination of `call_fn` and `init_fn` signatures + for a model with non-trainable state. In this example, the model has a + `training` argument and an `rng` argument in `call_fn`. + + ```python + def stateful_call(params, state, rng, inputs, training): + outputs = ... + new_state = ... + return outputs, new_state + + def stateful_init(rng, inputs): + initial_params = ... + initial_state = ... 
+ return initial_params, initial_state + ``` + + ## Models without non-trainable state + + For JAX models with no non-trainable state: + + - `call_fn` must not have a `state` argument + - `call_fn` must return only the outputs of the model + - `init_fn` must return only the initial trainable params of the model. + + This code shows a possible combination of `call_fn` and `init_fn` signatures + for a model without non-trainable state. In this example, the model does not + have a `training` argument and does not have an `rng` argument in `call_fn`. + + ```python + def stateless_call(params, inputs): + outputs = ... + return outputs + + def stateless_init(rng, inputs): + initial_params = ... + return initial_params + ``` + + ## Conforming to the required signature + + If a model has a different signature than the one required by `JaxLayer`, + one can easily write a wrapper method to adapt the arguments. This example + shows a model that has multiple inputs as separate arguments, expects + multiple RNGs in a `dict`, and has a `deterministic` argument with the + opposite meaning of `training`. To conform, the inputs are combined in a + single structure using a `tuple`, the RNG is split and used the populate the + expected `dict`, and the Boolean flag is negated: + + ```python + def my_model_fn(params, rngs, input1, input2, deterministic): + ... + if not deterministic: + dropout_rng = rngs["dropout"] + keep = jax.random.bernoulli(dropout_rng, dropout_rate, x.shape) + x = jax.numpy.where(keep, x / dropout_rate, 0) + ... + ... 
+ return outputs + + def my_model_wrapper_fn(params, rng, inputs, training): + input1, input2 = inputs + rng1, rng2 = jax.random.split(rng) + rngs = {"dropout": rng1, "preprocessing": rng2} + deterministic = not training + return my_model_fn(params, rngs, input1, input2, deterministic) + + keras_layer = JaxLayer(my_model_wrapper_fn, params=initial_params) + ``` + + ## Usage with Haiku modules + + `JaxLayer` enables the use of [Haiku](https://dm-haiku.readthedocs.io) + components in the form of + [`haiku.Module`](https://dm-haiku.readthedocs.io/en/latest/api.html#module). + This is achieved by transforming the module per the Haiku pattern and then + passing `module.apply` in the `call_fn` parameter and `module.init` in the + `init_fn` parameter if needed. + + If the model has non-trainable state, it should be transformed with + [`haiku.transform_with_state`]( + https://dm-haiku.readthedocs.io/en/latest/api.html#haiku.transform_with_state). + If the model has no non-trainable state, it should be transformed with + [`haiku.transform`]( + https://dm-haiku.readthedocs.io/en/latest/api.html#haiku.transform). + Additionally, and optionally, if the module does not use RNGs in "apply", it + can be transformed with + [`haiku.without_apply_rng`]( + https://dm-haiku.readthedocs.io/en/latest/api.html#without-apply-rng). 
+ + The following example shows how to create a `JaxLayer` from a Haiku module + that uses random number generators via `hk.next_rng_key()` and takes a + training positional argument: + + ```python + class MyHaikuModule(hk.Module): + def __call__(self, x, training): + x = hk.Conv2D(32, (3, 3))(x) + x = jax.nn.relu(x) + x = hk.AvgPool((1, 2, 2, 1), (1, 2, 2, 1), "VALID")(x) + x = hk.Flatten()(x) + x = hk.Linear(200)(x) + if training: + x = hk.dropout(rng=hk.next_rng_key(), rate=0.3, x=x) + x = jax.nn.relu(x) + x = hk.Linear(10)(x) + x = jax.nn.softmax(x) + return x + + def my_haiku_module_fn(inputs, training): + module = MyHaikuModule() + return module(inputs, training) + + transformed_module = hk.transform(my_haiku_module_fn) + + keras_layer = JaxLayer( + call_fn=transformed_module.apply, + init_fn=transformed_module.init, + ) + ``` + + Args: + call_fn: The function to call the model. See description above for the + list of arguments it takes and the outputs it returns. + init_fn: the function to call to initialize the model. See description + above for the list of arguments it takes and the outputs it returns. + If `None`, then `params` and/or `state` must be provided. + params: A `PyTree` containing all the model trainable parameters. This + allows passing trained parameters or controlling the initialization. + If both `params` and `state` are `None`, `init_fn` is called at + build time to initialize the trainable parameters of the model. + state: A `PyTree` containing all the model non-trainable state. This + allows passing learned state or controlling the initialization. If + both `params` and `state` are `None`, and `call_fn` takes a `state` + argument, then `init_fn` is called at build time to initialize the + non-trainable state of the model. + seed: Seed for random number generator. Optional. 
+ """ + + def __init__( + self, + call_fn, + init_fn=None, + params=None, + state=None, + seed=None, + **kwargs, + ): + if backend.backend() != "jax": + raise ValueError( + "JaxLayer is only supported with the JAX backend. Current " + f"backend: {backend.backend()}" + ) + + if init_fn is None and params is None and state is None: + raise ValueError( + "`init_fn`, `params` and `state` cannot all be `None`." + ) + + super().__init__(**kwargs) + self.call_fn = call_fn + self.init_fn = init_fn + self.seed_generator = backend.random.SeedGenerator(seed) + self.tracked_params = self._create_variables(params, trainable=True) + self.tracked_state = self._create_variables(state, trainable=False) + if self.params is not None or self.state is not None: + self.built = True + + self.call_fn_arguments = self._validate_signature( + call_fn, + "call_fn", + {"params", "state", "rng", "inputs", "training"}, + {"inputs"}, + ) + self.has_state = "state" in self.call_fn_arguments + + if init_fn: + self.init_fn_arguments = self._validate_signature( + init_fn, "init_fn", {"rng", "inputs", "training"}, {"inputs"} + ) + + def _validate_signature(self, fn, fn_name, allowed, required): + fn_parameters = inspect.signature(fn).parameters + for parameter_name in required: + if parameter_name not in fn_parameters: + raise ValueError( + f"Missing required argument in `{fn_name}`: " + f"`{parameter_name}`" + ) + + parameter_names = [] + for parameter in fn_parameters.values(): + if parameter.name not in allowed: + raise ValueError( + f"Unsupported argument in `{fn_name}`: `{parameter.name}`, " + f"supported arguments are `{'`, `'.join(allowed)}`" + ) + parameter_names.append(parameter.name) + + return parameter_names + + @tracking.no_automatic_dependency_tracking + def _create_variables(self, values, trainable): + """Create a structure of variables from a structure of JAX arrays. + + `values` is traversed via JAX's `tree_map`. 
When a leaf is a JAX array + or a tensor-like object, a corresponding variable is created with it as + the initial value. The resulting structure of variables is assigned to + `self.params` or `self.state` depending on `trainable`. Then, a + flattened version of the variables is returned for tracking. + `self.params` or `self.state` are intentionally not tracked because + structures like `TrackedList` interfere with `jax.tree_utils`. + Note that leaf objects that are not JAX arrays and not tensor-like are + left intact as they are assumed to be configuration used by the model. + + Args: + values: the structure of values to traverse. + trainable: whether to create trainable variables. + + Returns: + flat list of variables initialized with `values` for tracking. + """ + + def create_variable(value): + if backend.is_tensor(value) or isinstance(value, np.ndarray): + variable = self.add_weight( + value.shape, initializer="zeros", trainable=trainable + ) + variable.assign(value) + return variable + elif isinstance(value, (np.generic, int, float)): + variable = self.add_weight( + (), initializer="zeros", trainable=trainable + ) + variable.assign(value) + return variable + else: + return value + + # Use JAX's tree_map as it understands registered classes. + variables = jax.tree_util.tree_map(create_variable, values) + + if trainable: + self.params = variables + else: + self.state = variables + + flat_variables, _ = jax.tree_util.tree_flatten(variables) + return flat_variables + + def _get_init_rng(self): + """ + Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `init_fn`. + + By default, this returns a single `PRNGKey` retrieved by calling + `self.seed_generator.next()`. Override this to return a different + structure. + + Returns: + a JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as + the `rng` argument of `init_fn`. 
+ """ + return self.seed_generator.next() + + def _get_call_rng(self, training): + """ + Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `call_fn`. + + By default, this returns a single `PRNGKey` retrieved by calling + `self.seed_generator.next()` when `training` is `True`, and `None` when + `training` is `False`. Override this to return a different structure or + to pass RNGs in inference mode too. + + Returns: + a JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as + the `rng` argument of `call_fn`. + """ + if training: + return self.seed_generator.next() + else: + return None + + def build(self, input_shape): + if self.params is not None or self.state is not None: + return + + if jax_utils.is_in_jax_tracing_scope(): + # This exception is not actually shown, it is caught and a detailed + # warning about calling 'build' is printed. + raise ValueError("'JaxLayer' cannot be built in tracing scope") + + # Initialize `params` and `state` if needed by calling `init_fn`. 
+ def create_input(shape): + shape = [d if d is not None else 1 for d in shape] + return jax.numpy.ones(shape) + + init_inputs = tree.map_shape_structure(create_input, input_shape) + init_args = [] + for argument_name in self.init_fn_arguments: + if argument_name == "rng": + init_args.append(self._get_init_rng()) + elif argument_name == "inputs": + init_args.append(init_inputs) + elif argument_name == "training": + init_args.append(True) + + init_result = self.init_fn(*init_args) + if self.has_state: + init_params, init_state = init_result + else: + init_params, init_state = init_result, None + + self.tracked_params = self._create_variables( + init_params, trainable=True + ) + self.tracked_state = self._create_variables(init_state, trainable=False) + self.built = True + + def call(self, inputs, training=False): + def unwrap_variable(variable): + return None if variable is None else variable.value + + call_args = [] + for argument_name in self.call_fn_arguments: + if argument_name == "params": + call_args.append( + jax.tree_util.tree_map(unwrap_variable, self.params) + ) + elif argument_name == "state": + call_args.append( + jax.tree_util.tree_map(unwrap_variable, self.state) + ) + elif argument_name == "rng": + call_args.append(self._get_call_rng(training)) + elif argument_name == "inputs": + call_args.append(inputs) + elif argument_name == "training": + call_args.append(training) + + def assign_state_to_variable(value, variable): + # This exists only to make debugging this error case easier. + if not hasattr(variable, "assign"): + raise ValueError( + "Structure mismatch: the structure of the state returned " + "by `call` does not match the structure of the state at " + "initialization time." 
+ ) + variable.assign(value) + + if self.has_state: + predictions, new_state = self.call_fn(*call_args) + jax.tree_util.tree_map( + assign_state_to_variable, new_state, self.state + ) + return predictions + else: + return self.call_fn(*call_args) + + def get_config(self): + config = { + "call_fn": serialization_lib.serialize_keras_object(self.call_fn), + "init_fn": serialization_lib.serialize_keras_object(self.init_fn), + } + base_config = super().get_config() + return dict(list(base_config.items()) + list(config.items())) + + @classmethod + def from_config(cls, config): + call_fn = serialization_lib.deserialize_keras_object(config["call_fn"]) + init_fn = serialization_lib.deserialize_keras_object(config["init_fn"]) + config["call_fn"] = call_fn + config["init_fn"] = init_fn + return super().from_config(config) + + +@keras_export("keras.layers.FlaxLayer") +class FlaxLayer(JaxLayer): + """Keras Layer that wraps a [Flax](https://flax.readthedocs.io) module. + + This layer enables the use of Flax components in the form of + [`flax.linen.Module`]( + https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) + instances within Keras when using JAX as the backend for Keras. + + The module method to use for the forward pass can be specified via the + `method` argument and is `__call__` by default. This method must take the + following arguments with these exact names: + + - `self` if the method is bound to the module, which is the case for the + default of `__call__`, and `module` otherwise to pass the module. + - `inputs`: the inputs to the model, a JAX array or a `PyTree` of arrays. + - `training` *(optional)*: an argument specifying if we're in training mode + or inference mode, `True` is passed in training mode. + + `FlaxLayer` handles the non-trainable state of your model and required RNGs + automatically. 
Note that the `mutable` parameter of + [`flax.linen.Module.apply()`]( + https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.apply) + is set to `DenyList(["params"])`, therefore making the assumption that all + the variables outside of the "params" collection are non-trainable weights. + + This example shows how to create a `FlaxLayer` from a Flax `Module` with + the default `__call__` method and no training argument: + + ```python + class MyFlaxModule(flax.linen.Module): + @flax.linen.compact + def __call__(self, inputs): + x = inputs + x = flax.linen.Conv(features=32, kernel_size=(3, 3))(x) + x = flax.linen.relu(x) + x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) + x = x.reshape((x.shape[0], -1)) # flatten + x = flax.linen.Dense(features=200)(x) + x = flax.linen.relu(x) + x = flax.linen.Dense(features=10)(x) + x = flax.linen.softmax(x) + return x + + flax_module = MyFlaxModule() + keras_layer = FlaxLayer(flax_module) + ``` + + This example shows how to wrap the module method to conform to the required + signature. This allows having multiple input arguments and a training + argument that has a different name and values. This additionally shows how + to use a function that is not bound to the module. + + ```python + class MyFlaxModule(flax.linen.Module): + @flax.linen.compact + def forward(self, input1, input2, deterministic): + ... + return outputs + + def my_flax_module_wrapper(module, inputs, training): + input1, input2 = inputs + return module.forward(input1, input2, not training) + + flax_module = MyFlaxModule() + keras_layer = FlaxLayer( + module=flax_module, + method=my_flax_module_wrapper, + ) + ``` + + Args: + module: An instance of `flax.linen.Module` or subclass. + method: The method to call the model. This is generally a method in the + `Module`. If not provided, the `__call__` method is used. 
`method` + can also be a function not defined in the `Module`, in which case it + must take the `Module` as the first argument. It is used for both + `Module.init` and `Module.apply`. Details are documented in the + `method` argument of [`flax.linen.Module.apply()`]( + https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.apply). + variables: A `dict` containing all the variables of the module in the + same format as what is returned by [`flax.linen.Module.init()`]( + https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.init). + It should contain a "params" key and, if applicable, other keys for + collections of variables for non-trainable state. This allows + passing trained parameters and learned non-trainable state or + controlling the initialization. If `None` is passed, the module's + `init` function is called at build time to initialize the variables + of the model. + """ + + def __init__( + self, + module, + method=None, + variables=None, + **kwargs, + ): + # Late import to only require Flax when this is used. + from flax.core import scope as flax_scope + + if backend.backend() != "jax": + raise ValueError( + "FlaxLayer is only supported with the JAX backend. 
Current " + f"backend: {backend.backend()}" + ) + + self.module = module + self.method = method + + apply_mutable = flax_scope.DenyList(["params"]) + + def apply_with_training(params, state, rng, inputs, training): + return self.module.apply( + self._params_and_state_to_variables(params, state), + inputs, + rngs=rng, + method=self.method, + mutable=apply_mutable, + training=training, + ) + + def apply_without_training(params, state, rng, inputs): + return self.module.apply( + self._params_and_state_to_variables(params, state), + inputs, + rngs=rng, + method=self.method, + mutable=apply_mutable, + ) + + def init_with_training(rng, inputs, training): + return self._variables_to_params_and_state( + self.module.init( + rng, + inputs, + method=self.method, + training=training, + ) + ) + + def init_without_training(rng, inputs): + return self._variables_to_params_and_state( + self.module.init( + rng, + inputs, + method=self.method, + ) + ) + + if ( + "training" + in inspect.signature(method or module.__call__).parameters + ): + call_fn, init_fn = apply_with_training, init_with_training + else: + call_fn, init_fn = apply_without_training, init_without_training + + params, state = self._variables_to_params_and_state(variables) + + super().__init__( + call_fn=call_fn, + init_fn=init_fn, + params=params, + state=state, + **kwargs, + ) + + def _params_and_state_to_variables(self, params, state): + if params: + if state: + return {**params, **state} + else: + return params + elif state: + return state + return {} + + def _variables_to_params_and_state(self, variables): + # neither params nor state + if variables is None: + return None, None + # state only + if "params" not in variables: + return {}, variables + # params only + if len(variables) == 1: + return variables, {} + # both, we need to split + params = {"params": variables["params"]} + state = {k: v for k, v in variables.items() if k != "params"} + return params, state + + def _get_init_rng(self): + return { + 
"params": self.seed_generator.next(), + "dropout": self.seed_generator.next(), + } + + def _get_call_rng(self, training): + if training: + return {"dropout": self.seed_generator.next()} + else: + return {} + + def get_config(self): + config_method = self.method + if ( + hasattr(self.method, "__self__") + and self.method.__self__ == self.module + ): + # A method bound to the module is serialized by name. + config_method = self.method.__name__ + config = { + "module": serialization_lib.serialize_keras_object(self.module), + "method": serialization_lib.serialize_keras_object(config_method), + } + base_config = super().get_config() + # call_fn and init_fn come from module, do not save them. + base_config.pop("call_fn") + base_config.pop("init_fn") + return dict(list(base_config.items()) + list(config.items())) + + @classmethod + def from_config(cls, config): + module = serialization_lib.deserialize_keras_object(config["module"]) + method = serialization_lib.deserialize_keras_object(config["method"]) + if isinstance(config["method"], str): + # Deserialize bound method from the module. 
+ method = getattr(module, method) + config["module"] = module + config["method"] = method + return cls(**config) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d5375785f762e98ff0e0461591fcfc884a5b51f0 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_utils.py @@ -0,0 +1,11 @@ +from keras.src import backend + + +def is_in_jax_tracing_scope(x=None): + if backend.backend() == "jax": + if x is None: + x = backend.numpy.ones(()) + for c in x.__class__.__mro__: + if c.__name__ == "Tracer" and c.__module__.startswith("jax"): + return True + return False diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/model_visualization.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/model_visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..786a72b8b6f131c976222e1ba69bef14208c50a6 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/model_visualization.py @@ -0,0 +1,487 @@ +"""Utilities related to model visualization.""" + +import os +import sys + +from keras.src import tree +from keras.src.api_export import keras_export +from keras.src.utils import io_utils + +try: + import pydot +except ImportError: + # pydot_ng and pydotplus are older forks of pydot + # which may still be used by some users + try: + import pydot_ng as pydot + except ImportError: + try: + import pydotplus as pydot + except ImportError: + pydot = None + + +def check_pydot(): + """Returns True if PyDot is available.""" + return pydot is not None + + +def check_graphviz(): + """Returns True if both PyDot and Graphviz are available.""" + if not check_pydot(): + return False + try: + # Attempt to create an image of 
a blank graph + # to check the pydot/graphviz installation. + pydot.Dot.create(pydot.Dot()) + return True + except (OSError, pydot.PydotException): + return False + + +def add_edge(dot, src, dst): + if not dot.get_edge(src, dst): + edge = pydot.Edge(src, dst) + edge.set("penwidth", "2") + dot.add_edge(edge) + + +def get_layer_activation_name(layer): + if hasattr(layer.activation, "name"): + activation_name = layer.activation.name + elif hasattr(layer.activation, "__name__"): + activation_name = layer.activation.__name__ + else: + activation_name = str(layer.activation) + return activation_name + + +def make_layer_label(layer, **kwargs): + class_name = layer.__class__.__name__ + + show_layer_names = kwargs.pop("show_layer_names") + show_layer_activations = kwargs.pop("show_layer_activations") + show_dtype = kwargs.pop("show_dtype") + show_shapes = kwargs.pop("show_shapes") + show_trainable = kwargs.pop("show_trainable") + if kwargs: + raise ValueError(f"Invalid kwargs: {kwargs}") + + table = ( + '<' + ) + + colspan_max = sum(int(x) for x in (show_dtype, show_trainable)) + if show_shapes: + colspan_max += 2 + colspan = max(1, colspan_max) + + if show_layer_names: + table += ( + f'" + ) + else: + table += ( + f'" + ) + if ( + show_layer_activations + and hasattr(layer, "activation") + and layer.activation is not None + ): + table += ( + f'" + ) + + cols = [] + if show_shapes: + input_shape = None + output_shape = None + try: + input_shape = tree.map_structure(lambda x: x.shape, layer.input) + output_shape = tree.map_structure(lambda x: x.shape, layer.output) + except (ValueError, AttributeError): + pass + + def format_shape(shape): + if shape is not None: + if isinstance(shape, dict): + shape_str = ", ".join( + [f"{k}: {v}" for k, v in shape.items()] + ) + else: + shape_str = f"{shape}" + shape_str = shape_str.replace("}", "").replace("{", "") + else: + shape_str = "?" 
+ return shape_str + + if class_name != "InputLayer": + cols.append( + ( + '" + ) + ) + cols.append( + ( + '" + ) + ) + if show_dtype: + dtype = None + try: + dtype = tree.map_structure(lambda x: x.dtype, layer.output) + except (ValueError, AttributeError): + pass + cols.append( + ( + '" + ) + ) + if show_trainable and hasattr(layer, "trainable") and layer.weights: + if layer.trainable: + cols.append( + ( + '" + ) + ) + else: + cols.append( + ( + '" + ) + ) + if cols: + colspan = len(cols) + else: + colspan = 1 + + if cols: + table += "" + "".join(cols) + "" + table += "
' + '' + f"{layer.name} ({class_name})" + "
' + '' + f"{class_name}" + "
' + '' + f"Activation: {get_layer_activation_name(layer)}" + "
' + f"Input shape: {format_shape(input_shape)}" + "' + f"Output shape: {format_shape(output_shape)}" + "' + f'Output dtype: {dtype or "?"}' + "' + '' + "Trainable' + '' + "Non-trainable
>" + return table + + +def make_node(layer, **kwargs): + node = pydot.Node(str(id(layer)), label=make_layer_label(layer, **kwargs)) + node.set("fontname", "Helvetica") + node.set("border", "0") + node.set("margin", "0") + return node + + +def remove_unused_edges(dot): + nodes = [v.get_name() for v in dot.get_nodes()] + for edge in dot.get_edges(): + if edge.get_destination() not in nodes: + dot.del_edge(edge.get_source(), edge.get_destination()) + return dot + + +@keras_export("keras.utils.model_to_dot") +def model_to_dot( + model, + show_shapes=False, + show_dtype=False, + show_layer_names=True, + rankdir="TB", + expand_nested=False, + dpi=200, + subgraph=False, + show_layer_activations=False, + show_trainable=False, + **kwargs, +): + """Convert a Keras model to dot format. + + Args: + model: A Keras model instance. + show_shapes: whether to display shape information. + show_dtype: whether to display layer dtypes. + show_layer_names: whether to display layer names. + rankdir: `rankdir` argument passed to PyDot, + a string specifying the format of the plot: `"TB"` + creates a vertical plot; `"LR"` creates a horizontal plot. + expand_nested: whether to expand nested Functional models + into clusters. + dpi: Image resolution in dots per inch. + subgraph: whether to return a `pydot.Cluster` instance. + show_layer_activations: Display layer activations (only for layers that + have an `activation` property). + show_trainable: whether to display if a layer is trainable. + + Returns: + A `pydot.Dot` instance representing the Keras model or + a `pydot.Cluster` instance representing nested model if + `subgraph=True`. + """ + from keras.src.ops.function import make_node_key + + if not model.built: + raise ValueError( + "This model has not yet been built. " + "Build the model first by calling `build()` or by calling " + "the model on a batch of data." 
+ ) + + from keras.src.models import functional + from keras.src.models import sequential + + # from keras.src.layers import Wrapper + + if not check_pydot(): + raise ImportError( + "You must install pydot (`pip install pydot`) for " + "model_to_dot to work." + ) + + if subgraph: + dot = pydot.Cluster(style="dashed", graph_name=model.name) + dot.set("label", model.name) + dot.set("labeljust", "l") + else: + dot = pydot.Dot() + dot.set("rankdir", rankdir) + dot.set("concentrate", True) + dot.set("dpi", dpi) + dot.set("splines", "ortho") + dot.set_node_defaults(shape="record") + + if kwargs.pop("layer_range", None) is not None: + raise ValueError("Argument `layer_range` is no longer supported.") + if kwargs: + raise ValueError(f"Unrecognized keyword arguments: {kwargs}") + + kwargs = { + "show_layer_names": show_layer_names, + "show_layer_activations": show_layer_activations, + "show_dtype": show_dtype, + "show_shapes": show_shapes, + "show_trainable": show_trainable, + } + + if isinstance(model, sequential.Sequential): + layers = model.layers + elif not isinstance(model, functional.Functional): + # We treat subclassed models as a single node. + node = make_node(model, **kwargs) + dot.add_node(node) + return dot + else: + layers = model._operations + + # Create graph nodes. + sub_n_first_node = {} + sub_n_last_node = {} + for i, layer in enumerate(layers): + # Process nested functional models. + if expand_nested and isinstance(layer, functional.Functional): + submodel = model_to_dot( + layer, + show_shapes, + show_dtype, + show_layer_names, + rankdir, + expand_nested, + subgraph=True, + show_layer_activations=show_layer_activations, + show_trainable=show_trainable, + ) + # sub_n : submodel + sub_n_nodes = submodel.get_nodes() + sub_n_first_node[layer.name] = sub_n_nodes[0] + sub_n_last_node[layer.name] = sub_n_nodes[-1] + dot.add_subgraph(submodel) + + else: + node = make_node(layer, **kwargs) + dot.add_node(node) + + # Connect nodes with edges. + # Sequential case. 
+ if isinstance(model, sequential.Sequential): + for i in range(len(layers) - 1): + inbound_layer_id = str(id(layers[i])) + layer_id = str(id(layers[i + 1])) + add_edge(dot, inbound_layer_id, layer_id) + return dot + + # Functional case. + for i, layer in enumerate(layers): + layer_id = str(id(layer)) + for i, node in enumerate(layer._inbound_nodes): + node_key = make_node_key(layer, i) + if node_key in model._nodes: + for parent_node in node.parent_nodes: + inbound_layer = parent_node.operation + inbound_layer_id = str(id(inbound_layer)) + if not expand_nested: + assert dot.get_node(inbound_layer_id) + assert dot.get_node(layer_id) + add_edge(dot, inbound_layer_id, layer_id) + else: + # if inbound_layer is not Functional + if not isinstance(inbound_layer, functional.Functional): + # if current layer is not Functional + if not isinstance(layer, functional.Functional): + assert dot.get_node(inbound_layer_id) + assert dot.get_node(layer_id) + add_edge(dot, inbound_layer_id, layer_id) + # if current layer is Functional + elif isinstance(layer, functional.Functional): + add_edge( + dot, + inbound_layer_id, + sub_n_first_node[layer.name].get_name(), + ) + # if inbound_layer is Functional + elif isinstance(inbound_layer, functional.Functional): + name = sub_n_last_node[ + inbound_layer.name + ].get_name() + if isinstance(layer, functional.Functional): + output_name = sub_n_first_node[ + layer.name + ].get_name() + add_edge(dot, name, output_name) + else: + add_edge(dot, name, layer_id) + return dot + + +@keras_export("keras.utils.plot_model") +def plot_model( + model, + to_file="model.png", + show_shapes=False, + show_dtype=False, + show_layer_names=False, + rankdir="TB", + expand_nested=False, + dpi=200, + show_layer_activations=False, + show_trainable=False, + **kwargs, +): + """Converts a Keras model to dot format and save to a file. + + Example: + + ```python + inputs = ... + outputs = ... 
+ model = keras.Model(inputs=inputs, outputs=outputs) + + dot_img_file = '/tmp/model_1.png' + keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True) + ``` + + Args: + model: A Keras model instance + to_file: File name of the plot image. + show_shapes: whether to display shape information. + show_dtype: whether to display layer dtypes. + show_layer_names: whether to display layer names. + rankdir: `rankdir` argument passed to PyDot, + a string specifying the format of the plot: `"TB"` + creates a vertical plot; `"LR"` creates a horizontal plot. + expand_nested: whether to expand nested Functional models + into clusters. + dpi: Image resolution in dots per inch. + show_layer_activations: Display layer activations (only for layers that + have an `activation` property). + show_trainable: whether to display if a layer is trainable. + + Returns: + A Jupyter notebook Image object if Jupyter is installed. + This enables in-line display of the model plots in notebooks. + """ + + if not model.built: + raise ValueError( + "This model has not yet been built. " + "Build the model first by calling `build()` or by calling " + "the model on a batch of data." + ) + if not check_pydot(): + message = ( + "You must install pydot (`pip install pydot`) " + "for `plot_model` to work." + ) + if "IPython.core.magics.namespace" in sys.modules: + # We don't raise an exception here in order to avoid crashing + # notebook tests where graphviz is not available. + io_utils.print_msg(message) + return + else: + raise ImportError(message) + if not check_graphviz(): + message = ( + "You must install graphviz " + "(see instructions at https://graphviz.gitlab.io/download/) " + "for `plot_model` to work." + ) + if "IPython.core.magics.namespace" in sys.modules: + # We don't raise an exception here in order to avoid crashing + # notebook tests where graphviz is not available. 
+ io_utils.print_msg(message) + return + else: + raise ImportError(message) + + if kwargs.pop("layer_range", None) is not None: + raise ValueError("Argument `layer_range` is no longer supported.") + if kwargs: + raise ValueError(f"Unrecognized keyword arguments: {kwargs}") + + dot = model_to_dot( + model, + show_shapes=show_shapes, + show_dtype=show_dtype, + show_layer_names=show_layer_names, + rankdir=rankdir, + expand_nested=expand_nested, + dpi=dpi, + show_layer_activations=show_layer_activations, + show_trainable=show_trainable, + ) + to_file = str(to_file) + if dot is None: + return + dot = remove_unused_edges(dot) + _, extension = os.path.splitext(to_file) + if not extension: + extension = "png" + else: + extension = extension[1:] + # Save image to disk. + dot.write(to_file, format=extension) + # Return the image as a Jupyter Image object, to be displayed in-line. + # Note that we cannot easily detect whether the code is running in a + # notebook, and thus we always return the Image if Jupyter is available. + if extension != "pdf": + try: + from IPython import display + + return display.Image(filename=to_file) + except ImportError: + pass diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/module_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/module_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..190bc8dc72febca4ae3f8246173b8b56dc340b6c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/module_utils.py @@ -0,0 +1,61 @@ +import importlib + + +class LazyModule: + def __init__(self, name, pip_name=None, import_error_msg=None): + self.name = name + self.pip_name = pip_name or name + self.import_error_msg = import_error_msg or ( + f"This requires the {self.name} module. 
" + f"You can install it via `pip install {self.pip_name}`" + ) + self.module = None + self._available = None + + @property + def available(self): + if self._available is None: + try: + self.initialize() + self._available = True + except ImportError: + self._available = False + return self._available + + def initialize(self): + try: + self.module = importlib.import_module(self.name) + except ImportError: + raise ImportError(self.import_error_msg) + + def __getattr__(self, name): + if name == "_api_export_path": + raise AttributeError + if self.module is None: + self.initialize() + return getattr(self.module, name) + + def __repr__(self): + return f"LazyModule({self.name})" + + +tensorflow = LazyModule("tensorflow") +gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow") +tensorflow_io = LazyModule("tensorflow_io") +scipy = LazyModule("scipy") +jax = LazyModule("jax") +torchvision = LazyModule("torchvision") +torch_xla = LazyModule( + "torch_xla", + import_error_msg=( + "This requires the torch_xla module. You can install it via " + "`pip install torch-xla`. Additionally, you may need to update " + "LD_LIBRARY_PATH if necessary. Torch XLA builds a shared library, " + "_XLAC.so, which needs to link to the version of Python it was built " + "with. 
Use the following command to update LD_LIBRARY_PATH: " + "`export LD_LIBRARY_PATH=/lib:$LD_LIBRARY_PATH`" + ), +) +optree = LazyModule("optree") +dmtree = LazyModule("tree") +tf2onnx = LazyModule("tf2onnx") diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/naming.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/naming.py new file mode 100644 index 0000000000000000000000000000000000000000..28107f0f30f4b383a279d2dc636d0c1ed2c5be45 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/naming.py @@ -0,0 +1,73 @@ +import collections +import re + +from keras.src.api_export import keras_export +from keras.src.backend.common import global_state + + +def auto_name(prefix): + prefix = to_snake_case(prefix) + return uniquify(prefix) + + +def uniquify(name): + object_name_uids = global_state.get_global_attribute( + "object_name_uids", + default=collections.defaultdict(int), + set_to_default=True, + ) + if name in object_name_uids: + unique_name = f"{name}_{object_name_uids[name]}" + else: + unique_name = name + object_name_uids[name] += 1 + return unique_name + + +def to_snake_case(name): + name = re.sub(r"\W+", "", name) + name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) + name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower() + return name + + +@keras_export("keras.backend.get_uid") +def get_uid(prefix=""): + """Associates a string prefix with an integer counter. + + Args: + prefix: String prefix to index. + + Returns: + Unique integer ID. 
+ + Example: + + >>> get_uid('dense') + 1 + >>> get_uid('dense') + 2 + """ + object_name_uids = global_state.get_global_attribute( + "object_name_uids", + default=collections.defaultdict(int), + set_to_default=True, + ) + object_name_uids[prefix] += 1 + return object_name_uids[prefix] + + +def reset_uids(): + global_state.set_global_attribute( + "object_name_uids", collections.defaultdict(int) + ) + + +def get_object_name(obj): + if hasattr(obj, "name"): # Most Keras objects. + return obj.name + elif hasattr(obj, "__name__"): # Function. + return to_snake_case(obj.__name__) + elif hasattr(obj, "__class__"): # Class instance. + return to_snake_case(obj.__class__.__name__) + return to_snake_case(str(obj)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/numerical_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/numerical_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dcd2cc422d6a4d4400905e470123a2e46832dec1 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/numerical_utils.py @@ -0,0 +1,225 @@ +import numpy as np + +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.utils import tf_utils + + +@keras_export("keras.utils.normalize") +def normalize(x, axis=-1, order=2): + """Normalizes an array. + + If the input is a NumPy array, a NumPy array will be returned. + If it's a backend tensor, a backend tensor will be returned. + + Args: + x: Array to normalize. + axis: axis along which to normalize. + order: Normalization order (e.g. `order=2` for L2 norm). + + Returns: + A normalized copy of the array. 
+ """ + from keras.src import ops + + if isinstance(x, np.ndarray): + # NumPy input + norm = np.atleast_1d(np.linalg.norm(x, order, axis)) + norm[norm == 0] = 1 + + # axis cannot be `None` + axis = axis or -1 + return x / np.expand_dims(norm, axis) + + # Backend tensor input + return ops.nn.normalize(x, axis=axis, order=order) + + +@keras_export("keras.utils.to_categorical") +def to_categorical(x, num_classes=None): + """Converts a class vector (integers) to binary class matrix. + + E.g. for use with `categorical_crossentropy`. + + Args: + x: Array-like with class values to be converted into a matrix + (integers from 0 to `num_classes - 1`). + num_classes: Total number of classes. If `None`, this would be inferred + as `max(x) + 1`. Defaults to `None`. + + Returns: + A binary matrix representation of the input as a NumPy array. The class + axis is placed last. + + Example: + + >>> a = keras.utils.to_categorical([0, 1, 2, 3], num_classes=4) + >>> print(a) + [[1. 0. 0. 0.] + [0. 1. 0. 0.] + [0. 0. 1. 0.] + [0. 0. 0. 1.]] + + >>> b = np.array([.9, .04, .03, .03, + ... .3, .45, .15, .13, + ... .04, .01, .94, .05, + ... .12, .21, .5, .17], + ... shape=[4, 4]) + >>> loss = keras.ops.categorical_crossentropy(a, b) + >>> print(np.around(loss, 5)) + [0.10536 0.82807 0.1011 1.77196] + + >>> loss = keras.ops.categorical_crossentropy(a, a) + >>> print(np.around(loss, 5)) + [0. 0. 0. 0.] + """ + if backend.is_tensor(x): + input_shape = backend.core.shape(x) + # Shrink the last dimension if the shape is (..., 1). + if ( + input_shape is not None + and len(input_shape) > 1 + and input_shape[-1] == 1 + ): + newshape = tuple(input_shape[:-1]) + x = backend.numpy.reshape(x, newshape) + return backend.nn.one_hot(x, num_classes) + x = np.array(x, dtype="int64") + input_shape = x.shape + + # Shrink the last dimension if the shape is (..., 1). 
+ if input_shape and input_shape[-1] == 1 and len(input_shape) > 1: + input_shape = tuple(input_shape[:-1]) + + x = x.reshape(-1) + if not num_classes: + num_classes = np.max(x) + 1 + batch_size = x.shape[0] + categorical = np.zeros((batch_size, num_classes)) + categorical[np.arange(batch_size), x] = 1 + output_shape = input_shape + (num_classes,) + categorical = np.reshape(categorical, output_shape) + return categorical + + +def encode_categorical_inputs( + inputs, + output_mode, + depth, + dtype, + sparse=False, + count_weights=None, + backend_module=None, +): + """Encodes categorical inputs according to output_mode. + + Args: + inputs: the inputs to encode. + output_mode: one of `"int"`, `"one_hot"`, `"multi_hot"`, or `"count"`. + depth: number of classes, this will be the last dimension of the output. + dtype: the dtype of the output, unless `count_weights` is not `None`. + sparse: whether the output should be sparse for backends supporting it. + count_weights: weights to apply if `output_mode` is `"count"`. + backend_module: the backend to use instead of the current one. + + Returns: the encoded inputs. + """ + backend_module = backend_module or backend + + if output_mode == "int": + return backend_module.cast(inputs, dtype=dtype) + + rank_of_inputs = len(backend_module.shape(inputs)) + + # In all cases, we should uprank scalar input to a single sample. + if rank_of_inputs == 0: + inputs = backend_module.numpy.expand_dims(inputs, -1) + rank_of_inputs = 1 + + if ( + backend_module.__name__.endswith("tensorflow") + and rank_of_inputs <= 2 + and output_mode in ("multi_hot", "count") + ): + # TF only fastpath. Uses bincount; faster. Doesn't work for rank 3+. 
+ try: + return tf_utils.tf_encode_categorical_inputs( + inputs, + output_mode, + depth, + dtype=dtype, + sparse=sparse, + count_weights=count_weights, + ) + except ValueError: + pass + + if output_mode == "multi_hot": + return backend_module.nn.multi_hot( + inputs, depth, dtype=dtype, sparse=sparse + ) + elif output_mode == "one_hot": + input_shape = backend_module.core.shape(inputs) + # Shrink the last dimension if the shape is (..., 1). + if ( + input_shape is not None + and len(input_shape) > 1 + and input_shape[-1] == 1 + ): + newshape = tuple(input_shape[:-1]) + inputs = backend_module.numpy.reshape(inputs, newshape) + return backend_module.nn.one_hot( + inputs, depth, dtype=dtype, sparse=sparse + ) + elif output_mode == "count": + # We don't use `ops.bincount` because its output has a dynamic shape + # (last dimension is the highest value of `inputs`). We implement a + # narrower use case where `minlength` and `maxlength` (not supported by + # `ops.bincount`) are the same and static value: `depth`. We also don't + # need to support indices that are negative or greater than `depth`. 
+ reduction_axis = 1 if len(inputs.shape) > 1 else 0 + + if count_weights is not None: + dtype = count_weights.dtype + one_hot_encoding = backend_module.nn.one_hot( + inputs, depth, dtype=dtype, sparse=sparse + ) + if count_weights is not None: + count_weights = backend_module.numpy.expand_dims(count_weights, -1) + one_hot_encoding = one_hot_encoding * count_weights + + outputs = backend_module.numpy.sum( + one_hot_encoding, + axis=reduction_axis, + ) + return outputs + + +def build_pos_neg_masks( + query_labels, + key_labels, + remove_diagonal=True, +): + from keras.src import ops + + if ops.ndim(query_labels) == 1: + query_labels = ops.reshape(query_labels, (-1, 1)) + + if ops.ndim(key_labels) == 1: + key_labels = ops.reshape(key_labels, (-1, 1)) + + positive_mask = ops.equal(query_labels, ops.transpose(key_labels)) + negative_mask = ops.logical_not(positive_mask) + + if remove_diagonal: + positive_mask = ops.logical_and( + positive_mask, + ~ops.eye( + ops.size(query_labels), + ops.size(key_labels), + k=0, + dtype="bool", + ), + ) + + return positive_mask, negative_mask diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/progbar.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/progbar.py new file mode 100644 index 0000000000000000000000000000000000000000..e2b61a041b02e7559c2345329d97febaf04f7236 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/progbar.py @@ -0,0 +1,269 @@ +import math +import os +import sys +import time + +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.utils import io_utils + + +@keras_export("keras.utils.Progbar") +class Progbar: + """Displays a progress bar. + + Args: + target: Total number of steps expected, None if unknown. + width: Progress bar width on screen. 
+ verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) + stateful_metrics: Iterable of string names of metrics that should *not* + be averaged over time. Metrics in this list will be displayed as-is. + All others will be averaged by the progbar before display. + interval: Minimum visual progress update interval (in seconds). + unit_name: Display name for step counts (usually "step" or "sample"). + """ + + def __init__( + self, + target, + width=20, + verbose=1, + interval=0.05, + stateful_metrics=None, + unit_name="step", + ): + self.target = target + self.width = width + self.verbose = verbose + self.interval = interval + self.unit_name = unit_name + if stateful_metrics: + self.stateful_metrics = set(stateful_metrics) + else: + self.stateful_metrics = set() + + self._dynamic_display = ( + (hasattr(sys.stdout, "isatty") and sys.stdout.isatty()) + or "ipykernel" in sys.modules + or "posix" in sys.modules + or "PYCHARM_HOSTED" in os.environ + ) + self._seen_so_far = 0 + # We use a dict + list to avoid garbage collection + # issues found in OrderedDict + self._values = {} + self._values_order = [] + self._start = time.time() + self._last_update = 0 + self._time_at_epoch_start = self._start + self._time_after_first_step = None + self._prev_total_width = 0 + + def update(self, current, values=None, finalize=None): + """Updates the progress bar. + + Args: + current: Index of current step. + values: List of tuples: `(name, value_for_last_step)`. If `name` is + in `stateful_metrics`, `value_for_last_step` will be displayed + as-is. Else, an average of the metric over time will be + displayed. + finalize: Whether this is the last update for the progress bar. If + `None`, defaults to `current >= self.target`. 
+ """ + if finalize is None: + if self.target is None: + finalize = False + else: + finalize = current >= self.target + + values = values or [] + for k, v in values: + if k not in self._values_order: + self._values_order.append(k) + if k not in self.stateful_metrics: + # In the case that progress bar doesn't have a target value in + # the first epoch, both on_batch_end and on_epoch_end will be + # called, which will cause 'current' and 'self._seen_so_far' to + # have the same value. Force the minimal value to 1 here, + # otherwise stateful_metric will be 0s. + value_base = max(current - self._seen_so_far, 1) + if k not in self._values: + self._values[k] = [v * value_base, value_base] + else: + self._values[k][0] += v * value_base + self._values[k][1] += value_base + else: + # Stateful metrics output a numeric value. This representation + # means "take an average from a single value" but keeps the + # numeric formatting. + self._values[k] = [v, 1] + self._seen_so_far = current + + message = "" + special_char_len = 0 + now = time.time() + time_per_unit = self._estimate_step_duration(current, now) + + if self.verbose == 1: + if now - self._last_update < self.interval and not finalize: + return + + if self._dynamic_display: + message += "\b" * self._prev_total_width + message += "\r" + else: + message += "\n" + + if self.target is not None: + numdigits = int(math.log10(self.target)) + 1 + bar = ("%" + str(numdigits) + "d/%d") % (current, self.target) + bar = f"\x1b[1m{bar}\x1b[0m " + special_char_len += 8 + prog = float(current) / self.target + prog_width = int(self.width * prog) + + if prog_width > 0: + bar += "\33[32m" + "━" * prog_width + "\x1b[0m" + special_char_len += 9 + bar += "\33[37m" + "━" * (self.width - prog_width) + "\x1b[0m" + special_char_len += 9 + + else: + bar = "%7d/Unknown" % current + message += bar + + # Add ETA if applicable + if self.target is not None and not finalize: + eta = time_per_unit * (self.target - current) + if eta > 3600: + 
eta_format = "%d:%02d:%02d" % ( + eta // 3600, + (eta % 3600) // 60, + eta % 60, + ) + elif eta > 60: + eta_format = "%d:%02d" % (eta // 60, eta % 60) + else: + eta_format = "%ds" % eta + info = f" \x1b[1m{eta_format}\x1b[0m" + else: + # Time elapsed since start, in seconds + info = f" \x1b[1m{now - self._start:.0f}s\x1b[0m" + special_char_len += 8 + + # Add time/step + info += self._format_time(time_per_unit, self.unit_name) + + # Add metrics + for k in self._values_order: + info += f" - {k}:" + if isinstance(self._values[k], list): + avg = backend.convert_to_numpy( + backend.numpy.mean( + self._values[k][0] / max(1, self._values[k][1]) + ) + ) + avg = float(avg) + if abs(avg) > 1e-3: + info += f" {avg:.4f}" + else: + info += f" {avg:.4e}" + else: + info += f" {self._values[k]}" + message += info + + total_width = len(bar) + len(info) - special_char_len + if self._prev_total_width > total_width: + message += " " * (self._prev_total_width - total_width) + if finalize: + message += "\n" + + io_utils.print_msg(message, line_break=False) + self._prev_total_width = total_width + message = "" + + elif self.verbose == 2: + if finalize: + numdigits = int(math.log10(self.target)) + 1 + count = ("%" + str(numdigits) + "d/%d") % (current, self.target) + info = f"{count} - {now - self._start:.0f}s" + info += " -" + self._format_time(time_per_unit, self.unit_name) + for k in self._values_order: + info += f" - {k}:" + avg = backend.convert_to_numpy( + backend.numpy.mean( + self._values[k][0] / max(1, self._values[k][1]) + ) + ) + if avg > 1e-3: + info += f" {avg:.4f}" + else: + info += f" {avg:.4e}" + info += "\n" + message += info + io_utils.print_msg(message, line_break=False) + message = "" + + self._last_update = now + + def add(self, n, values=None): + self.update(self._seen_so_far + n, values) + + def _format_time(self, time_per_unit, unit_name): + """format a given duration to display to the user. 
+ + Given the duration, this function formats it in either milliseconds + or seconds and displays the unit (i.e. ms/step or s/epoch). + + Args: + time_per_unit: the duration to display + unit_name: the name of the unit to display + + Returns: + A string with the correctly formatted duration and units + """ + formatted = "" + if time_per_unit >= 1 or time_per_unit == 0: + formatted += f" {time_per_unit:.0f}s/{unit_name}" + elif time_per_unit >= 1e-3: + formatted += f" {time_per_unit * 1000.0:.0f}ms/{unit_name}" + else: + formatted += f" {time_per_unit * 1000000.0:.0f}us/{unit_name}" + return formatted + + def _estimate_step_duration(self, current, now): + """Estimate the duration of a single step. + + Given the step number `current` and the corresponding time `now` this + function returns an estimate for how long a single step takes. If this + is called before one step has been completed (i.e. `current == 0`) then + zero is given as an estimate. The duration estimate ignores the duration + of the (assumed to be non-representative) first step for estimates when + more steps are available (i.e. `current>1`). + + Args: + current: Index of current step. + now: The current time. + + Returns: Estimate of the duration of a single step. + """ + if current: + # there are a few special scenarios here: + # 1) somebody is calling the progress bar without ever supplying + # step 1 + # 2) somebody is calling the progress bar and supplies step one + # multiple times, e.g. 
as part of a finalizing call + # in these cases, we just fall back to the simple calculation + if self._time_after_first_step is not None and current > 1: + time_per_unit = (now - self._time_after_first_step) / ( + current - 1 + ) + else: + time_per_unit = (now - self._start) / current + + if current == 1: + self._time_after_first_step = now + return time_per_unit + else: + return 0 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/python_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/python_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..312871675b802d1d1d853dcd8a1476707b7c8e64 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/python_utils.py @@ -0,0 +1,177 @@ +import binascii +import codecs +import marshal +import os +import types as python_types + + +def default(method): + """Decorates a method to detect overrides in subclasses.""" + method._is_default = True + return method + + +def is_default(method): + """Check if a method is decorated with the `default` wrapper.""" + return getattr(method, "_is_default", False) + + +def func_dump(func): + """Serializes a user-defined function. + + Args: + func: the function to serialize. + + Returns: + A tuple `(code, defaults, closure)`. + """ + if os.name == "nt": + raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/") + code = codecs.encode(raw_code, "base64").decode("ascii") + else: + raw_code = marshal.dumps(func.__code__) + code = codecs.encode(raw_code, "base64").decode("ascii") + defaults = func.__defaults__ + if func.__closure__: + closure = tuple(c.cell_contents for c in func.__closure__) + else: + closure = None + return code, defaults, closure + + +def func_load(code, defaults=None, closure=None, globs=None): + """Deserializes a user defined function. + + Args: + code: bytecode of the function. + defaults: defaults of the function. 
+ closure: closure of the function. + globs: dictionary of global objects. + + Returns: + A function object. + """ + if isinstance(code, (tuple, list)): # unpack previous dump + code, defaults, closure = code + if isinstance(defaults, list): + defaults = tuple(defaults) + + def ensure_value_to_cell(value): + """Ensures that a value is converted to a python cell object. + + Args: + value: Any value that needs to be casted to the cell type + + Returns: + A value wrapped as a cell object (see function "func_load") + """ + + def dummy_fn(): + value # just access it so it gets captured in .__closure__ + + cell_value = dummy_fn.__closure__[0] + if not isinstance(value, type(cell_value)): + return cell_value + return value + + if closure is not None: + closure = tuple(ensure_value_to_cell(_) for _ in closure) + try: + raw_code = codecs.decode(code.encode("ascii"), "base64") + except (UnicodeEncodeError, binascii.Error): + raw_code = code.encode("raw_unicode_escape") + code = marshal.loads(raw_code) + if globs is None: + globs = globals() + return python_types.FunctionType( + code, globs, name=code.co_name, argdefs=defaults, closure=closure + ) + + +def to_list(x): + """Normalizes a list/tensor into a list. + + If a tensor is passed, we return + a list of size 1 containing the tensor. + + Args: + x: target object to be normalized. + + Returns: + A list. + """ + if isinstance(x, list): + return x + return [x] + + +def remove_long_seq(maxlen, seq, label): + """Removes sequences that exceed the maximum length. + + Args: + maxlen: Int, maximum length of the output sequences. + seq: List of lists, where each sublist is a sequence. + label: List where each element is an integer. + + Returns: + new_seq, new_label: shortened lists for `seq` and `label`. 
+ """ + new_seq, new_label = [], [] + for x, y in zip(seq, label): + if len(x) < maxlen: + new_seq.append(x) + new_label.append(y) + return new_seq, new_label + + +def removeprefix(x, prefix): + """Backport of `removeprefix` from PEP-616 (Python 3.9+)""" + + if len(prefix) > 0 and x.startswith(prefix): + return x[len(prefix) :] + else: + return x + + +def removesuffix(x, suffix): + """Backport of `removesuffix` from PEP-616 (Python 3.9+)""" + + if len(suffix) > 0 and x.endswith(suffix): + return x[: -len(suffix)] + else: + return x + + +def remove_by_id(lst, value): + """Remove a value from a list by id.""" + for i, v in enumerate(lst): + if id(v) == id(value): + del lst[i] + return + + +def pythonify_logs(logs): + """Flatten and convert log values to Python-native types. + + This function attempts to convert dict value by `float(value)` and skips + the conversion if it fails. + + Args: + logs: A dict containing log values. + + Returns: + A flattened dict with values converted to Python-native types if + possible. 
+ """ + logs = logs or {} + result = {} + for key, value in sorted(logs.items()): + if isinstance(value, dict): + result.update(pythonify_logs(value)) + else: + try: + value = float(value) + except: + pass + result[key] = value + return result diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/rng_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/rng_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..15804d0e43e698f4e28f5b73bfcbdc3c5a76ce04 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/rng_utils.py @@ -0,0 +1,56 @@ +import random + +import numpy as np + +from keras.src import backend +from keras.src.api_export import keras_export +from keras.src.utils.module_utils import tensorflow as tf + + +@keras_export("keras.utils.set_random_seed") +def set_random_seed(seed): + """Sets all random seeds (Python, NumPy, and backend framework, e.g. TF). + + You can use this utility to make almost any Keras program fully + deterministic. Some limitations apply in cases where network communications + are involved (e.g. parameter server distribution), which creates additional + sources of randomness, or when certain non-deterministic cuDNN ops are + involved. + + Calling this utility is equivalent to the following: + + ```python + import random + random.seed(seed) + + import numpy as np + np.random.seed(seed) + + import tensorflow as tf # Only if TF is installed + tf.random.set_seed(seed) + + import torch # Only if the backend is 'torch' + torch.manual_seed(seed) + ``` + + Note that the TensorFlow seed is set even if you're not using TensorFlow + as your backend framework, since many workflows leverage `tf.data` + pipelines (which feature random shuffling). Likewise many workflows + might leverage NumPy APIs. + + Arguments: + seed: Integer, the random seed to use. 
+ """ + if not isinstance(seed, int): + raise ValueError( + "Expected `seed` argument to be an integer. " + f"Received: seed={seed} (of type {type(seed)})" + ) + random.seed(seed) + np.random.seed(seed) + if tf.available: + tf.random.set_seed(seed) + if backend.backend() == "torch": + import torch + + torch.manual_seed(seed) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/sequence_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/sequence_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb27ef25de6e29e4501dcc9aaeb636d21b0930f --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/sequence_utils.py @@ -0,0 +1,139 @@ +import numpy as np + +from keras.src.api_export import keras_export + + +@keras_export( + [ + "keras.utils.pad_sequences", + "keras.preprocessing.sequence.pad_sequences", + ] +) +def pad_sequences( + sequences, + maxlen=None, + dtype="int32", + padding="pre", + truncating="pre", + value=0.0, +): + """Pads sequences to the same length. + + This function transforms a list (of length `num_samples`) + of sequences (lists of integers) + into a 2D NumPy array of shape `(num_samples, num_timesteps)`. + `num_timesteps` is either the `maxlen` argument if provided, + or the length of the longest sequence in the list. + + Sequences that are shorter than `num_timesteps` + are padded with `value` until they are `num_timesteps` long. + + Sequences longer than `num_timesteps` are truncated + so that they fit the desired length. + + The position where padding or truncation happens is determined by + the arguments `padding` and `truncating`, respectively. + Pre-padding or removing values from the beginning of the sequence is the + default. 
+ + >>> sequence = [[1], [2, 3], [4, 5, 6]] + >>> keras.utils.pad_sequences(sequence) + array([[0, 0, 1], + [0, 2, 3], + [4, 5, 6]], dtype=int32) + + >>> keras.utils.pad_sequences(sequence, value=-1) + array([[-1, -1, 1], + [-1, 2, 3], + [ 4, 5, 6]], dtype=int32) + + >>> keras.utils.pad_sequences(sequence, padding='post') + array([[1, 0, 0], + [2, 3, 0], + [4, 5, 6]], dtype=int32) + + >>> keras.utils.pad_sequences(sequence, maxlen=2) + array([[0, 1], + [2, 3], + [5, 6]], dtype=int32) + + Args: + sequences: List of sequences (each sequence is a list of integers). + maxlen: Optional Int, maximum length of all sequences. If not provided, + sequences will be padded to the length of the longest individual + sequence. + dtype: (Optional, defaults to `"int32"`). Type of the output sequences. + To pad sequences with variable length strings, you can use `object`. + padding: String, "pre" or "post" (optional, defaults to `"pre"`): + pad either before or after each sequence. + truncating: String, "pre" or "post" (optional, defaults to `"pre"`): + remove values from sequences larger than + `maxlen`, either at the beginning or at the end of the sequences. + value: Float or String, padding value. (Optional, defaults to `0.`) + + Returns: + NumPy array with shape `(len(sequences), maxlen)` + """ + if not hasattr(sequences, "__len__"): + raise ValueError("`sequences` must be iterable.") + num_samples = len(sequences) + + lengths = [] + sample_shape = () + flag = True + + # take the sample shape from the first non empty sequence + # checking for consistency in the main loop below. + + for x in sequences: + try: + lengths.append(len(x)) + if flag and len(x): + sample_shape = np.asarray(x).shape[1:] + flag = False + except TypeError as e: + raise ValueError( + "`sequences` must be a list of iterables. 
" + f"Found non-iterable: {str(x)}" + ) from e + + if maxlen is None: + maxlen = np.max(lengths) + + is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype( + dtype, np.str_ + ) + if isinstance(value, str) and dtype is not object and not is_dtype_str: + raise ValueError( + f"`dtype` {dtype} is not compatible with `value`'s type: " + f"{type(value)}\nYou should set `dtype=object` for variable length " + "strings." + ) + + x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype) + for idx, s in enumerate(sequences): + if not len(s): + continue # empty list/array was found + if truncating == "pre": + trunc = s[-maxlen:] + elif truncating == "post": + trunc = s[:maxlen] + else: + raise ValueError(f'Truncating type "{truncating}" not understood') + + # check `trunc` has expected shape + trunc = np.asarray(trunc, dtype=dtype) + if trunc.shape[1:] != sample_shape: + raise ValueError( + f"Shape of sample {trunc.shape[1:]} of sequence at " + f"position {idx} is different from expected shape " + f"{sample_shape}" + ) + + if padding == "post": + x[idx, : len(trunc)] = trunc + elif padding == "pre": + x[idx, -len(trunc) :] = trunc + else: + raise ValueError(f'Padding type "{padding}" not understood') + return x diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/summary_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/summary_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2e67b5c2f8410f9c5f2ca31440d33656a8f04cc2 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/summary_utils.py @@ -0,0 +1,443 @@ +import functools +import math +import re +import shutil + +import rich +import rich.console +import rich.markup + +# See https://github.com/keras-team/keras/issues/448 +# for below imports +import rich.table + +from keras.src import backend +from keras.src import tree +from keras.src.utils import dtype_utils +from 
keras.src.utils import io_utils + + +def count_params(weights): + shapes = [v.shape for v in weights] + return int(sum(math.prod(p) for p in shapes)) + + +@functools.lru_cache(512) +def _compute_memory_size(shape, dtype): + weight_counts = math.prod(shape) + dtype = backend.standardize_dtype(dtype) + per_param_size = dtype_utils.dtype_size(dtype) + return weight_counts * per_param_size + + +def weight_memory_size(weights): + """Compute the memory footprint for weights based on their dtypes. + + Args: + weights: An iterable contains the weights to compute weight size. + + Returns: + The total memory size (in Bytes) of the weights. + """ + unique_weights = {id(w): w for w in weights}.values() + total_memory_size = 0 + for w in unique_weights: + total_memory_size += _compute_memory_size(w.shape, w.dtype) + return total_memory_size / 8 + + +def readable_memory_size(weight_memory_size): + """Convert the weight memory size (Bytes) to a readable string.""" + units = ["B", "KB", "MB", "GB", "TB", "PB"] + scale = 1024 + for unit in units: + if weight_memory_size / scale < 1: + return "{:.2f} {}".format(weight_memory_size, unit) + else: + weight_memory_size /= scale + return "{:.2f} {}".format(weight_memory_size, units[-1]) + + +def highlight_number(x): + """Themes numbers in a summary using rich markup. + + We use a separate color for `None`s, e.g. in a layer shape. + """ + if x is None: + return f"[color(45)]{x}[/]" + else: + return f"[color(34)]{x}[/]" + + +def highlight_symbol(x): + """Themes keras symbols in a summary using rich markup.""" + return f"[color(33)]{x}[/]" + + +def bold_text(x, color=None): + """Bolds text using rich markup.""" + if color: + return f"[bold][color({color})]{x}[/][/]" + return f"[bold]{x}[/]" + + +def format_layer_shape(layer): + if not layer._inbound_nodes and not layer._build_shapes_dict: + return "?" 
+ + def format_shape(shape): + highlighted = [highlight_number(x) for x in shape] + return "(" + ", ".join(highlighted) + ")" + + # There are 2 approaches to get output shapes: + # 1. Using `layer._inbound_nodes`, which is possible if the model is a + # Sequential or Functional. + # 2. Using `layer._build_shapes_dict`, which is possible if users manually + # build the layer. + if len(layer._inbound_nodes) > 0: + for i in range(len(layer._inbound_nodes)): + outputs = layer._inbound_nodes[i].output_tensors + output_shapes = tree.map_structure( + lambda x: format_shape(x.shape), outputs + ) + else: + try: + if hasattr(layer, "output_shape"): + output_shapes = format_shape(layer.output_shape) + else: + outputs = layer.compute_output_shape(**layer._build_shapes_dict) + output_shapes = tree.map_shape_structure( + lambda x: format_shape(x), outputs + ) + except NotImplementedError: + return "?" + if len(output_shapes) == 1: + return output_shapes[0] + out = str(output_shapes) + out = out.replace("'", "") + return out + + +def print_summary( + model, + line_length=None, + positions=None, + print_fn=None, + expand_nested=False, + show_trainable=False, + layer_range=None, +): + """Prints a summary of a model. + + Args: + model: Keras model instance. + line_length: Total length of printed lines + (e.g. set this to adapt the display to different + terminal window sizes). + positions: Relative or absolute positions of log elements in each line. + If not provided, defaults to `[0.3, 0.6, 0.70, 1.]`. + print_fn: Print function to use. + It will be called on each line of the summary. + You can set it to a custom function + in order to capture the string summary. + It defaults to `print` (prints to stdout). + expand_nested: Whether to expand the nested models. + If not provided, defaults to `False`. + show_trainable: Whether to show if a layer is trainable. + If not provided, defaults to `False`. 
        layer_range: List or tuple containing two strings,
            the starting layer name and ending layer name (both inclusive),
            indicating the range of layers to be printed in the summary. The
            strings could also be regexes instead of an exact name. In this
            case, the starting layer will be the first layer that matches
            `layer_range[0]` and the ending layer will be the last element that
            matches `layer_range[1]`. By default (`None`) all
            layers in the model are included in the summary.
    """
    # Local import, presumably to avoid a circular dependency between
    # utils and models — TODO confirm.
    from keras.src.models import Functional
    from keras.src.models import Sequential

    # When logging is non-interactive, route output through the io_utils
    # logger unless the caller supplied their own print function.
    if not print_fn and not io_utils.is_interactive_logging_enabled():
        print_fn = io_utils.print_msg

    if isinstance(model, Sequential):
        sequential_like = True
        layers = model.layers
    elif not isinstance(model, Functional):
        # We treat subclassed models as a simple sequence of layers, for
        # logging purposes.
        sequential_like = True
        layers = model.layers
    else:
        layers = model._operations
        sequential_like = True
        nodes_by_depth = model._nodes_by_depth.values()
        nodes = []
        for v in nodes_by_depth:
            if (len(v) > 1) or (
                len(v) == 1 and len(tree.flatten(v[0].input_tensors)) > 1
            ):
                # if the model has multiple nodes
                # or if the nodes have multiple inbound_layers
                # the model is no longer sequential
                sequential_like = False
                break
            nodes += v
        if sequential_like:
            # search for shared layers
            for layer in model.layers:
                flag = False
                for node in layer._inbound_nodes:
                    if node in nodes:
                        if flag:
                            sequential_like = False
                            break
                        else:
                            flag = True
                if not sequential_like:
                    break

    if sequential_like:
        default_line_length = 88
        positions = positions or [0.45, 0.80, 1.0]
        # header names for the different log elements
        header = ["Layer (type)", "Output Shape", "Param #"]
        alignment = ["left", "left", "right"]
    else:
        default_line_length = 108
        positions = positions or [0.3, 0.56, 0.74, 1.0]
        # header names for the different log elements
        header = ["Layer (type)", "Output Shape", "Param #", "Connected to"]
        alignment = ["left", "left", "right", "left"]
        relevant_nodes = []
        for v in model._nodes_by_depth.values():
            relevant_nodes += v

    if show_trainable:
        # Make room for the extra "Trainable" column by shrinking the others.
        default_line_length += 12
        positions = [p * 0.90 for p in positions] + [1.0]
        header.append("Trainable")
        alignment.append("center")

    # Compute columns widths
    default_line_length = min(
        default_line_length, shutil.get_terminal_size().columns - 4
    )
    line_length = line_length or default_line_length
    column_widths = []
    current = 0
    for pos in positions:
        width = int(pos * line_length) - current
        if width < 4:
            raise ValueError("Insufficient console width to print summary.")
        column_widths.append(width)
        current += width

    # Render summary as a rich table.
    columns = []
    # Right align parameter counts.
    for i, name in enumerate(header):
        column = rich.table.Column(
            name,
            justify=alignment[i],
            width=column_widths[i],
        )
        columns.append(column)

    table = rich.table.Table(*columns, width=line_length, show_lines=True)

    def get_connections(layer):
        # Build the "Connected to" cell: "src_name[node_idx][tensor_idx], ...".
        connections = ""
        for node in layer._inbound_nodes:
            if relevant_nodes and node not in relevant_nodes:
                # node is not part of the current network
                continue
            for kt in node.input_tensors:
                keras_history = kt._keras_history
                inbound_layer = keras_history.operation
                node_index = highlight_number(keras_history.node_index)
                tensor_index = highlight_number(keras_history.tensor_index)
                if connections:
                    connections += ", "
                connections += (
                    f"{inbound_layer.name}[{node_index}][{tensor_index}]"
                )
        if not connections:
            connections = "-"
        return connections

    def get_layer_fields(layer, prefix=""):
        # One table row (list of cell strings) for a single layer.
        output_shape = format_layer_shape(layer)
        name = prefix + layer.name
        cls_name = layer.__class__.__name__
        name = rich.markup.escape(name)
        name += f" ({highlight_symbol(rich.markup.escape(cls_name))})"

        if not hasattr(layer, "built"):
            params = highlight_number(0)
        elif not layer.built:
            params = highlight_number(0) + " (unbuilt)"
        else:
            params = highlight_number(f"{layer.count_params():,}")

        fields = [name, output_shape, params]
        if not sequential_like:
            fields.append(get_connections(layer))
        if show_trainable:
            if hasattr(layer, "weights") and len(layer.weights) > 0:
                fields.append(
                    bold_text("Y", color=34)
                    if layer.trainable
                    else bold_text("N", color=9)
                )
            else:
                # No weights: trainability is not applicable.
                fields.append(bold_text("-"))
        return fields

    def print_layer(layer, nested_level=0):
        # Returns the rows for `layer`, recursing into sublayers when
        # `expand_nested` is set; nesting depth is shown via indentation.
        if nested_level:
            prefix = " " * nested_level + "└" + " "
        else:
            prefix = ""

        fields = get_layer_fields(layer, prefix=prefix)

        rows = [fields]
        if expand_nested and hasattr(layer, "layers") and layer.layers:
            nested_layers = layer.layers
            nested_level += 1
            for i in range(len(nested_layers)):
                rows.extend(
                    print_layer(nested_layers[i], nested_level=nested_level)
                )
        return rows

    # Render all layers to the rich table.
    layer_range = get_layer_index_bound_by_layer_name(layers, layer_range)
    for layer in layers[layer_range[0] : layer_range[1]]:
        for row in print_layer(layer):
            table.add_row(*row)

    # After the table, append information about parameter count and size.
    if hasattr(model, "_collected_trainable_weights"):
        trainable_count = count_params(model._collected_trainable_weights)
        trainable_memory_size = weight_memory_size(
            model._collected_trainable_weights
        )
    else:
        trainable_count = count_params(model.trainable_weights)
        trainable_memory_size = weight_memory_size(model.trainable_weights)

    non_trainable_count = count_params(model.non_trainable_weights)
    non_trainable_memory_size = weight_memory_size(model.non_trainable_weights)

    if model.compiled and model.optimizer and model.optimizer.built:
        optimizer_weight_count = count_params(model.optimizer.variables)
        optimizer_memory_size = weight_memory_size(model.optimizer.variables)
        optimizer_built = True
    else:
        optimizer_weight_count = 0
        optimizer_memory_size = 0
        optimizer_built = False

    total_count = trainable_count + non_trainable_count + optimizer_weight_count
    total_memory_size = (
        trainable_memory_size
        + non_trainable_memory_size
        + optimizer_memory_size
    )

    # Create a rich console for printing. Capture for non-interactive logging.
    if print_fn:
        console = rich.console.Console(
            highlight=False, force_terminal=False, color_system=None
        )
        console.begin_capture()
    else:
        console = rich.console.Console(highlight=False)

    # Print the to the console.
    console.print(bold_text(f'Model: "{rich.markup.escape(model.name)}"'))
    console.print(table)
    console.print(
        bold_text(" Total params: ")
        + highlight_number(f"{total_count:,}")
        + f" ({readable_memory_size(total_memory_size)})"
    )
    console.print(
        bold_text(" Trainable params: ")
        + highlight_number(f"{trainable_count:,}")
        + f" ({readable_memory_size(trainable_memory_size)})"
    )
    console.print(
        bold_text(" Non-trainable params: ")
        + highlight_number(f"{non_trainable_count:,}")
        + f" ({readable_memory_size(non_trainable_memory_size)})"
    )
    if optimizer_built:
        console.print(
            bold_text(" Optimizer params: ")
            + highlight_number(f"{optimizer_weight_count:,}")
            + f" ({readable_memory_size(optimizer_memory_size)})"
        )

    # Output captured summary for non-interactive logging.
    if print_fn:
        if print_fn is io_utils.print_msg:
            print_fn(console.end_capture(), line_break=False)
        else:
            print_fn(console.end_capture())


def get_layer_index_bound_by_layer_name(layers, layer_range=None):
    """Get the layer indexes from the model based on layer names.

    The layer indexes can be used to slice the model into sub models for
    display.

    Args:
        model: `Model` instance.
        layer_names: a list or tuple of 2 strings, the starting layer name and
            ending layer name (both inclusive) for the result. All layers will
            be included when `None` is provided.

    Returns:
        The index value of layer based on its unique name (layer_names).
        Output will be [first_layer_index, last_layer_index + 1].
    """
    if layer_range is not None:
        if len(layer_range) != 2:
            raise ValueError(
                "layer_range must be a list or tuple of length 2. Received: "
                f"layer_range = {layer_range} of length {len(layer_range)}"
            )
        if not isinstance(layer_range[0], str) or not isinstance(
            layer_range[1], str
        ):
            raise ValueError(
                "layer_range should contain string type only. "
                f"Received: {layer_range}"
            )
    else:
        # No range requested: include every layer.
        return [0, len(layers)]

    # Indices of all layers whose names match each bound; the bounds may be
    # regexes (`re.match` anchors at the start of the name).
    lower_index = [
        idx
        for idx, layer in enumerate(layers)
        if re.match(layer_range[0], layer.name)
    ]
    upper_index = [
        idx
        for idx, layer in enumerate(layers)
        if re.match(layer_range[1], layer.name)
    ]

    if not lower_index or not upper_index:
        raise ValueError(
            "Passed layer_names do not match the layer names in the model. "
            f"Received: {layer_range}"
        )

    if min(lower_index) > max(upper_index):
        # Bounds were given in reverse order: swap them.
        return [min(upper_index), max(lower_index) + 1]
    return [min(lower_index), max(upper_index) + 1]
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/text_dataset_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/text_dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a7613481857060871be262b71ec7881c177936c6 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/text_dataset_utils.py @@ -0,0 +1,291 @@
import numpy as np

from keras.src.api_export import keras_export
from keras.src.utils import dataset_utils
from keras.src.utils.module_utils import tensorflow as tf


@keras_export(
    [
        "keras.utils.text_dataset_from_directory",
        "keras.preprocessing.text_dataset_from_directory",
    ]
)
def text_dataset_from_directory(
    directory,
    labels="inferred",
    label_mode="int",
    class_names=None,
    batch_size=32,
    max_length=None,
    shuffle=True,
    seed=None,
    validation_split=None,
    subset=None,
    follow_links=False,
    verbose=True,
):
    """Generates a `tf.data.Dataset` from text files in a directory.

    If your directory structure is:

    ```
    main_directory/
    ...class_a/
    ......a_text_1.txt
    ......a_text_2.txt
    ...class_b/
    ......b_text_1.txt
    ......b_text_2.txt
    ```

    Then calling `text_dataset_from_directory(main_directory,
    labels='inferred')` will return a `tf.data.Dataset` that yields batches of
    texts from the subdirectories `class_a` and `class_b`, together with labels
    0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

    Only `.txt` files are supported at this time.

    Args:
        directory: Directory where the data is located.
            If `labels` is `"inferred"`, it should contain
            subdirectories, each containing text files for a class.
            Otherwise, the directory structure is ignored.
        labels: Either `"inferred"`
            (labels are generated from the directory structure),
            `None` (no labels),
            or a list/tuple of integer labels of the same size as the number of
            text files found in the directory. Labels should be sorted according
            to the alphanumeric order of the text file paths
            (obtained via `os.walk(directory)` in Python).
        label_mode: String describing the encoding of `labels`. Options are:
            - `"int"`: means that the labels are encoded as integers
                (e.g. for `sparse_categorical_crossentropy` loss).
            - `"categorical"` means that the labels are
                encoded as a categorical vector
                (e.g. for `categorical_crossentropy` loss).
            - `"binary"` means that the labels (there can be only 2)
                are encoded as `float32` scalars with values 0 or 1
                (e.g. for `binary_crossentropy`).
            - `None` (no labels).
        class_names: Only valid if `"labels"` is `"inferred"`.
            This is the explicit list of class names
            (must match names of subdirectories). Used to control the order
            of the classes (otherwise alphanumerical order is used).
        batch_size: Size of the batches of data.
            If `None`, the data will not be batched
            (the dataset will yield individual samples).
            Defaults to `32`.
        max_length: Maximum size of a text string. Texts longer than this will
            be truncated to `max_length`.
        shuffle: Whether to shuffle the data.
            If set to `False`, sorts the data in alphanumeric order.
            Defaults to `True`.
        seed: Optional random seed for shuffling and transformations.
        validation_split: Optional float between 0 and 1,
            fraction of data to reserve for validation.
        subset: Subset of the data to return.
            One of `"training"`, `"validation"` or `"both"`.
            Only used if `validation_split` is set.
            When `subset="both"`, the utility returns a tuple of two datasets
            (the training and validation datasets respectively).
        follow_links: Whether to visits subdirectories pointed to by symlinks.
            Defaults to `False`.
        verbose: Whether to display number information on classes and
            number of files found. Defaults to `True`.

    Returns:

    A `tf.data.Dataset` object.

    - If `label_mode` is `None`, it yields `string` tensors of shape
        `(batch_size,)`, containing the contents of a batch of text files.
    - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
        has shape `(batch_size,)` and `labels` follows the format described
        below.

    Rules regarding labels format:

    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
        `(batch_size,)`.
    - if `label_mode` is `binary`, the labels are a `float32` tensor of
        1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorical`, the labels are a `float32` tensor
        of shape `(batch_size, num_classes)`, representing a one-hot
        encoding of the class index.
    """
    # Validate the `labels` / `class_names` combination up front.
    if labels not in ("inferred", None):
        if not isinstance(labels, (list, tuple)):
            raise ValueError(
                "`labels` argument should be a list/tuple of integer labels, "
                "of the same size as the number of text files in the target "
                "directory. If you wish to infer the labels from the "
                "subdirectory names in the target directory, "
                'pass `labels="inferred"`. '
                "If you wish to get a dataset that only contains text samples "
                f"(no labels), pass `labels=None`. Received: labels={labels}"
            )
        if class_names:
            raise ValueError(
                "You can only pass `class_names` if "
                f'`labels="inferred"`. Received: labels={labels}, and '
                f"class_names={class_names}"
            )
    if label_mode not in {"int", "categorical", "binary", None}:
        raise ValueError(
            '`label_mode` argument must be one of "int", '
            '"categorical", "binary", '
            f"or None. Received: label_mode={label_mode}"
        )
    # Either being `None` disables labels entirely.
    if labels is None or label_mode is None:
        labels = None
        label_mode = None
    dataset_utils.check_validation_split_arg(
        validation_split, subset, shuffle, seed
    )

    if seed is None:
        seed = np.random.randint(1e6)
    file_paths, labels, class_names = dataset_utils.index_directory(
        directory,
        labels,
        formats=(".txt",),
        class_names=class_names,
        shuffle=shuffle,
        seed=seed,
        follow_links=follow_links,
        verbose=verbose,
    )

    if label_mode == "binary" and len(class_names) != 2:
        raise ValueError(
            'When passing `label_mode="binary"`, there must be exactly 2 '
            f"class_names. Received: class_names={class_names}"
        )
    # Shuffle buffer scales with the batch size when batching is enabled.
    if batch_size is not None:
        shuffle_buffer_size = batch_size * 8
    else:
        shuffle_buffer_size = 1024

    if subset == "both":
        (
            file_paths_train,
            labels_train,
        ) = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, "training"
        )
        (
            file_paths_val,
            labels_val,
        ) = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, "validation"
        )
        if not file_paths_train:
            raise ValueError(
                f"No training text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        if not file_paths_val:
            raise ValueError(
                f"No validation text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        train_dataset = paths_and_labels_to_dataset(
            file_paths=file_paths_train,
            labels=labels_train,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
        )
        # Validation data is never shuffled.
        val_dataset = paths_and_labels_to_dataset(
            file_paths=file_paths_val,
            labels=labels_val,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=False,
        )

        if batch_size is not None:
            train_dataset = train_dataset.batch(batch_size)
            val_dataset = val_dataset.batch(batch_size)

        train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
        val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)

        # Users may need to reference `class_names`.
        train_dataset.class_names = class_names
        val_dataset.class_names = class_names
        dataset = [train_dataset, val_dataset]
    else:
        file_paths, labels = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, subset
        )
        if not file_paths:
            raise ValueError(
                f"No text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        dataset = paths_and_labels_to_dataset(
            file_paths=file_paths,
            labels=labels,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
        )
        if batch_size is not None:
            dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(tf.data.AUTOTUNE)

        # Users may need to reference `class_names`.
        dataset.class_names = class_names
    return dataset


def paths_and_labels_to_dataset(
    file_paths,
    labels,
    label_mode,
    num_classes,
    max_length,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Constructs a dataset of text strings and labels.

    File contents are read lazily (inside `Dataset.map`), after the optional
    shuffle of the path/label pairs.
    """
    path_ds = tf.data.Dataset.from_tensor_slices(file_paths)
    if label_mode:
        label_ds = dataset_utils.labels_to_dataset(
            labels, label_mode, num_classes
        )
        ds = tf.data.Dataset.zip((path_ds, label_ds))
    else:
        ds = path_ds

    if shuffle:
        ds = ds.shuffle(buffer_size=shuffle_buffer_size or 1024, seed=seed)

    # Replace each path by the file's contents, keeping the label (if any).
    if label_mode:
        ds = ds.map(
            lambda x, y: (path_to_string_content(x, max_length), y),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    else:
        ds = ds.map(
            lambda x: path_to_string_content(x, max_length),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    return ds


def path_to_string_content(path, max_length):
    # Read the whole file as one scalar string tensor; optionally truncate
    # to the first `max_length` bytes.
    txt = tf.io.read_file(path)
    if max_length is not None:
        txt = tf.strings.substr(txt, 0, max_length)
    return txt
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/tf_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..485cc2c1362c6015783c9fdef9534d3d882718a8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/tf_utils.py @@ -0,0 +1,157 @@
from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf


def get_tensor_spec(t, dynamic_batch=False, name=None):
    """Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
    if isinstance(t, tf.TypeSpec):
        spec = t
    elif isinstance(t, tf.__internal__.CompositeTensor):
        # Check for ExtensionTypes
        spec = t._type_spec
    elif hasattr(t, "shape") and hasattr(t, "dtype"):
        spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
    else:
        return None  # Allow non-Tensors to pass through.
    if not dynamic_batch:
        return spec

    shape = spec.shape
    # Scalars and unknown-rank specs have no batch dim to relax.
    if shape.rank is None or shape.rank == 0:
        return spec

    # Relax the leading (batch) dimension to `None`.
    shape_list = shape.as_list()
    shape_list[0] = None
    shape = tf.TensorShape(shape_list)
    spec._shape = shape
    return spec


def ensure_tensor(inputs, dtype=None):
    """Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
    if not isinstance(inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
        if backend.backend() == "torch" and backend.is_tensor(inputs):
            # Plain `np.asarray()` conversion fails with PyTorch.
            inputs = backend.convert_to_numpy(inputs)
        inputs = tf.convert_to_tensor(inputs, dtype)
    if dtype is not None and inputs.dtype != dtype:
        inputs = tf.cast(inputs, dtype)
    return inputs


def is_ragged_tensor(x):
    # String-based check, presumably to avoid importing the ragged_tensor
    # module directly — TODO confirm the motivation.
    return "ragged_tensor.RaggedTensor" in str(type(x))


def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):
    """Apply binary or count encoding to an input and return a sparse tensor."""
    result = tf.sparse.bincount(
        inputs,
        weights=count_weights,
        minlength=depth,
        maxlength=depth,
        axis=-1,
        binary_output=binary_output,
    )
    result = tf.cast(result, dtype)
    if inputs.shape.rank == 1:
        output_shape = (depth,)
    else:
        batch_size = tf.shape(result)[0]
        output_shape = (batch_size, depth)
    # Rebuild the SparseTensor to pin down the dense shape.
    result = tf.SparseTensor(
        indices=result.indices, values=result.values, dense_shape=output_shape
    )
    return result


def dense_bincount(inputs, depth, binary_output, dtype, count_weights=None):
    """Apply binary or count encoding to an input."""
    result = tf.math.bincount(
        inputs,
        weights=count_weights,
        minlength=depth,
        maxlength=depth,
        dtype=dtype,
        axis=-1,
        binary_output=binary_output,
    )
    # Pin the static shape, which bincount leaves partially unknown.
    if inputs.shape.rank == 1:
        result.set_shape(tf.TensorShape((depth,)))
    else:
        batch_size = inputs.shape.as_list()[0]
        result.set_shape(tf.TensorShape((batch_size, depth)))
    return result


def expand_dims(inputs, axis):
    """Expand dims on sparse, ragged, or dense tensors."""
    if isinstance(inputs, tf.SparseTensor):
        return tf.sparse.expand_dims(inputs, axis)
    return tf.expand_dims(inputs, axis)


def tf_encode_categorical_inputs(
    inputs,
    output_mode,
    depth,
    dtype="float32",
    sparse=False,
    count_weights=None,
    idf_weights=None,
):
    """Encodes categorical inputs according to output_mode.

    Faster method that relies on bincount.
    """

    if output_mode == "int":
        return tf.identity(tf.cast(inputs, dtype))

    original_shape = inputs.shape
    # In all cases, we should uprank scalar input to a single sample.
    if inputs.shape.rank == 0:
        inputs = expand_dims(inputs, -1)
    # One hot will unprank only if the final output dimension is not already 1.
    if output_mode == "one_hot":
        if inputs.shape[-1] != 1:
            inputs = expand_dims(inputs, -1)

    if inputs.shape.rank > 2:
        raise ValueError(
            "When output_mode is not `'int'`, maximum supported output rank "
            f"is 2. Received output_mode {output_mode} and input shape "
            f"{original_shape}, "
            f"which would result in output rank {inputs.shape.rank}."
        )

    binary_output = output_mode in ("multi_hot", "one_hot")
    if sparse:
        bincounts = sparse_bincount(
            inputs, depth, binary_output, dtype, count_weights
        )
    else:
        bincounts = dense_bincount(
            inputs, depth, binary_output, dtype, count_weights
        )

    bincounts = tf.cast(bincounts, dtype)
    if output_mode != "tf_idf":
        return bincounts

    if idf_weights is None:
        raise ValueError(
            "When output mode is `'tf_idf'`, idf_weights must be provided. "
            f"Received: output_mode={output_mode} and idf_weights={idf_weights}"
        )

    if sparse:
        # Scale each stored count by the idf weight of its column.
        value_weights = tf.gather(idf_weights, bincounts.indices[:, -1])
        return tf.SparseTensor(
            bincounts.indices,
            value_weights * bincounts.values,
            bincounts.dense_shape,
        )
    else:
        return tf.multiply(bincounts, idf_weights)
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/timeseries_dataset_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/timeseries_dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf0997b98bbe9946bbb051bdb2fb488af941e746 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/timeseries_dataset_utils.py @@ -0,0 +1,261 @@
import numpy as np

from keras.src.api_export import keras_export
from keras.src.utils.module_utils import tensorflow as tf


@keras_export(
    [
        "keras.utils.timeseries_dataset_from_array",
        "keras.preprocessing.timeseries_dataset_from_array",
    ]
)
def timeseries_dataset_from_array(
    data,
    targets,
    sequence_length,
    sequence_stride=1,
    sampling_rate=1,
    batch_size=128,
    shuffle=False,
    seed=None,
    start_index=None,
    end_index=None,
):
    """Creates a dataset of sliding windows over a timeseries provided as array.

    This function takes in a sequence of data-points gathered at
    equal intervals, along with time series parameters such as
    length of the sequences/windows, spacing between two sequence/windows, etc.,
    to produce batches of timeseries inputs and targets.

    Args:
        data: Numpy array or eager tensor
            containing consecutive data points (timesteps).
            Axis 0 is expected to be the time dimension.
        targets: Targets corresponding to timesteps in `data`.
            `targets[i]` should be the target
            corresponding to the window that starts at index `i`
            (see example 2 below).
+ Pass `None` if you don't have target data (in this case the dataset + will only yield the input data). + sequence_length: Length of the output sequences + (in number of timesteps). + sequence_stride: Period between successive output sequences. + For stride `s`, output samples would + start at index `data[i]`, `data[i + s]`, `data[i + 2 * s]`, etc. + sampling_rate: Period between successive individual timesteps + within sequences. For rate `r`, timesteps + `data[i], data[i + r], ... data[i + sequence_length]` + are used for creating a sample sequence. + batch_size: Number of timeseries samples in each batch + (except maybe the last one). If `None`, the data will not be batched + (the dataset will yield individual samples). + shuffle: Whether to shuffle output samples, + or instead draw them in chronological order. + seed: Optional int; random seed for shuffling. + start_index: Optional int; data points earlier (exclusive) + than `start_index` will not be used + in the output sequences. This is useful to reserve part of the + data for test or validation. + end_index: Optional int; data points later (exclusive) than `end_index` + will not be used in the output sequences. + This is useful to reserve part of the data for test or validation. + + Returns: + + A `tf.data.Dataset` instance. If `targets` was passed, the dataset yields + tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields + only `batch_of_sequences`. + + Example 1: + + Consider indices `[0, 1, ... 98]`. + With `sequence_length=10, sampling_rate=2, sequence_stride=3`, + `shuffle=False`, the dataset will yield batches of sequences + composed of the following indices: + + ``` + First sequence: [0 2 4 6 8 10 12 14 16 18] + Second sequence: [3 5 7 9 11 13 15 17 19 21] + Third sequence: [6 8 10 12 14 16 18 20 22 24] + ... 
+ Last sequence: [78 80 82 84 86 88 90 92 94 96] + ``` + + In this case the last 2 data points are discarded since no full sequence + can be generated to include them (the next sequence would have started + at index 81, and thus its last step would have gone over 98). + + Example 2: Temporal regression. + + Consider an array `data` of scalar values, of shape `(steps,)`. + To generate a dataset that uses the past 10 + timesteps to predict the next timestep, you would use: + + ```python + input_data = data[:-10] + targets = data[10:] + dataset = timeseries_dataset_from_array( + input_data, targets, sequence_length=10) + for batch in dataset: + inputs, targets = batch + assert np.array_equal(inputs[0], data[:10]) # First sequence: steps [0-9] + # Corresponding target: step 10 + assert np.array_equal(targets[0], data[10]) + break + ``` + + Example 3: Temporal regression for many-to-many architectures. + + Consider two arrays of scalar values `X` and `Y`, + both of shape `(100,)`. The resulting dataset should consist samples with + 20 timestamps each. The samples should not overlap. + To generate a dataset that uses the current timestamp + to predict the corresponding target timestep, you would use: + + ```python + X = np.arange(100) + Y = X*2 + + sample_length = 20 + input_dataset = timeseries_dataset_from_array( + X, None, sequence_length=sample_length, sequence_stride=sample_length) + target_dataset = timeseries_dataset_from_array( + Y, None, sequence_length=sample_length, sequence_stride=sample_length) + + for batch in zip(input_dataset, target_dataset): + inputs, targets = batch + assert np.array_equal(inputs[0], X[:sample_length]) + + # second sample equals output timestamps 20-40 + assert np.array_equal(targets[1], Y[sample_length:2*sample_length]) + break + ``` + """ + if start_index: + if start_index < 0: + raise ValueError( + "`start_index` must be 0 or greater. 
Received: " + f"start_index={start_index}" + ) + if start_index >= len(data): + raise ValueError( + "`start_index` must be lower than the length of the " + f"data. Received: start_index={start_index}, for data " + f"of length {len(data)}" + ) + if end_index: + if start_index and end_index <= start_index: + raise ValueError( + "`end_index` must be higher than `start_index`. " + f"Received: start_index={start_index}, and " + f"end_index={end_index} " + ) + if end_index >= len(data): + raise ValueError( + "`end_index` must be lower than the length of the " + f"data. Received: end_index={end_index}, for data of " + f"length {len(data)}" + ) + if end_index <= 0: + raise ValueError( + "`end_index` must be higher than 0. " + f"Received: end_index={end_index}" + ) + + # Validate strides + if sampling_rate <= 0: + raise ValueError( + "`sampling_rate` must be higher than 0. Received: " + f"sampling_rate={sampling_rate}" + ) + if sampling_rate >= len(data): + raise ValueError( + "`sampling_rate` must be lower than the length of the " + f"data. Received: sampling_rate={sampling_rate}, for data " + f"of length {len(data)}" + ) + if sequence_stride <= 0: + raise ValueError( + "`sequence_stride` must be higher than 0. Received: " + f"sequence_stride={sequence_stride}" + ) + if sequence_stride >= len(data): + raise ValueError( + "`sequence_stride` must be lower than the length of the " + f"data. Received: sequence_stride={sequence_stride}, for " + f"data of length {len(data)}" + ) + + if start_index is None: + start_index = 0 + if end_index is None: + end_index = len(data) + + # Determine the lowest dtype to store start positions (to lower memory + # usage). 
+ num_seqs = end_index - start_index - (sequence_length - 1) * sampling_rate + if targets is not None: + num_seqs = min(num_seqs, len(targets)) + if num_seqs < 2147483647: + index_dtype = "int32" + else: + index_dtype = "int64" + + # Generate start positions + start_positions = np.arange(0, num_seqs, sequence_stride, dtype=index_dtype) + if shuffle: + if seed is None: + seed = np.random.randint(1e6) + rng = np.random.RandomState(seed) + rng.shuffle(start_positions) + + sequence_length = tf.cast(sequence_length, dtype=index_dtype) + sampling_rate = tf.cast(sampling_rate, dtype=index_dtype) + + positions_ds = tf.data.Dataset.from_tensors(start_positions).repeat() + + # For each initial window position, generates indices of the window elements + indices = tf.data.Dataset.zip( + (tf.data.Dataset.range(len(start_positions)), positions_ds) + ).map( + lambda i, positions: tf.range( + positions[i], + positions[i] + sequence_length * sampling_rate, + sampling_rate, + ), + num_parallel_calls=tf.data.AUTOTUNE, + ) + + dataset = sequences_from_indices(data, indices, start_index, end_index) + if targets is not None: + indices = tf.data.Dataset.zip( + (tf.data.Dataset.range(len(start_positions)), positions_ds) + ).map( + lambda i, positions: positions[i], + num_parallel_calls=tf.data.AUTOTUNE, + ) + target_ds = sequences_from_indices( + targets, indices, start_index, end_index + ) + dataset = tf.data.Dataset.zip((dataset, target_ds)) + dataset = dataset.prefetch(tf.data.AUTOTUNE) + if batch_size is not None: + if shuffle: + # Shuffle locally at each iteration + dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed) + dataset = dataset.batch(batch_size) + else: + if shuffle: + dataset = dataset.shuffle(buffer_size=1024, seed=seed) + return dataset + + +def sequences_from_indices(array, indices_ds, start_index, end_index): + dataset = tf.data.Dataset.from_tensors(array[start_index:end_index]) + dataset = tf.data.Dataset.zip((dataset.repeat(), indices_ds)).map( + lambda 
import io

from packaging.version import parse

from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers import Layer
from keras.src.ops import convert_to_numpy
from keras.src.ops import convert_to_tensor


@keras_export("keras.layers.TorchModuleWrapper")
class TorchModuleWrapper(Layer):
    """Torch module wrapper layer.

    `TorchModuleWrapper` is a wrapper class that can turn any
    `torch.nn.Module` into a Keras layer, in particular by making its
    parameters trackable by Keras.

    `TorchModuleWrapper` is only compatible with the PyTorch backend and
    cannot be used with the TensorFlow or JAX backends.

    Args:
        module: `torch.nn.Module` instance. If it's a `LazyModule`
            instance, then its parameters must be initialized before
            passing the instance to `TorchModuleWrapper` (e.g. by calling
            it once).
        name: The name of the layer (string).

    Example:

    Here's an example of how the `TorchModuleWrapper` can be used with vanilla
    PyTorch modules.

    ```python
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    import keras
    from keras.layers import TorchModuleWrapper

    class Classifier(keras.Model):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Wrap `torch.nn.Module`s with `TorchModuleWrapper`
            # if they contain parameters
            self.conv1 = TorchModuleWrapper(
                nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3))
            )
            self.conv2 = TorchModuleWrapper(
                nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))
            )
            self.pool = nn.MaxPool2d(kernel_size=(2, 2))
            self.flatten = nn.Flatten()
            self.dropout = nn.Dropout(p=0.5)
            self.fc = TorchModuleWrapper(nn.Linear(1600, 10))

        def call(self, inputs):
            x = F.relu(self.conv1(inputs))
            x = self.pool(x)
            x = F.relu(self.conv2(x))
            x = self.pool(x)
            x = self.flatten(x)
            x = self.dropout(x)
            x = self.fc(x)
            return F.softmax(x, dim=1)


    model = Classifier()
    model.build((1, 28, 28))
    print("Output shape:", model(torch.ones(1, 1, 28, 28).to("cuda")).shape)

    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer="adam",
        metrics=["accuracy"]
    )
    model.fit(train_loader, epochs=5)
    ```
    """

    def __init__(self, module, name=None, **kwargs):
        super().__init__(name=name, **kwargs)
        # Torch imports are deferred to call time so that importing this
        # module does not require torch under other backends.
        import torch.nn as nn

        from keras.src.backend.torch.core import get_device

        # Uninitialized lazy modules have no concrete parameters yet, so
        # they cannot be tracked below; reject them up front.
        if (
            isinstance(module, nn.modules.lazy.LazyModuleMixin)
            and module.has_uninitialized_params()
        ):
            raise ValueError(
                "LazyModules are not supported unless they "
                "are already initialized. "
                f"Received uninitialized LazyModule: module={module}"
            )

        # Move the wrapped module to the backend's current device, then
        # expose its parameters as Keras variables.
        self.module = module.to(get_device())
        self._track_module_parameters()

    def parameters(self, recurse=True):
        # Mirror the `torch.nn.Module.parameters` API so torch optimizers
        # can consume this layer directly.
        return self.module.parameters(recurse=recurse)

    def _track_module_parameters(self):
        # Wrap each torch parameter in a Keras Variable so Keras can track
        # trainable/non-trainable weights.
        for param in self.module.parameters():
            # The Variable will reuse the raw `param`
            # and simply wrap it.
            variable = backend.Variable(
                initializer=param, trainable=param.requires_grad
            )
            self._track_variable(variable)
        self.built = True

    def call(self, *args, training=None, **kwargs):
        # NOTE: `training=None` (unspecified) puts the module in train
        # mode; only an explicit `training=False` switches to eval mode.
        if training is False:
            self.eval()
        else:
            self.train()
        return self.module(*args, **kwargs)

    def save_own_variables(self, store):
        """Saves model's state from `state_dict`.

        `model.parameters` excludes some of model's state like
        `BatchNorm` mean and variance. So, use `state_dict` to obtain
        all of model's state.
        """
        state_dict = self.module.state_dict()
        for key in state_dict.keys():
            store[key] = convert_to_numpy(state_dict[key])

    def load_own_variables(self, store):
        """Loads model's state via `state_dict`."""
        state_dict = {}
        for key in store.keys():
            if isinstance(key, bytes):
                # Keys read back from the store may arrive as bytes
                # (presumably from HDF5); decode to match string keys.
                key = key.decode()
            state_dict[key] = convert_to_tensor(store[key])
        self.module.load_state_dict(state_dict)

    def get_config(self):
        base_config = super().get_config()
        import torch

        # Serialize the whole module (architecture + weights) into a
        # pickle byte string via `torch.save`.
        buffer = io.BytesIO()
        torch.save(self.module, buffer)
        config = {"module": buffer.getvalue()}
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config):
        import torch

        if "module" in config:
            # NOTE(review): `torch.load` unpickles arbitrary objects —
            # only deserialize configs from trusted sources.
            buffer = io.BytesIO(config["module"])
            config["module"] = torch.load(buffer)
        return cls(**config)


def no_grad(orig_func):
    import torch

    # `torch.no_grad` is usable directly as a function decorator starting
    # with torch 2.1; on older versions, return the function unchanged.
    if parse(torch.__version__) >= parse("2.1.0"):
        return torch.no_grad(orig_func)
    else:
        return orig_func
_EXCLUDED_PATHS = (
    os.path.abspath(os.path.join(__file__, "..", "..")),
    os.path.join("tensorflow", "python"),
)


@keras_export("keras.config.enable_traceback_filtering")
def enable_traceback_filtering():
    """Turn traceback filtering back on.

    Keras hides its own internal stack frames from most exceptions it
    raises, so that tracebacks stay short, readable, and focused on user
    code. Filtering is on by default; call this to restore it after
    `keras.config.disable_traceback_filtering()`.

    See also `keras.config.disable_traceback_filtering()` and
    `keras.config.is_traceback_filtering_enabled()`.
    """
    global_state.set_global_attribute("traceback_filtering", True)


@keras_export("keras.config.disable_traceback_filtering")
def disable_traceback_filtering():
    """Turn traceback filtering off.

    By default Keras filters its internal frames out of the tracebacks of
    exceptions it raises, keeping them focused on user code. Disabling the
    filter exposes the raw, unfiltered traceback — useful when debugging
    Keras itself.

    See also `keras.config.enable_traceback_filtering()` and
    `keras.config.is_traceback_filtering_enabled()`.
    """
    global_state.set_global_attribute("traceback_filtering", False)


@keras_export("keras.config.is_traceback_filtering_enabled")
def is_traceback_filtering_enabled():
    """Check if traceback filtering is enabled.

    Returns:
        Boolean, `True` if traceback filtering is enabled
        (the default), and `False` otherwise.
    """
    return global_state.get_global_attribute("traceback_filtering", True)


def include_frame(fname):
    # A frame is kept unless its filename falls under one of the
    # Keras / TensorFlow internal paths.
    return not any(exclusion in fname for exclusion in _EXCLUDED_PATHS)
def filter_traceback(fn):
    """Filter out Keras-internal traceback frames in exceptions raised by fn."""

    @wraps(fn)
    def error_handler(*args, **kwargs):
        if not is_traceback_filtering_enabled():
            return fn(*args, **kwargs)

        filtered_tb = None
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            filtered_tb = _process_traceback_frames(e.__traceback__)
            # To get the full stack trace, call:
            # `keras.config.disable_traceback_filtering()`
            raise e.with_traceback(filtered_tb) from None
        finally:
            # Drop the local reference so the traceback does not keep a
            # reference cycle with this frame alive.
            del filtered_tb

    return error_handler


def inject_argument_info_in_traceback(fn, object_name=None):
    """Add information about call argument values to an error message.

    Arguments:
        fn: Function to wrap. Exceptions raised by this function will be
            re-raised with additional information added to the error message,
            displaying the values of the different arguments that the function
            was called with.
        object_name: String, display name of the class/function being called,
            e.g. `'layer "layer_name" (LayerClass)'`.

    Returns:
        A wrapped version of `fn`.
    """
    if backend.backend() == "tensorflow":
        from tensorflow import errors as tf_errors
    else:
        tf_errors = None

    @wraps(fn)
    def error_handler(*args, **kwargs):
        if not is_traceback_filtering_enabled():
            return fn(*args, **kwargs)

        signature = None
        bound_signature = None
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            if hasattr(e, "_keras_call_info_injected"):
                # Only inject info for the innermost failing call
                raise e
            signature = inspect.signature(fn)
            try:
                # Bind the received arguments to the signature; this can
                # fail with TypeError for genuinely unbindable calls.
                bound_signature = signature.bind(*args, **kwargs)
            except TypeError:
                # Likely unbindable arguments
                raise e

            # Add argument context
            arguments_context = []
            for arg in list(signature.parameters.values()):
                if arg.name in bound_signature.arguments:
                    value = tree.map_structure(
                        format_argument_value,
                        bound_signature.arguments[arg.name],
                    )
                else:
                    # Argument was not passed; show its default instead.
                    value = arg.default
                arguments_context.append(f" • {arg.name}={value}")
            if arguments_context:
                arguments_context = "\n".join(arguments_context)
                # Get original error message and append information to it.
                if tf_errors is not None and isinstance(e, tf_errors.OpError):
                    message = e.message
                elif e.args:
                    # Canonically, the 1st argument in an exception is the
                    # error message. This works for all built-in Python
                    # exceptions.
                    message = e.args[0]
                else:
                    message = ""
                display_name = f"{object_name if object_name else fn.__name__}"
                message = (
                    f"Exception encountered when calling {display_name}.\n\n"
                    f"\x1b[1m{message}\x1b[0m\n\n"
                    f"Arguments received by {display_name}:\n"
                    f"{arguments_context}"
                )

                # Reraise exception, with added context
                if tf_errors is not None and isinstance(e, tf_errors.OpError):
                    new_e = e.__class__(e.node_def, e.op, message, e.error_code)
                else:
                    try:
                        # For standard exceptions such as ValueError,
                        # TypeError, etc.
                        new_e = e.__class__(message)
                    except TypeError:
                        # For any custom error that doesn't have a standard
                        # signature.
                        new_e = RuntimeError(message)
                new_e._keras_call_info_injected = True
            else:
                new_e = e
            raise new_e.with_traceback(e.__traceback__) from None
        finally:
            # Drop locals to avoid reference cycles through the traceback.
            del signature
            del bound_signature

    return error_handler


def format_argument_value(value):
    """Return a short, readable representation of one call argument."""
    if backend.is_tensor(value):
        # Simplified representation for eager / graph tensors
        # to keep messages readable
        if backend.backend() == "tensorflow":
            tensor_cls = "tf.Tensor"
        elif backend.backend() == "jax":
            tensor_cls = "jnp.ndarray"
        elif backend.backend() == "torch":
            tensor_cls = "torch.Tensor"
        elif backend.backend() == "numpy":
            tensor_cls = "np.ndarray"
        else:
            tensor_cls = "array"

        return (
            f"{tensor_cls}(shape={value.shape}, "
            f"dtype={backend.standardize_dtype(value.dtype)})"
        )
    return repr(value)
class Tracker:
    """Attribute tracker, used for e.g. Variable tracking.

    Monitors certain attribute types
    and put them in appropriate lists in case of a match.

    Also passively tracks certain mutable collections
    (dict, list) so that items added to them later
    still get tracked. This is done by wrapping these
    collections into an equivalent, tracking-aware object.

    Example:

    ```python
    def __init__(self):
        self.tracker = Tracker(
            # Format: `name: (test_fn, store)`
            {
                "variables":
                    (lambda x: isinstance(x, Variable), self._variables),
                "metrics": (lambda x: isinstance(x, Metric), self._metrics),
                "layers": (lambda x: isinstance(x, Layer), self._layers),
            }
        )

    def __setattr__(self, name, value):
        if hasattr(self, "_tracker"):
            value = self._tracker.track(value)
        return super().__setattr__(name, value)
    ```
    """

    def __init__(self, config, exclusions=None):
        # `config` maps store name -> (predicate, store list). `stored_ids`
        # keeps the id() of every tracked object per store so membership
        # checks are O(1) and identity-based (not equality-based).
        self.config = config
        self.stored_ids = {name: set() for name in self.config.keys()}
        self.locked = False
        self._lock_violation_msg = None
        self.exclusions = exclusions or {}

    def track(self, attr):
        if not is_tracking_enabled():
            return attr

        for store_name, (is_attr_type, _) in self.config.items():
            if is_attr_type(attr):
                # If the value already lives in one of this store's
                # "exclusion" stores, leave it there untouched.
                if store_name in self.exclusions:
                    for excl in self.exclusions[store_name]:
                        if self.is_in_store(excl, attr):
                            return attr
                if not self.is_in_store(store_name, attr):
                    self.add_to_store(store_name, attr)
                return attr
        if isinstance(attr, tuple) and hasattr(attr, "_fields"):
            # Named tuple case.
            wrapped_attr = {}
            for name, e in attr._asdict().items():
                wrapped_attr[name] = self.track(e)
            # Named tuples are immutable: rebuild one with tracked fields.
            return attr.__class__(**wrapped_attr)
        if isinstance(attr, tuple):
            wrapped_attr = []
            for e in attr:
                wrapped_attr.append(self.track(e))
            return attr.__class__(wrapped_attr)
        elif isinstance(attr, list):
            return TrackedList(attr, self)
        elif isinstance(attr, dict):
            # TODO: OrderedDict?
            return TrackedDict(attr, self)
        elif isinstance(attr, set):
            return TrackedSet(attr, self)
        return attr

    def untrack(self, value):
        # Remove `value` from every store that currently holds it,
        # matching by identity (id), not equality.
        for store_name in self.stored_ids.keys():
            if id(value) in self.stored_ids[store_name]:
                self.stored_ids[store_name].remove(id(value))
                python_utils.remove_by_id(self.config[store_name][1], value)

    def lock(self, msg=None):
        # Once locked, any further `add_to_store` raises with `msg`.
        self.locked = True
        if msg is not None:
            self._lock_violation_msg = msg

    def unlock(self):
        self.locked = False

    def add_to_store(self, store_name, value):
        if self.locked:
            raise ValueError(self._lock_violation_msg)
        self.config[store_name][1].append(value)
        self.stored_ids[store_name].add(id(value))

    def is_in_store(self, store_name, value):
        return id(value) in self.stored_ids[store_name]

    def replace_tracked_value(self, store_name, old_value, new_value):
        # Swap a tracked object in place (same slot in the store list),
        # keeping the id bookkeeping consistent.
        if not self.is_in_store(store_name, old_value):
            raise ValueError(f"Unknown value: {old_value}")
        store_list = self.config[store_name][1]
        index = store_list.index(old_value)
        store_list[index] = new_value
        self.stored_ids[store_name].remove(id(old_value))
        self.stored_ids[store_name].add(id(new_value))
@tree.register_tree_node_class
class TrackedDict(dict):
    """A `dict` whose values are routed through a `Tracker`.

    Values present at construction and values inserted later are tracked;
    values removed from the dict are untracked.
    """

    def __init__(self, values=None, tracker=None):
        self.tracker = tracker
        if tracker and values:
            values = {key: tracker.track(item) for key, item in values.items()}
        super().__init__(values or [])

    def __setitem__(self, key, value):
        if self.tracker:
            self.tracker.track(value)
        super().__setitem__(key, value)

    def update(self, mapping):
        if self.tracker:
            mapping = {
                key: self.tracker.track(item) for key, item in mapping.items()
            }
        super().update(mapping)

    def pop(self, key, default=None):
        if not self.tracker:
            return super().pop(key, default)
        value = super().pop(key, default)
        # Identity check: only untrack when something was actually removed.
        if value is not default:
            self.tracker.untrack(value)
        return value

    def popitem(self):
        key, value = super().popitem()
        if self.tracker:
            self.tracker.untrack(value)
        return key, value

    def clear(self):
        if self.tracker:
            for item in self.values():
                self.tracker.untrack(item)
        super().clear()

    def tree_flatten(self):
        # For optree / dmtree: (children, metadata, entries), with keys
        # sorted for a deterministic flatten order.
        keys = sorted(self.keys())
        children = [self[key] for key in keys]
        return children, keys, keys

    @classmethod
    def tree_unflatten(cls, keys, values):
        # For optree / dmtree
        return cls(dict(zip(keys, values)))


@tree.register_tree_node_class
class TrackedSet(set):
    """A `set` whose members are routed through a `Tracker`."""

    def __init__(self, values=None, tracker=None):
        self.tracker = tracker
        if tracker and values:
            values = {tracker.track(item) for item in values}
        super().__init__(values or [])

    def add(self, value):
        if self.tracker:
            self.tracker.track(value)
        super().add(value)

    def update(self, values):
        if self.tracker:
            values = [self.tracker.track(item) for item in values]
        super().update(values)

    def remove(self, value):
        if self.tracker:
            self.tracker.untrack(value)
        super().remove(value)

    def pop(self):
        value = super().pop()
        if self.tracker:
            self.tracker.untrack(value)
        return value

    def clear(self):
        if self.tracker:
            for item in self:
                self.tracker.untrack(item)
        super().clear()

    def tree_flatten(self):
        # For optree / dmtree
        return (self, None)

    @classmethod
    def tree_unflatten(cls, metadata, children):
        # For optree / dmtree
        return cls(children)
__version__ = "3.8.0"  # Single source of truth; bumped on each release.


@keras_export("keras.version")
def version():
    """Return the installed Keras version string (e.g. `"3.8.0"`)."""
    return __version__
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/draw_segmentation_masks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8260648fce9e8057072ed4d3b9f29cc2ef03a1fd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/draw_segmentation_masks.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_bounding_box_gallery.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_bounding_box_gallery.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a6f4417353721e33629040b953f2c311714a521 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_bounding_box_gallery.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_image_gallery.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_image_gallery.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f8e052e7cf1c3184e18a8b45717f6e21b38c8f8 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_image_gallery.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_segmentation_mask_gallery.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_segmentation_mask_gallery.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d35ff659602c47893ef2a41f43118009b37b4fbe Binary files /dev/null and 
@keras_export("keras.visualization.draw_bounding_boxes")
def draw_bounding_boxes(
    images,
    bounding_boxes,
    bounding_box_format,
    class_mapping=None,
    color=(128, 128, 128),
    line_thickness=2,
    text_thickness=1,
    font_scale=1.0,
    data_format=None,
):
    """Draws bounding boxes on images.

    This function draws bounding boxes on a batch of images. It supports
    different bounding box formats and can optionally display class labels
    and confidences.

    Args:
        images: A batch of images as a 4D tensor or NumPy array. Shape should be
            `(batch_size, height, width, channels)`.
        bounding_boxes: A dictionary containing bounding box data. Should have
            the following keys:
            - `boxes`: A tensor or array of shape `(batch_size, num_boxes, 4)`
              containing the bounding box coordinates in the specified format.
            - `labels`: A tensor or array of shape `(batch_size, num_boxes)`
              containing the class labels for each bounding box.
            - `confidences` (Optional): A tensor or array of shape
              `(batch_size, num_boxes)` containing the confidence scores for
              each bounding box.
        bounding_box_format: A string specifying the format of the bounding
            boxes. Refer [keras-io](TODO)
        class_mapping: A dictionary mapping class IDs (integers) to class labels
            (strings). Used to display class labels next to the bounding boxes.
            Defaults to None (no labels displayed).
        color: A tuple or list representing the RGB color of the bounding boxes.
            For example, `(255, 0, 0)` for red. Defaults to `(128, 128, 128)`.
        line_thickness: An integer specifying the thickness of the bounding box
            lines. Defaults to `2`.
        text_thickness: An integer specifying the thickness of the text labels.
            Defaults to `1`.
        font_scale: A float specifying the scale of the font used for text
            labels. Defaults to `1.0`.
        data_format: A string, either `"channels_last"` or `"channels_first"`,
            specifying the order of dimensions in the input images. Defaults to
            the `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            "channels_last".

    Returns:
        A NumPy array of the annotated images with the bounding boxes drawn.
        The array will have the same shape as the input `images`.

    Raises:
        ValueError: If `images` is not a 4D tensor/array, if `bounding_boxes` is
            not a dictionary, or if `bounding_boxes` does not contain `"boxes"`
            and `"labels"` keys.
        TypeError: If `bounding_boxes` is not a dictionary.
        ImportError: If `cv2` (OpenCV) is not installed.
    """

    # `cv2` is an optional dependency, imported at module level as None
    # when unavailable.
    if cv2 is None:
        raise ImportError(
            "The `draw_bounding_boxes` function requires the `cv2` package "
            " (OpenCV). Please install it with `pip install opencv-python`."
        )

    class_mapping = class_mapping or {}
    text_thickness = (
        text_thickness or line_thickness
    )  # Default text_thickness if not provided.
    data_format = data_format or backend.image_data_format()
    images_shape = ops.shape(images)
    if len(images_shape) != 4:
        raise ValueError(
            "`images` must be batched 4D tensor. "
            f"Received: images.shape={images_shape}"
        )
    if not isinstance(bounding_boxes, dict):
        raise TypeError(
            "`bounding_boxes` should be a dict. "
            f"Received: bounding_boxes={bounding_boxes} of type "
            f"{type(bounding_boxes)}"
        )
    if "boxes" not in bounding_boxes or "labels" not in bounding_boxes:
        raise ValueError(
            "`bounding_boxes` should be a dict containing 'boxes' and "
            f"'labels' keys. Received: bounding_boxes={bounding_boxes}"
        )
    # Locate the height/width axes according to the data format so the
    # boxes can be converted to absolute `xyxy` pixel coordinates.
    if data_format == "channels_last":
        h_axis = -3
        w_axis = -2
    else:
        h_axis = -2
        w_axis = -1
    height = images_shape[h_axis]
    width = images_shape[w_axis]
    # Copy before conversion so the caller's dict is not mutated.
    bounding_boxes = bounding_boxes.copy()
    bounding_boxes = convert_format(
        bounding_boxes, bounding_box_format, "xyxy", height, width
    )

    # To numpy array
    images = ops.convert_to_numpy(images).astype("uint8")
    boxes = ops.convert_to_numpy(bounding_boxes["boxes"])
    labels = ops.convert_to_numpy(bounding_boxes["labels"])
    if "confidences" in bounding_boxes:
        confidences = ops.convert_to_numpy(bounding_boxes["confidences"])
    else:
        confidences = None

    result = []
    batch_size = images.shape[0]
    for i in range(batch_size):
        _image = images[i]
        _box = boxes[i]
        _class = labels[i]
        for box_i in range(_box.shape[0]):
            x1, y1, x2, y2 = _box[box_i].astype("int32")
            c = _class[box_i].astype("int32")
            # A class id of -1 appears to mark a padded/invalid box —
            # skipped here. TODO confirm against the box-padding producer.
            if c == -1:
                continue
            # cv2 requires plain Python ints, not numpy scalars.
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            c = int(c)
            # Draw bounding box
            cv2.rectangle(_image, (x1, y1), (x2, y2), color, line_thickness)

            if c in class_mapping:
                label = class_mapping[c]
                if confidences is not None:
                    conf = confidences[i][box_i]
                    label = f"{label} | {conf:.2f}"

                font_x1, font_y1 = _find_text_location(
                    x1, y1, font_scale, text_thickness
                )
                cv2.putText(
                    img=_image,
                    text=label,
                    org=(font_x1, font_y1),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=font_scale,
                    color=color,
                    thickness=text_thickness,
                )
        result.append(_image)
    return np.stack(result, axis=0)
org=(font_x1, font_y1), + fontFace=cv2.FONT_HERSHEY_SIMPLEX, + fontScale=font_scale, + color=color, + thickness=text_thickness, + ) + result.append(_image) + return np.stack(result, axis=0) + + +def _find_text_location(x, y, font_scale, thickness): + font_height = int(font_scale * 12) + target_y = y - 8 + if target_y - (2 * font_height) > 0: + return x, y - 8 + + line_offset = thickness + static_offset = 3 + + return ( + x + static_offset, + y + (2 * font_height) + line_offset + static_offset, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/draw_segmentation_masks.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/draw_segmentation_masks.py new file mode 100644 index 0000000000000000000000000000000000000000..99689778d995f738dab03657cd57a31d8fb54cda --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/draw_segmentation_masks.py @@ -0,0 +1,109 @@ +import numpy as np + +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export + + +@keras_export("keras.visualization.draw_segmentation_masks") +def draw_segmentation_masks( + images, + segmentation_masks, + num_classes=None, + color_mapping=None, + alpha=0.8, + blend=True, + ignore_index=-1, + data_format=None, +): + """Draws segmentation masks on images. + + The function overlays segmentation masks on the input images. + The masks are blended with the images using the specified alpha value. + + Args: + images: A batch of images as a 4D tensor or NumPy array. Shape + should be (batch_size, height, width, channels). + segmentation_masks: A batch of segmentation masks as a 3D or 4D tensor + or NumPy array. Shape should be (batch_size, height, width) or + (batch_size, height, width, 1). The values represent class indices + starting from 1 up to `num_classes`. 
Class 0 is reserved for + the background and will be ignored if `ignore_index` is not 0. + num_classes: The number of segmentation classes. If `None`, it is + inferred from the maximum value in `segmentation_masks`. + color_mapping: A dictionary mapping class indices to RGB colors. + If `None`, a default color palette is generated. The keys should be + integers starting from 1 up to `num_classes`. + alpha: The opacity of the segmentation masks. Must be in the range + `[0, 1]`. + blend: Whether to blend the masks with the input image using the + `alpha` value. If `False`, the masks are drawn directly on the + images without blending. Defaults to `True`. + ignore_index: The class index to ignore. Mask pixels with this value + will not be drawn. Defaults to -1. + data_format: Image data format, either `"channels_last"` or + `"channels_first"`. Defaults to the `image_data_format` value found + in your Keras config file at `~/.keras/keras.json`. If you never + set it, then it will be `"channels_last"`. + + Returns: + A NumPy array of the images with the segmentation masks overlaid. + + Raises: + ValueError: If the input `images` is not a 4D tensor or NumPy array. + TypeError: If the input `segmentation_masks` is not an integer type. + """ + data_format = data_format or backend.image_data_format() + images_shape = ops.shape(images) + if len(images_shape) != 4: + raise ValueError( + "`images` must be batched 4D tensor. " + f"Received: images.shape={images_shape}" + ) + if data_format == "channels_first": + images = ops.transpose(images, (0, 2, 3, 1)) + segmentation_masks = ops.transpose(segmentation_masks, (0, 2, 3, 1)) + images = ops.convert_to_tensor(images, dtype="float32") + segmentation_masks = ops.convert_to_tensor(segmentation_masks) + + if not backend.is_int_dtype(segmentation_masks.dtype): + dtype = backend.standardize_dtype(segmentation_masks.dtype) + raise TypeError( + "`segmentation_masks` must be in integer dtype. 
" + f"Received: segmentation_masks.dtype={dtype}" + ) + + # Infer num_classes + if num_classes is None: + num_classes = int(ops.convert_to_numpy(ops.max(segmentation_masks))) + if color_mapping is None: + colors = _generate_color_palette(num_classes) + else: + colors = [color_mapping[i] for i in range(num_classes)] + valid_masks = ops.not_equal(segmentation_masks, ignore_index) + valid_masks = ops.squeeze(valid_masks, axis=-1) + segmentation_masks = ops.one_hot(segmentation_masks, num_classes) + segmentation_masks = segmentation_masks[..., 0, :] + segmentation_masks = ops.convert_to_numpy(segmentation_masks) + + # Replace class with color + masks = segmentation_masks + masks = np.transpose(masks, axes=(3, 0, 1, 2)).astype("bool") + images_to_draw = ops.convert_to_numpy(images).copy() + for mask, color in zip(masks, colors): + color = np.array(color, dtype=images_to_draw.dtype) + images_to_draw[mask, ...] = color[None, :] + images_to_draw = ops.convert_to_tensor(images_to_draw) + outputs = ops.cast(images_to_draw, dtype="float32") + + if blend: + outputs = images * (1 - alpha) + outputs * alpha + outputs = ops.where(valid_masks[..., None], outputs, images) + outputs = ops.cast(outputs, dtype="uint8") + outputs = ops.convert_to_numpy(outputs) + return outputs + + +def _generate_color_palette(num_classes: int): + palette = np.array([2**25 - 1, 2**15 - 1, 2**21 - 1]) + return [((i * palette) % 255).tolist() for i in range(num_classes)] diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_bounding_box_gallery.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_bounding_box_gallery.py new file mode 100644 index 0000000000000000000000000000000000000000..3fe3242f718cc42812c914b0ca24e59c26534773 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_bounding_box_gallery.py @@ -0,0 +1,165 @@ +import functools + +import numpy as np + 
+from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.visualization.draw_bounding_boxes import draw_bounding_boxes +from keras.src.visualization.plot_image_gallery import plot_image_gallery + +try: + from matplotlib import patches # For legend patches +except ImportError: + patches = None + + +@keras_export("keras.visualization.plot_bounding_box_gallery") +def plot_bounding_box_gallery( + images, + bounding_box_format, + y_true=None, + y_pred=None, + value_range=(0, 255), + true_color=(0, 188, 212), + pred_color=(255, 235, 59), + line_thickness=2, + font_scale=1.0, + text_thickness=None, + class_mapping=None, + ground_truth_mapping=None, + prediction_mapping=None, + legend=False, + legend_handles=None, + rows=None, + cols=None, + data_format=None, + **kwargs, +): + """Plots a gallery of images with bounding boxes. + + This function can display both ground truth and predicted bounding boxes on + a set of images. It supports various bounding box formats and can include + class labels and a legend. + + Args: + images: A 4D tensor or NumPy array of images. Shape should be + `(batch_size, height, width, channels)`. + bounding_box_format: The format of the bounding boxes. + Refer [keras-io](TODO) + y_true: A dictionary containing the ground truth bounding boxes and + labels. Should have the same structure as the `bounding_boxes` + argument in `keras.visualization.draw_bounding_boxes`. + Defaults to `None`. + y_pred: A dictionary containing the predicted bounding boxes and labels. + Should have the same structure as `y_true`. Defaults to `None`. + value_range: A tuple specifying the value range of the images + (e.g., `(0, 255)` or `(0, 1)`). Defaults to `(0, 255)`. + true_color: A tuple of three integers representing the RGB color for the + ground truth bounding boxes. Defaults to `(0, 188, 212)`. + pred_color: A tuple of three integers representing the RGB color for the + predicted bounding boxes. 
Defaults to `(255, 235, 59)`. + line_thickness: The thickness of the bounding box lines. Defaults to 2. + font_scale: The scale of the font used for labels. Defaults to 1.0. + text_thickness: The thickness of the bounding box text. Defaults to + `line_thickness`. + class_mapping: A dictionary mapping class IDs to class names. Used f + or both ground truth and predicted boxes if `ground_truth_mapping` + and `prediction_mapping` are not provided. Defaults to `None`. + ground_truth_mapping: A dictionary mapping class IDs to class names + specifically for ground truth boxes. Overrides `class_mapping` + for ground truth. Defaults to `None`. + prediction_mapping: A dictionary mapping class IDs to class names + specifically for predicted boxes. Overrides `class_mapping` for + predictions. Defaults to `None`. + legend: A boolean indicating whether to show a legend. + Defaults to `False`. + legend_handles: A list of matplotlib `Patch` objects to use for the + legend. If this is provided, the `legend` argument will be ignored. + Defaults to `None`. + rows: The number of rows in the image gallery. Required if the images + are not batched. Defaults to `None`. + cols: The number of columns in the image gallery. Required if the images + are not batched. Defaults to `None`. + data_format: The image data format `"channels_last"` or + `"channels_first"`. Defaults to the Keras backend data format. + kwargs: Additional keyword arguments to be passed to + `keras.visualization.plot_image_gallery`. + + Returns: + The output of `keras.visualization.plot_image_gallery`. + + Raises: + ValueError: If `images` is not a 4D tensor/array or if both `legend` a + nd `legend_handles` are specified. + ImportError: if matplotlib is not installed + """ + if patches is None: + raise ImportError( + "The `plot_bounding_box_gallery` function requires the " + " `matplotlib` package. Please install it with " + " `pip install matplotlib`." 
+ ) + + prediction_mapping = prediction_mapping or class_mapping + ground_truth_mapping = ground_truth_mapping or class_mapping + data_format = data_format or backend.image_data_format() + images_shape = ops.shape(images) + if len(images_shape) != 4: + raise ValueError( + "`images` must be batched 4D tensor. " + f"Received: images.shape={images_shape}" + ) + if data_format == "channels_first": # Ensure correct data format + images = ops.transpose(images, (0, 2, 3, 1)) + plotted_images = ops.convert_to_numpy(images) + + draw_fn = functools.partial( + draw_bounding_boxes, + bounding_box_format=bounding_box_format, + line_thickness=line_thickness, + text_thickness=text_thickness, + font_scale=font_scale, + ) + + if y_true is not None: + plotted_images = draw_fn( + plotted_images, + y_true, + color=true_color, + class_mapping=ground_truth_mapping, + ) + + if y_pred is not None: + plotted_images = draw_fn( + plotted_images, + y_pred, + color=pred_color, + class_mapping=prediction_mapping, + ) + + if legend: + if legend_handles: + raise ValueError( + "Only pass `legend` OR `legend_handles` to " + "`keras.visualization.plot_bounding_box_gallery()`." 
+ ) + legend_handles = [ + patches.Patch( + color=np.array(true_color) / 255.0, # Normalize color + label="Ground Truth", + ), + patches.Patch( + color=np.array(pred_color) / 255.0, # Normalize color + label="Prediction", + ), + ] + + return plot_image_gallery( + plotted_images, + value_range=value_range, + legend_handles=legend_handles, + rows=rows, + cols=cols, + **kwargs, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_image_gallery.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_image_gallery.py new file mode 100644 index 0000000000000000000000000000000000000000..902872be5387a6b061cc8291e2f909e0f1a6de70 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_image_gallery.py @@ -0,0 +1,165 @@ +import math + +import numpy as np + +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 + BaseImagePreprocessingLayer, +) + +try: + import matplotlib.pyplot as plt +except ImportError: + plt = None + + +def _extract_image_batch(images, num_images, batch_size): + """Extracts a batch of images for plotting. + + Args: + images: The 4D tensor or NumPy array of images. + num_images: The number of images to extract. + batch_size: The original batch size of the images. + + Returns: + A 4D tensor or NumPy array containing the extracted images. + + Raises: + ValueError: If `images` is not a 4D tensor/array. + """ + + if len(ops.shape(images)) != 4: + raise ValueError( + "`plot_images_gallery()` requires you to " + "batch your `np.array` samples together." + ) + num_samples = min(num_images, batch_size) + sample = images[:num_samples, ...] 
+ + return sample + + +@keras_export("keras.visualization.plot_image_gallery") +def plot_image_gallery( + images, + rows=None, + cols=None, + value_range=(0, 255), + scale=2, + path=None, + show=None, + transparent=True, + dpi=60, + legend_handles=None, + data_format=None, +): + """Displays a gallery of images. + + Args: + images: A 4D tensor or NumPy array of images. Shape should be + `(batch_size, height, width, channels)`. + value_range: A tuple specifying the value range of the images + (e.g., `(0, 255)` or `(0, 1)`). Defaults to `(0, 255)`. + rows: The number of rows in the gallery. If `None`, it's calculated + based on the number of images and `cols`. Defaults to `None`. + cols: The number of columns in the gallery. If `None`, it's calculated + based on the number of images and `rows`. Defaults to `None`. + scale: A float controlling the size of the displayed images. The images + are scaled by this factor. Defaults to `2`. + path: The path to save the generated gallery image. If `None`, the + image is displayed using `plt.show()`. Defaults to `None`. + show: Whether to display the image using `plt.show()`. If `True`, the + image is displayed. If `False`, the image is not displayed. + Ignored if `path` is not `None`. Defaults to `True` if `path` + is `None`, `False` otherwise. + transparent: A boolean, whether to save the figure with a transparent + background. Defaults to `True`. + dpi: The DPI (dots per inch) for saving the figure. Defaults to 60. + legend_handles: A list of matplotlib `Patch` objects to use as legend + handles. Defaults to `None`. + data_format: The image data format `"channels_last"` or + `"channels_first"`. Defaults to the Keras backend data format. + + Raises: + ValueError: If both `path` and `show` are set to non-`None` values or if + `images` is not a 4D tensor or array. + ImportError: if matplotlib is not installed. 
+ """ + if plt is None: + raise ImportError( + "The `plot_image_gallery` function requires the `matplotlib` " + "package. Please install it with `pip install matplotlib`." + ) + + if path is not None and show: + raise ValueError( + "plot_gallery() expects either `path` to be set, or `show` " + "to be true." + ) + + show = show if show is not None else (path is None) + data_format = data_format or backend.image_data_format() + + batch_size = ops.shape(images)[0] if len(ops.shape(images)) == 4 else 1 + + rows = rows or int(math.ceil(math.sqrt(batch_size))) + cols = cols or int(math.ceil(batch_size // rows)) + num_images = rows * cols + + images = _extract_image_batch(images, num_images, batch_size) + if ( + data_format == "channels_first" + ): # Ensure correct data format for plotting + images = ops.transpose(images, (0, 2, 3, 1)) + # Generate subplots + fig, axes = plt.subplots( + nrows=rows, + ncols=cols, + figsize=(cols * scale, rows * scale), + frameon=False, + layout="tight", + squeeze=True, + sharex="row", + sharey="col", + ) + fig.subplots_adjust(wspace=0, hspace=0) + + if isinstance(axes, np.ndarray) and len(axes.shape) == 1: + expand_axis = 0 if rows == 1 else -1 + axes = np.expand_dims(axes, expand_axis) + + if legend_handles is not None: + fig.legend(handles=legend_handles, loc="lower center") + + images = BaseImagePreprocessingLayer()._transform_value_range( + images=images, original_range=value_range, target_range=(0, 255) + ) + + images = ops.convert_to_numpy(images) + if data_format == "channels_first": + images = images.transpose(0, 2, 3, 1) + + for row in range(rows): + for col in range(cols): + index = row * cols + col + current_axis = ( + axes[row, col] if isinstance(axes, np.ndarray) else axes + ) + current_axis.imshow(images[index].astype("uint8")) + current_axis.margins(x=0, y=0) + current_axis.axis("off") + + if path is not None: + plt.savefig( + fname=path, + pad_inches=0, + bbox_inches="tight", + transparent=transparent, + dpi=dpi, + ) + 
plt.close() + elif show: + plt.show() + plt.close() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_segmentation_mask_gallery.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_segmentation_mask_gallery.py new file mode 100644 index 0000000000000000000000000000000000000000..1edf603ddf72ec9824d3893c6280e5742ebe7c1c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_segmentation_mask_gallery.py @@ -0,0 +1,121 @@ +import functools + +import numpy as np + +from keras.src import backend +from keras.src import ops +from keras.src.api_export import keras_export +from keras.src.visualization.draw_segmentation_masks import ( + draw_segmentation_masks, +) +from keras.src.visualization.plot_image_gallery import plot_image_gallery + + +@keras_export("keras.visualization.plot_segmentation_mask_gallery") +def plot_segmentation_mask_gallery( + images, + num_classes, + value_range=(0, 255), + y_true=None, + y_pred=None, + color_mapping=None, + blend=True, + alpha=0.8, + ignore_index=-1, + data_format=None, + **kwargs, +): + """Plots a gallery of images with corresponding segmentation masks. + + Args: + images: A 4D tensor or NumPy array of images. Shape should be + `(batch_size, height, width, channels)`. + num_classes: The number of segmentation classes. Class indices should + start from `1`. Class `0` will be treated as background and + ignored if `ignore_index` is not 0. + value_range: A tuple specifying the value range of the images + (e.g., `(0, 255)` or `(0, 1)`). Defaults to `(0, 255)`. + y_true: A 3D/4D tensor or NumPy array representing the ground truth + segmentation masks. Shape should be `(batch_size, height, width)` or + `(batch_size, height, width, 1)`. Defaults to `None`. + y_pred: A 3D/4D tensor or NumPy array representing the predicted + segmentation masks. Shape should be the same as `y_true`. 
+ Defaults to `None`. + color_mapping: A dictionary mapping class indices to RGB colors. + If `None`, a default color palette is used. Class indices start + from `1`. Defaults to `None`. + blend: Whether to blend the masks with the input image using the + `alpha` value. If `False`, the masks are drawn directly on the + images without blending. Defaults to `True`. + alpha: The opacity of the segmentation masks (a float between 0 and 1). + Defaults to `0.8`. + ignore_index: The class index to ignore when drawing masks. + Defaults to `-1`. + data_format: The image data format `"channels_last"` or + `"channels_first"`. Defaults to the Keras backend data format. + kwargs: Additional keyword arguments to be passed to + `keras.visualization.plot_image_gallery`. + + Returns: + The output of `keras.visualization.plot_image_gallery`. + + Raises: + ValueError: If `images` is not a 4D tensor/array. + """ + data_format = data_format or backend.image_data_format() + image_shape = ops.shape(images) + if len(image_shape) != 4: + raise ValueError( + "`images` must be batched 4D tensor. 
" + f"Received: images.shape={image_shape}" + ) + if data_format == "channels_first": + images = ops.transpose(images, (0, 2, 3, 1)) + + batch_size = image_shape[0] if len(image_shape) == 4 else 1 + + rows = batch_size + cols = 1 + + if y_true is not None: + cols += 1 + + if y_pred is not None: + cols += 1 + + images_np = ops.convert_to_numpy(images) + + draw_masks_fn = functools.partial( + draw_segmentation_masks, + num_classes=num_classes, + color_mapping=color_mapping, + alpha=alpha, + ignore_index=ignore_index, + blend=blend, + ) + + if y_true is not None: + if data_format == "channels_first": + y_true = ops.transpose(y_true, (0, 2, 3, 1)) + y_true = ops.cast(y_true, "int32") + true_masks_drawn = draw_masks_fn(images_np, y_true) + + if y_pred is not None: + if data_format == "channels_first": + y_pred = ops.transpose(y_pred, (0, 2, 3, 1)) + y_pred = ops.cast(y_pred, "int32") + predicted_masks_drawn = draw_masks_fn(images_np, y_pred) + + images_with_masks = [] + for i in range(batch_size): + images_with_masks.append(images_np[i]) + if y_true is not None: + images_with_masks.append(true_masks_drawn[i]) + if y_pred is not None: + images_with_masks.append(predicted_masks_drawn[i]) + + gallery_images = np.stack(images_with_masks, axis=0) + + return plot_image_gallery( + gallery_images, value_range=value_range, rows=rows, cols=cols, **kwargs + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8c55aa752f5c128c64073881314aa37ed127847c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__init__.py @@ -0,0 +1,5 @@ +from keras.src.wrappers.sklearn_wrapper import SKLearnClassifier +from keras.src.wrappers.sklearn_wrapper import SKLearnRegressor +from keras.src.wrappers.sklearn_wrapper import SKLearnTransformer + 
+__all__ = ["SKLearnClassifier", "SKLearnRegressor", "SKLearnTransformer"] diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be7e06e2e9f6cfb6ab035cca2a0a2d6df342944b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/fixes.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/fixes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a35f230cadb418162b4f2732f66a824b12dd009f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/fixes.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/sklearn_wrapper.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/sklearn_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9383f656bf1b8fb3319a31bc3bd3c573d414d5b7 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/sklearn_wrapper.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4679cdbe9a7fcedbaac64ceae1c3ec7166b378b9 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/__pycache__/utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/fixes.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/fixes.py new file mode 100644 index 0000000000000000000000000000000000000000..e16819782526eab36ef3a131a408620f8006bb5e --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/fixes.py @@ -0,0 +1,83 @@ +try: + import sklearn +except ImportError: + sklearn = None + + +def _validate_data(estimator, *args, **kwargs): + """Validate the input data. + + wrapper for sklearn.utils.validation.validate_data or + BaseEstimator._validate_data depending on the scikit-learn version. + + TODO: remove when minimum scikit-learn version is 1.6 + """ + try: + # scikit-learn >= 1.6 + from sklearn.utils.validation import validate_data + + return validate_data(estimator, *args, **kwargs) + except ImportError: + return estimator._validate_data(*args, **kwargs) + except: + raise + + +def type_of_target(y, input_name="", *, raise_unknown=False): + def _raise_or_return(target_type): + """Depending on the value of raise_unknown, either raise an error or + return 'unknown'. + """ + if raise_unknown and target_type == "unknown": + input = input_name if input_name else "data" + raise ValueError(f"Unknown label type for {input}: {y!r}") + else: + return target_type + + target_type = sklearn.utils.multiclass.type_of_target( + y, input_name=input_name + ) + return _raise_or_return(target_type) + + +def _routing_enabled(): + """Return whether metadata routing is enabled. + + Returns: + enabled : bool + Whether metadata routing is enabled. If the config is not set, it + defaults to False. 
+ + TODO: remove when the config key is no longer available in scikit-learn + """ + return sklearn.get_config().get("enable_metadata_routing", False) + + +def _raise_for_params(params, owner, method): + """Raise an error if metadata routing is not enabled and params are passed. + + Parameters: + params : dict + The metadata passed to a method. + owner : object + The object to which the method belongs. + method : str + The name of the method, e.g. "fit". + + Raises: + ValueError + If metadata routing is not enabled and params are passed. + """ + caller = ( + f"{owner.__class__.__name__}.{method}" + if method + else owner.__class__.__name__ + ) + if not _routing_enabled() and params: + raise ValueError( + f"Passing extra keyword arguments to {caller} is only supported if" + " enable_metadata_routing=True, which you can set using" + " `sklearn.set_config`. See the User Guide" + " for more" + f" details. Extra parameters passed are: {set(params)}" + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/sklearn_wrapper.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/sklearn_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..4437014da2ef5a03adf6c128d5ff68a18ba6eccd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/sklearn_wrapper.py @@ -0,0 +1,488 @@ +import copy + +import numpy as np + +from keras.src.api_export import keras_export +from keras.src.models.cloning import clone_model +from keras.src.models.model import Model +from keras.src.wrappers.fixes import _routing_enabled +from keras.src.wrappers.fixes import _validate_data +from keras.src.wrappers.fixes import type_of_target +from keras.src.wrappers.utils import TargetReshaper +from keras.src.wrappers.utils import _check_model +from keras.src.wrappers.utils import assert_sklearn_installed + +try: + import sklearn + from sklearn.base import BaseEstimator + from 
sklearn.base import ClassifierMixin + from sklearn.base import RegressorMixin + from sklearn.base import TransformerMixin +except ImportError: + sklearn = None + + class BaseEstimator: + pass + + class ClassifierMixin: + pass + + class RegressorMixin: + pass + + class TransformerMixin: + pass + + +class SKLBase(BaseEstimator): + """Base class for scikit-learn wrappers. + + Note that there are sources of randomness in model initialization and + training. Refer to [Reproducibility in Keras Models]( + https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to + control randomness. + + Args: + model: `Model`. + An instance of `Model`, or a callable returning such an object. + Note that if input is a `Model`, it will be cloned using + `keras.models.clone_model` before being fitted, unless + `warm_start=True`. + The `Model` instance needs to be passed as already compiled. + If callable, it must accept at least `X` and `y` as keyword + arguments. Other arguments must be accepted if passed as + `model_kwargs` by the user. + warm_start: bool, defaults to `False`. + Whether to reuse the model weights from the previous fit. If `True`, + the given model won't be cloned and the weights from the previous + fit will be reused. + model_kwargs: dict, defaults to `None`. + Keyword arguments passed to `model`, if `model` is callable. + fit_kwargs: dict, defaults to `None`. + Keyword arguments passed to `model.fit`. These can also be passed + directly to the `fit` method of the scikit-learn wrapper. The + values passed directly to the `fit` method take precedence over + these. + + Attributes: + model_ : `Model` + The fitted model. + history_ : dict + The history of the fit, returned by `model.fit`. 
+ """ + + def __init__( + self, + model, + warm_start=False, + model_kwargs=None, + fit_kwargs=None, + ): + assert_sklearn_installed(self.__class__.__name__) + self.model = model + self.warm_start = warm_start + self.model_kwargs = model_kwargs + self.fit_kwargs = fit_kwargs + + def _more_tags(self): + return {"non_deterministic": True} + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.non_deterministic = True + return tags + + def __sklearn_clone__(self): + """Return a deep copy of the model. + + This is used by the `sklearn.base.clone` function. + """ + model = ( + self.model if callable(self.model) else copy.deepcopy(self.model) + ) + return type(self)( + model=model, + warm_start=self.warm_start, + model_kwargs=self.model_kwargs, + ) + + @property + def epoch_(self) -> int: + """The current training epoch.""" + return getattr(self, "history_", {}).get("epoch", 0) + + def set_fit_request(self, **kwargs): + """Set requested parameters by the fit method. + + Please see [scikit-learn's metadata routing]( + https://scikit-learn.org/stable/metadata_routing.html) for more + details. + + + Arguments: + kwargs : dict + Arguments should be of the form `param_name=alias`, and `alias` + can be one of `{True, False, None, str}`. + + Returns: + self + """ + if not _routing_enabled(): + raise RuntimeError( + "This method is only available when metadata routing is " + "enabled. You can enable it using " + "sklearn.set_config(enable_metadata_routing=True)." + ) + + self._metadata_request = sklearn.utils.metadata_routing.MetadataRequest( + owner=self.__class__.__name__ + ) + for param, alias in kwargs.items(): + self._metadata_request.score.add_request(param=param, alias=alias) + return self + + def _get_model(self, X, y): + if isinstance(self.model, Model): + return clone_model(self.model) + else: + args = self.model_kwargs or {} + return self.model(X=X, y=y, **args) + + def fit(self, X, y, **kwargs): + """Fit the model. 
+ + Args: + X: array-like, shape=(n_samples, n_features) + The input samples. + y: array-like, shape=(n_samples,) or (n_samples, n_outputs) + The targets. + **kwargs: keyword arguments passed to `model.fit` + """ + X, y = _validate_data(self, X, y) + y = self._process_target(y, reset=True) + model = self._get_model(X, y) + _check_model(model) + + fit_kwargs = self.fit_kwargs or {} + fit_kwargs.update(kwargs) + self.history_ = model.fit(X, y, **fit_kwargs) + + self.model_ = model + return self + + def predict(self, X): + """Predict using the model.""" + sklearn.base.check_is_fitted(self) + X = _validate_data(self, X, reset=False) + raw_output = self.model_.predict(X) + return self._reverse_process_target(raw_output) + + def _process_target(self, y, reset=False): + """Regressors are NOOP here, classifiers do OHE.""" + # This is here to raise the right error in case of invalid target + type_of_target(y, raise_unknown=True) + if reset: + self._target_encoder = TargetReshaper().fit(y) + return self._target_encoder.transform(y) + + def _reverse_process_target(self, y): + """Regressors are NOOP here, classifiers reverse OHE.""" + return self._target_encoder.inverse_transform(y) + + +@keras_export("keras.wrappers.SKLearnClassifier") +class SKLearnClassifier(ClassifierMixin, SKLBase): + """scikit-learn compatible classifier wrapper for Keras models. + + Note that there are sources of randomness in model initialization and + training. Refer to [Reproducibility in Keras Models]( + https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to + control randomness. + + Args: + model: `Model`. + An instance of `Model`, or a callable returning such an object. + Note that if input is a `Model`, it will be cloned using + `keras.models.clone_model` before being fitted, unless + `warm_start=True`. + The `Model` instance needs to be passed as already compiled. + If callable, it must accept at least `X` and `y` as keyword + arguments. 
Other arguments must be accepted if passed as + `model_kwargs` by the user. + warm_start: bool, defaults to `False`. + Whether to reuse the model weights from the previous fit. If `True`, + the given model won't be cloned and the weights from the previous + fit will be reused. + model_kwargs: dict, defaults to `None`. + Keyword arguments passed to `model`, if `model` is callable. + fit_kwargs: dict, defaults to `None`. + Keyword arguments passed to `model.fit`. These can also be passed + directly to the `fit` method of the scikit-learn wrapper. The + values passed directly to the `fit` method take precedence over + these. + + Attributes: + model_ : `Model` + The fitted model. + history_ : dict + The history of the fit, returned by `model.fit`. + classes_ : array-like, shape=(n_classes,) + The classes labels. + + Example: + Here we use a function which creates a basic MLP model dynamically + choosing the input and output shapes. We will use this to create our + scikit-learn model. + + ``` python + from keras.src.layers import Dense, Input, Model + + def dynamic_model(X, y, loss, layers=[10]): + # Creates a basic MLP model dynamically choosing the input and + # output shapes. + n_features_in = X.shape[1] + inp = Input(shape=(n_features_in,)) + + hidden = inp + for layer_size in layers: + hidden = Dense(layer_size, activation="relu")(hidden) + + n_outputs = y.shape[1] if len(y.shape) > 1 else 1 + out = [Dense(n_outputs, activation="softmax")(hidden)] + model = Model(inp, out) + model.compile(loss=loss, optimizer="rmsprop") + + return model + ``` + + You can then use this function to create a scikit-learn compatible model + and fit it on some data. 
+ + ``` python + from sklearn.datasets import make_classification + from keras.wrappers import SKLearnClassifier + + X, y = make_classification(n_samples=1000, n_features=10, n_classes=3) + est = SKLearnClassifier( + model=dynamic_model, + model_kwargs={ + "loss": "categorical_crossentropy", + "layers": [20, 20, 20], + }, + ) + + est.fit(X, y, epochs=5) + ``` + """ + + def _process_target(self, y, reset=False): + """Classifiers do OHE.""" + target_type = type_of_target(y, raise_unknown=True) + if target_type not in ["binary", "multiclass"]: + raise ValueError( + "Only binary and multiclass target types are supported." + f" Target type: {target_type}" + ) + if reset: + self._target_encoder = sklearn.pipeline.make_pipeline( + TargetReshaper(), + sklearn.preprocessing.OneHotEncoder(sparse_output=False), + ).fit(y) + self.classes_ = np.unique(y) + if len(self.classes_) == 1: + raise ValueError( + "Classifier can't train when only one class is present." + ) + return self._target_encoder.transform(y) + + def _more_tags(self): + # required to be compatible with scikit-learn<1.6 + return {"poor_score": True} + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.classifier_tags.poor_score = True + return tags + + +@keras_export("keras.wrappers.SKLearnRegressor") +class SKLearnRegressor(RegressorMixin, SKLBase): + """scikit-learn compatible regressor wrapper for Keras models. + + Note that there are sources of randomness in model initialization and + training. Refer to [Reproducibility in Keras Models]( + https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to + control randomness. + + Args: + model: `Model`. + An instance of `Model`, or a callable returning such an object. + Note that if input is a `Model`, it will be cloned using + `keras.models.clone_model` before being fitted, unless + `warm_start=True`. + The `Model` instance needs to be passed as already compiled. 
+ If callable, it must accept at least `X` and `y` as keyword + arguments. Other arguments must be accepted if passed as + `model_kwargs` by the user. + warm_start: bool, defaults to `False`. + Whether to reuse the model weights from the previous fit. If `True`, + the given model won't be cloned and the weights from the previous + fit will be reused. + model_kwargs: dict, defaults to `None`. + Keyword arguments passed to `model`, if `model` is callable. + fit_kwargs: dict, defaults to `None`. + Keyword arguments passed to `model.fit`. These can also be passed + directly to the `fit` method of the scikit-learn wrapper. The + values passed directly to the `fit` method take precedence over + these. + + Attributes: + model_ : `Model` + The fitted model. + + Example: + Here we use a function which creates a basic MLP model dynamically + choosing the input and output shapes. We will use this to create our + scikit-learn model. + + ``` python + from keras.src.layers import Dense, Input, Model + + def dynamic_model(X, y, loss, layers=[10]): + # Creates a basic MLP model dynamically choosing the input and + # output shapes. + n_features_in = X.shape[1] + inp = Input(shape=(n_features_in,)) + + hidden = inp + for layer_size in layers: + hidden = Dense(layer_size, activation="relu")(hidden) + + n_outputs = y.shape[1] if len(y.shape) > 1 else 1 + out = [Dense(n_outputs, activation="softmax")(hidden)] + model = Model(inp, out) + model.compile(loss=loss, optimizer="rmsprop") + + return model + ``` + + You can then use this function to create a scikit-learn compatible model + and fit it on some data. 
+ + ``` python + from sklearn.datasets import make_regression + from keras.wrappers import SKLearnRegressor + + X, y = make_regression(n_samples=1000, n_features=10) + est = SKLearnRegressor( + model=dynamic_model, + model_kwargs={ + "loss": "mse", + "layers": [20, 20, 20], + }, + ) + + est.fit(X, y, epochs=5) + ``` + """ + + def _more_tags(self): + # required to be compatible with scikit-learn<1.6 + return {"poor_score": True} + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.regressor_tags.poor_score = True + return tags + + +@keras_export("keras.wrappers.SKLearnTransformer") +class SKLearnTransformer(TransformerMixin, SKLBase): + """scikit-learn compatible transformer wrapper for Keras models. + + Note that this is a scikit-learn compatible transformer, and not a + transformer in the deep learning sense. + + Also note that there are sources of randomness in model initialization and + training. Refer to [Reproducibility in Keras Models]( + https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to + control randomness. + + Args: + model: `Model`. + An instance of `Model`, or a callable returning such an object. + Note that if input is a `Model`, it will be cloned using + `keras.models.clone_model` before being fitted, unless + `warm_start=True`. + The `Model` instance needs to be passed as already compiled. + If callable, it must accept at least `X` and `y` as keyword + arguments. Other arguments must be accepted if passed as + `model_kwargs` by the user. + warm_start: bool, defaults to `False`. + Whether to reuse the model weights from the previous fit. If `True`, + the given model won't be cloned and the weights from the previous + fit will be reused. + model_kwargs: dict, defaults to `None`. + Keyword arguments passed to `model`, if `model` is callable. + fit_kwargs: dict, defaults to `None`. + Keyword arguments passed to `model.fit`. These can also be passed + directly to the `fit` method of the scikit-learn wrapper. 
The + values passed directly to the `fit` method take precedence over + these. + + Attributes: + model_ : `Model` + The fitted model. + history_ : dict + The history of the fit, returned by `model.fit`. + + Example: + A common use case for a scikit-learn transformer, is to have a step + which gives you the embedding of your data. Here we assume + `my_package.my_model` is a Keras model which takes the input and gives + embeddings of the data, and `my_package.my_data` is your dataset loader. + + ``` python + from my_package import my_model, my_data + from keras.wrappers import SKLearnTransformer + from sklearn.frozen import FrozenEstimator # requires scikit-learn>=1.6 + from sklearn.pipeline import make_pipeline + from sklearn.ensemble import HistGradientBoostingClassifier + + X, y = my_data() + + trs = FrozenEstimator(SKLearnTransformer(model=my_model)) + pipe = make_pipeline(trs, HistGradientBoostingClassifier()) + pipe.fit(X, y) + ``` + + Note that in the above example, `FrozenEstimator` prevents any further + training of the transformer step in the pipeline, which can be the case + if you don't want to change the embedding model at hand. + """ + + def transform(self, X): + """Transform the data. + + Args: + X: array-like, shape=(n_samples, n_features) + The input samples. + + Returns: + X_transformed: array-like, shape=(n_samples, n_features) + The transformed data. 
+ """ + sklearn.base.check_is_fitted(self) + X = _validate_data(self, X, reset=False) + return self.model_.predict(X) + + def _more_tags(self): + # required to be compatible with scikit-learn<1.6 + return { + "preserves_dtype": [], + } + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.transformer_tags.preserves_dtype = [] + return tags diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..301c4b562912556e154b81787740ac1481b7337b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/wrappers/utils.py @@ -0,0 +1,87 @@ +try: + import sklearn + from sklearn.base import BaseEstimator + from sklearn.base import TransformerMixin +except ImportError: + sklearn = None + + class BaseEstimator: + pass + + class TransformerMixin: + pass + + +def assert_sklearn_installed(symbol_name): + if sklearn is None: + raise ImportError( + f"{symbol_name} requires `scikit-learn` to be installed. " + "Run `pip install scikit-learn` to install it." + ) + + +def _check_model(model): + """Check whether the model need sto be compiled.""" + # compile model if user gave us an un-compiled model + if not model.compiled or not model.loss or not model.optimizer: + raise RuntimeError( + "Given model needs to be compiled, and have a loss and an " + "optimizer." + ) + + +class TargetReshaper(TransformerMixin, BaseEstimator): + """Convert 1D targets to 2D and back. + + For use in pipelines with transformers that only accept + 2D inputs, like OneHotEncoder and OrdinalEncoder. + + Attributes: + ndim_ : int + Dimensions of y that the transformer was trained on. + """ + + def fit(self, y): + """Fit the transformer to a target y. + + Returns: + TargetReshaper + A reference to the current instance of TargetReshaper. 
+ """ + self.ndim_ = y.ndim + return self + + def transform(self, y): + """Makes 1D y 2D. + + Args: + y : np.ndarray + Target y to be transformed. + + Returns: + np.ndarray + A numpy array, of dimension at least 2. + """ + if y.ndim == 1: + return y.reshape(-1, 1) + return y + + def inverse_transform(self, y): + """Revert the transformation of transform. + + Args: + y: np.ndarray + Transformed numpy array. + + Returns: + np.ndarray + If the transformer was fit to a 1D numpy array, + and a 2D numpy array with a singleton second dimension + is passed, it will be squeezed back to 1D. Otherwise, it + will eb left untouched. + """ + sklearn.base.check_is_fitted(self) + xp, _ = sklearn.utils._array_api.get_namespace(y) + if self.ndim_ == 1 and y.ndim == 2: + return xp.squeeze(y, axis=1) + return y diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae9ad6b0a698a15ae90424a16a653830b78cbe45 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/accelerator_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/accelerator_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dd3a636bfd934c04d691106aa83f6077876e10b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/accelerator_util.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/api.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40eaa9c3630e2a3760997a61a87721bc104a21e0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/api.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/config.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f92349a445e47acd282b406cdcccdce59f5d15c0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/__pycache__/config.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83c34163aa614d8b6f85a6989e93887eb8f6891e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/multi_client_test_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/multi_client_test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00ca27f603aad769f1f42670a9888b4bd77d9c5c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/multi_client_test_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_backend_name.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_backend_name.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99449d6ce295712cdca7ecb33568ac0328cb4165 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_backend_name.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_backend_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_backend_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31f323ebba4ba9101e353129fd8661bab4883a99 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_backend_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_util.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7745019fecf17453615bd8b17e31aa40b968327c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_util_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_util_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f720a25ae5a848ee72ae8598c1e360f053a97efd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/__pycache__/test_util_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/multi_client_test_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/multi_client_test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..dd4a69f14f77e43bf4017c97f7b9d6e9571c012b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/multi_client_test_util.py @@ -0,0 +1,144 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for multi-client setup.""" +import os +import sys + +from absl import flags +import portpicker + +from tensorflow.dtensor.python.tests import test_backend_util +from tensorflow.python.platform import test as tf_test + + +_NUM_LOCAL_DEVICES = flags.DEFINE_integer( + 'num_local_devices', 4, + 'Number of local devices. 4 is the only allowed value for TPU.') +_NUM_CLIENTS = flags.DEFINE_integer( + 'num_clients', 2, + 'Number of clients. 0 for local mode. 2 is the only allowed value for TPU.') + + +def pick_unused_port(): + """Helper function to return an unused port.""" + return portpicker.pick_unused_port() + + +def multi_client_main(client_config_function): + """Creates a Flock of TensorFlow Processes on localhost.""" + flags.FLAGS(sys.argv, known_only=True) + num_clients = _NUM_CLIENTS.value + num_process = num_clients or 1 + num_local_devices = _NUM_LOCAL_DEVICES.value + + # No GPU visible to the flock controller. + os.environ['CUDA_VISIBLE_DEVICES'] = '' + os.environ['HIP_VISIBLE_DEVICES'] = '' + + # Python multiprocess module in OSS. + mp_context = test_backend_util.get_mp_context() + + print('Check per client log in Test artifacts.', flush=True) + + # Inverts the order of ports intentionally to rule out ordering bugs. 
+ server_ports = sorted( + [pick_unused_port() for _ in range(num_process)], reverse=True + ) + + additional_ports = sorted([pick_unused_port() for _ in range(num_process)]) + + # Starts processes + procs = [] + for client_idx in range(num_process): + proc = mp_context.Process( + target=run_client, + args=(client_idx, num_clients, server_ports, additional_ports, + num_local_devices, client_config_function), + name=f'Client-{client_idx}', + ) + proc.start() + procs.append(proc) + + # Joins processes + exitcode = 0 + for proc in procs: + proc.join() + if proc.exitcode != 0: + exitcode = proc.exitcode + + sys.exit(exitcode) + + +def run_client(idx, num_clients, server_ports, additional_ports, + num_local_devices, client_config_function): + """Runs test.main() from a DTensor Client process on localhost. + + This function runs in a separate process so that the eager context is + properly separated, which resembles real world multi-client setup. + + Virtual devices are configured before test.main() is called. + + Each client is configured to only have access to the physical GPU device + corresponding to its client id via CUDA_VISIBLE_DEVICES/HIP_VISIBLE_DEVICES. + + Each client is configured to only have access to some TPU cores + corresponding to its client id via flags. + + The clients redirect stdout and stderr to files under Test Artifacts. + + Args: + idx: integer task number represents the client's id from global picture. + num_clients: total number of clients. + server_ports: A list of ports that is allocated and to be used to construct + GRPC server. server_ports[idx] will be the GRPC server on the + corresponding client. + additional_ports: A list of ports that is allocated and to be used to + construct the backends. + num_local_devices: Number of devices per client. + client_config_function: A function, for each of the client to config the + local environment variables, etc. 
Note that the function will be called + with a dict of extra params, eg: + {'num_clients': 2 + 'client_id': 0, + 'worker_jobs': ['localhost:port1', 'localhost:port2'], + 'num_devices': 4, + } + """ + test_backend_util.slice_host_devices_for_multiworker( + num_clients, idx, additional_ports + ) + + artifact_dir = os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', '') + + # Redirect extra client's stderr/stdout to undeclared outputs on sponge. + if artifact_dir: + with open( + os.path.join(artifact_dir, f'test-client-process-{idx}.log'), + 'wb') as fp: + os.dup2(fp.fileno(), 1) + os.dup2(fp.fileno(), 2) + + # Set up cluster and enable collectives. + worker_jobs = [f'localhost:{port:06d}' for port in server_ports] + client_config_func_param = { + 'num_clients': num_clients, + 'client_id': idx, + 'worker_jobs': worker_jobs, + 'num_devices': num_local_devices, + } + client_config_function(client_config_func_param) + + # The following function call never returns. + tf_test.main() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_backend_name.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_backend_name.py new file mode 100644 index 0000000000000000000000000000000000000000..0aa665b58bc35934e70dbba6684939351969da77 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_backend_name.py @@ -0,0 +1,40 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""List of test backend names.""" + +import enum +import os + + +# LINT.IfChange(backend_name) +class DTensorTestUtilBackend(enum.Enum): + """DTensor backend the test is being run on.""" + UNSPECIFIED = 'unspecified' + CPU = 'cpu' + GPU = 'gpu' + GPU_2DEVS_BACKEND = '2gpus' + TPU = 'tpu' + TPU_STREAM_EXECUTOR = 'tpu_se' + TPU_V3_DONUT_BACKEND = 'tpu_v3_2x2' + TPU_V4_DONUT_BACKEND = 'tpu_v4_2x2' + PATHWAYS = 'pw' + PATHWAYS_V3_DONUT_BACKEND = 'pw_v3_2x2' + + +DTENSOR_TEST_UTIL_BACKEND = DTensorTestUtilBackend( + os.getenv('DTENSOR_TEST_UTIL_BACKEND', default='unspecified') +) + +# LINT.ThenChange() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_backend_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_backend_util.py new file mode 100644 index 0000000000000000000000000000000000000000..02fc82a71b7543482a4d6e07e9858468718365e4 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_backend_util.py @@ -0,0 +1,81 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility to set up DTensor backend in tests.""" + +# LINT.IfChange +import multiprocessing +import os + +from tensorflow.dtensor.python import accelerator_util +from tensorflow.dtensor.python import config +from tensorflow.dtensor.python import layout as layout_lib +from tensorflow.dtensor.python.tests.test_backend_name import DTENSOR_TEST_UTIL_BACKEND +from tensorflow.python.platform import test as tf_test + + +class DTensorTestBackendConfigurator: + """Configurate test backends.""" + + def __init__(self, test_case: tf_test.TestCase): + self._test_case = test_case + # TODO(b/260771689): Refactor common backend set up logic to here. + + def tearDown(self): + # Only need to explicitly shuts down TPU system in TFRT since in current + # runtime, the shutdown is done in initialization process. + if accelerator_util.is_initialized(): + accelerator_util.shutdown_accelerator_system() + + +def config_test_mesh(mesh: layout_lib.Mesh): + """No Op. + + Args: + mesh: The DTensor mesh. + """ + if config.backend_is_pw(): + del mesh + + +def slice_host_devices_for_multiworker(num_clients, client_id, ports): + """Configure the current process to only use a slice of devices.""" + if num_clients == 0: + # All GPUs are visible to the client. + del os.environ['CUDA_VISIBLE_DEVICES'] + del os.environ['HIP_VISIBLE_DEVICES'] + else: + # Make the client_id-th GPU visible to the client. + os.environ['CUDA_VISIBLE_DEVICES'] = f'{client_id}' + os.environ['HIP_VISIBLE_DEVICES'] = f'{client_id}' + # Make the client_id-th (4x) TPU cores visible to the client. + os.environ['CLOUD_TPU_TASK_ID'] = f'{client_id}' + if 'tpu' in DTENSOR_TEST_UTIL_BACKEND.value: + del ports # Unused due to lack of implementation. + # We need to find out if there is a way to slice a CloudTPU host to + # multiple workers. + raise NotImplementedError( + 'OSS multi-client tests of TPU is not supported.' 
+ ) + + +def get_mp_context(): + return multiprocessing.get_context('forkserver') + + +def handle_test_main(main, *args, **kwargs): + main(*args, **kwargs) + + +# LINT.ThenChange(test_backend_util.py) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..50aa465fd969d214b473567f581ca8060991b15f --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_util.py @@ -0,0 +1,411 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility methods for DTensor testing.""" + +import collections +import copy +import itertools +import json +import os +import typing + +from absl import flags +from absl.testing import parameterized +import numpy as np + +# pylint: disable=g-direct-tensorflow-import +from tensorflow.dtensor.python import accelerator_util +from tensorflow.dtensor.python import api +from tensorflow.dtensor.python import config +from tensorflow.dtensor.python import layout as layout_lib +from tensorflow.dtensor.python import numpy_util +from tensorflow.dtensor.python.config import is_gpu_present # pylint: disable=unused-import +from tensorflow.dtensor.python.config import is_tpu_present # pylint: disable=unused-import +from tensorflow.dtensor.python.config import preferred_device_type # pylint: disable=unused-import +from tensorflow.dtensor.python.config import use_multi_device_mode # pylint: disable=unused-import +from tensorflow.dtensor.python.tests import test_backend_util +from tensorflow.dtensor.python.tests.test_backend_name import DTENSOR_TEST_UTIL_BACKEND +from tensorflow.dtensor.python.tests.test_backend_name import DTensorTestUtilBackend +from tensorflow.dtensor.python.tests.test_backend_util import DTensorTestBackendConfigurator +from tensorflow.python.compat import v2_compat +from tensorflow.python.eager import context +from tensorflow.python.framework import config as tf_config +from tensorflow.python.framework import device as tf_device +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.platform import test as tf_test +from tensorflow.python.util import numpy_compat + + +# pylint: enable=g-direct-tensorflow-import + +# DTensor only runs with TF V2. +v2_compat.enable_v2_behavior() + +DEFAULT_TOL = 1e-5 + +_DEFAULT_GPU_MEMORY_LIMIT = 1024 # 1G + + +def get_use_xla_spmd(device_type): + """Returns True when device_type is TPU and environment variable is set. 
+ + Args: + device_type: A str representing the type of device on the mesh. + + Returns: + bool: True when device_type is TPU and environment variable is set. + """ + return device_type == 'TPU' and '0' != os.environ.get( + 'DTENSOR_TEST_USE_XLA_SPMD', '0' + ) + + +def create_device_ids_array(shape): + device_count = np.prod(shape) + return np.arange(device_count).reshape(shape) + + +def create_device_array(shape, device_type): + device_count = np.prod(shape) + return numpy_compat.np_asarray([ + tf_device.DeviceSpec( # pylint: disable=g-complex-comprehension + job='localhost/replica:0/task:0', + device_type=device_type, + device_index=i) for i in range(device_count) + ]).reshape(shape) + + +def create_device_list(shape, device_type): + devices = create_device_array(shape, device_type) + return np.ravel(devices).tolist() + + +def reset_context(): + context._reset_context() # pylint: disable=protected-access + + +def reset_logical_devices(device_type, count): + """Resets logical devices for CPU/GPU. + + Logical devices can only be instantiated once on a particular context. For + now, context re-use is triggering some function duplication errors, so we + reset the context on each call. + + Args: + device_type: The device_type to reset. + count: numbers of virtual device to reset to. 
+ """ + reset_context() + devices = tf_config.list_physical_devices(device_type) + if device_type.upper() not in ('CPU', 'GPU'): + raise ValueError('resetting logical device for non-supported device type : ' + '%s' % device_type) + + if count < len(devices): + devices = devices[:count] + tf_config.set_visible_devices(devices, device_type=device_type.upper()) + + for i, device in enumerate(devices): + n = (i + 1) * count // len(devices) - i * count // len(devices) + assert n > 0 # guaranteed if count >= len(devices) + configs = [] + for ordinal in range(n): + if device_type.upper() == 'GPU': + dev_config = context.LogicalDeviceConfiguration( + memory_limit=_DEFAULT_GPU_MEMORY_LIMIT, + experimental_device_ordinal=ordinal) + else: + dev_config = context.LogicalDeviceConfiguration() + configs.append(dev_config) + + tf_config.set_logical_device_configuration(device, configs) + + +def list_local_logical_devices(device_type): + """Returns a list of local logial devices.""" + + # When coordinator service is enabled, list_logical_devices returns + # a global list. + devices = tf_config.list_logical_devices(device_type) + + def is_local(device): + spec = tf_device.DeviceSpec.from_string(device.name) + if spec.job is None or spec.job == 'localhost': + return True + elif spec.job == config.job_name() and spec.task == config.client_id(): + return True + return False + + return [d for d in devices if is_local(d)] + + +def is_tfrt_enabled(): + return context.is_tfrt_enabled() + + +FLAGS = flags.FLAGS + + +class DTensorBaseTest(tf_test.TestCase, parameterized.TestCase): + """Provides comparison helper for dtensor vs local results.""" + + @classmethod + def setUpClass(cls): + super(DTensorBaseTest, cls).setUpClass() + + def setUp(self): + super().setUp() + self._backend_configurator = DTensorTestBackendConfigurator(self) + + def tearDown(self): + # Make sure all async ops finish. 
    try:
      context.async_wait()
    finally:
      # TODO(hthu): Remove the reset once we fixed the CopyToMesh with
      # DefaultMesh placement issue.
      reset_dtensor()

      # NOTE(review): the diff formatting loses indentation here — these two
      # calls are placed inside `finally` so they run even if async_wait
      # raises; confirm against upstream.
      self._backend_configurator.tearDown()
      super().tearDown()

  @staticmethod
  def configTestMesh(  # pylint: disable=invalid-name
      device_type_mesh_map: typing.Dict[typing.Text, layout_lib.Mesh]
  ) -> layout_lib.Mesh:
    """Configs corresponding mesh given test context.

    If runs on a CPU mesh, set virtual device on CPU.
    If runs on a GPU mesh, sets virtual device on GPU with proper memory limits.
    if runs on a TPU mesh, initializes TPU system.

    Args:
      device_type_mesh_map: A dictionary containing device_type -> mesh mapping.

    Returns:
      A properly configured mesh for use in test.
    """
    reset_context()

    def get_mesh(device_type):
      # Fail fast when the caller did not provide a mesh for the device type
      # that this environment actually has.
      mesh = device_type_mesh_map.get(device_type, None)
      if mesh is None:
        raise ValueError('Requires a %s mesh to run test on %s.' %
                         (device_type, device_type))
      return mesh

    mesh = None
    # Device preference order: TPU > GPU > CPU.
    if is_tpu_present():
      mesh = get_mesh('TPU')
      reset_context()
      accelerator_util.initialize_accelerator_system('TPU')
    elif tf_config.list_physical_devices('GPU'):
      mesh = get_mesh('GPU')
      reset_logical_devices('GPU', np.prod(mesh.shape()))
      accelerator_util.initialize_accelerator_system('GPU')
    else:
      mesh = get_mesh('CPU')
      reset_logical_devices('CPU', np.prod(mesh.shape()))
      accelerator_util.initialize_accelerator_system('CPU')

    test_backend_util.config_test_mesh(mesh)

    return mesh

  def skipForDeviceType(  # pylint: disable=invalid-name
      self,
      device_type: typing.List[str],
      reason: str,
      unless_device_count_equals_to=None):
    """Skip the test for the specific device_type.

    Args:
      device_type: list of device types, one of "CPU", "GPU", or "TPU".
      reason: string that describes the reason for skipping the test.
      unless_device_count_equals_to: Optional int. This parameter only works if
        device_type is "TPU".
        If set, the test will be skipped unless the number
        of TPUs equals to the specified count.
    """
    physical_device_types = set(
        [d.device_type for d in tf_config.list_physical_devices()])
    for device in device_type:
      if device == 'TPU' and is_tpu_present():
        if unless_device_count_equals_to is None:
          self.skipTest(reason)
        elif len(list_local_logical_devices(
            device)) != unless_device_count_equals_to:
          self.skipTest(reason)
      if device == 'CPU' and len(
          physical_device_types) == 1 and 'CPU' in physical_device_types:
        # Make sure we skip when only `CPU` is present.
        self.skipTest(reason)
      if device == 'GPU' and 'GPU' in physical_device_types:
        self.skipTest(reason)

  def skipForTfrt(self, reason: str):  # pylint: disable=invalid-name
    """Skips the test when running under the TFRT runtime."""
    if is_tfrt_enabled():
      self.skipTest(reason)

  def skipTest(self, reason):  # pylint: disable=invalid-name
    """Skips the test, tearing down the backend configurator first."""
    # skipTest() may be called in super().setUp(), i.e. possibly before our
    # own setUp() has created the configurator — hence the hasattr guard.
    if hasattr(self, '_backend_configurator'):
      self._backend_configurator.tearDown()
    super().skipTest(reason)

  def skipForPathways(self, reason: str):  # pylint: disable=invalid-name
    """Skips the test when running on the Pathways (PW) backend."""
    if config.backend_is_pw():
      self.skipTest(reason)

  def assertDTensorEqual(
      self,  # pylint: disable=invalid-name
      expected_result,
      expected_layout,
      result_dtensor,
      tol=DEFAULT_TOL):
    """Asserts DTensor is of the particular value."""
    if issubclass(
        type(result_dtensor), resource_variable_ops.BaseResourceVariable):
      # Compare against the variable's current value, not the variable object.
      result_dtensor = result_dtensor.value()
    if expected_layout is not None:
      # This, the assertEqual, is a pure proto raw bytes comparison. To make it
      # human-readable, use the `to_string` api for Layout for the dedicated msg
      # field.
      #
      # Furthermore, as the mesh part is very long and usually identical, try
      # to cut it as well, to make the message easier to read.
      expected_str = expected_layout.to_string()
      got_str = api.fetch_layout(result_dtensor).to_string()
      index_for_mesh = expected_str.find('mesh:')
      if index_for_mesh != -1 and got_str.find(
          expected_str[index_for_mesh:]) != -1:
        # The mesh part is the same; cut it from both strings so the assertion
        # message is more readable.
        expected_str = expected_str[:index_for_mesh]
        got_str = got_str[:got_str.find('mesh:')]

      self.assertEqual(
          api.fetch_layout(result_dtensor),
          expected_layout,
          msg=(
              '=======\nexpected layout is\n {}\n\nwhile got layout is\n {}\n'
              .format(expected_str, got_str)
          ),
      )

    layout = api.fetch_layout(result_dtensor)
    # Per-device components of the DTensor, as numpy arrays.
    unpacked = [t.numpy() for t in api.unpack(result_dtensor)]

    # Check global shape.
    self.assertAllEqual(expected_result.shape, result_dtensor.shape)

    result_dtensor = numpy_util.to_numpy(result_dtensor)

    # Check dtype.
    # Note: This check needs to be after result_dtensor is converted
    # into numpy, due to failure with Numpy version 1.18.5.
    self.assertEqual(
        expected_result.dtype, result_dtensor.dtype, result_dtensor
    )

    # Check value on concatenated result DTensor.
    self.assertAllClose(expected_result, result_dtensor, atol=tol, rtol=tol)

    # In addition to checking the 'concatenated' DTensor, we also check that
    # all "replicated" parts are the same.
    #
    # The algorithm is simple:
    # 1. For a mesh with topology (x,y,z,p), and a DTensor with layout ('',z,x).
    # 2. Create some data structures:
    #    - create a mapping from device id (called offset below) to mesh
    #      location. For the mesh above, loc {x:1,y:2,z:2,p:0} means the device
    #      is located at those coordinates in the 4-D mesh.
    #    - create a mapping from mesh location to device id.
    # 3. Find all replicated mesh dimension names, i.e., 'y' and `p` in the
    #    example above.
    # 4. Iterate over all unpacked components, translate the offset (device id)
    #    to mesh location, called (x',y',z',p').
    #    - For `y`, which is a replicated dim in the mesh, check all unpacked
    #      components at (x',*,z',p') are same as the component at (x',0,z',p').
    #    - For `p`, which is also a replicated dim in the mesh, check all
    #      unpacked components at (x',y',z',*) are same as the component at
    #      (x',y',z',0).

    def hash_key(loc):
      """Returns a hashable key for a mesh-location dict."""
      # A Python dict is unhashable. Create a sorted dict and dump it as a
      # JSON string instead.
      d = collections.OrderedDict(sorted(loc.items(), key=lambda x: x[0]))
      return json.dumps(d)

    offset_to_mesh_loc_dict = layout.mesh.unravel_index()
    # Inverse mapping: mesh location (hashed) -> device id.
    mesh_loc_to_offset_dict = {}
    for offset, loc in offset_to_mesh_loc_dict.items():
      mesh_loc_to_offset_dict[hash_key(loc)] = offset

    # pylint: disable=protected-access
    replicated_dims = [
        x for x in layout.mesh.dim_names if x not in layout.sharding_specs
    ]
    # pylint: enable=protected-access

    for offset, tensor in enumerate(unpacked):
      mesh_loc = offset_to_mesh_loc_dict[offset]
      for dim_sharding in replicated_dims:
        if mesh_loc[dim_sharding] != 0:
          mesh_loc = copy.deepcopy(mesh_loc)  # deepcopy as we will mutate
          mesh_loc[dim_sharding] = 0
          offset = mesh_loc_to_offset_dict[hash_key(mesh_loc)]
          # tol should be as low as possible as replicas should match
          # "exactly"; so we ignore the `tol` passed by the caller and use the
          # default one.
          self.assertAllClose(tensor, unpacked[offset])


def product(*lists):
  """Makes a product of named parameters lists.

  Each element of `lists` should be a tuple of tuples of the form
  (("test1", ...), ("test2", ...), ...). Returns the Cartesian product of the
  lists with the test-case labels concatenated.
  """
  return [  # pylint: disable=g-complex-comprehension
      (''.join(p[0]
               for p in elt), *sum((p[1:]
                                    for p in elt), ()))
      for elt in itertools.product(*lists)
  ]


def reset_dtensor():
  """Resets the singleton DTensor Device.

  This behavior is not generally exposed and only meant to be used in tests.
  """
  api._reset()  # pylint: disable=protected-access


# Public API of this test-util module, for `from ... import *` users.
__all__ = [
    'DEFAULT_TOL',
    'DTensorTestUtilBackend',
    'DTENSOR_TEST_UTIL_BACKEND',
    'create_device_ids_array',
    'create_device_array',
    'create_device_list',
    'reset_context',
    'reset_logical_devices',
    'list_local_logical_devices',
    'is_tfrt_enabled',
    'FLAGS',
    'DTensorBaseTest',
    'product',
    'reset_dtensor',
    'is_tpu_present',
    'is_gpu_present',
    'use_multi_device_mode',
]
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_util_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_util_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..89513cff54022136003753c64b3d16a3188e7ecf
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/dtensor/python/tests/test_util_ops.py
@@ -0,0 +1,658 @@
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods for DTensor testing."""
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import special_math_ops


def expand_test_config(op_list, test_configs):
  """Returns a list of test case args that covers ops and test_configs.

  The list is a Cartesian product between op_list and test_configs.

  Args:
    op_list: A list of dicts, with items keyed by 'testcase_name' and 'op'.
      Available lists are defined later in this module.
    test_configs: A list of dicts, additional kwargs to be appended for each
      test parameters.

  Returns:
    test_configurations: a list of test parameters that covers all
      provided ops in op_list and args in test_configs.
  """
  expanded = []
  for op_info in op_list:
    base_name = op_info['testcase_name']
    for index, extra_kwargs in enumerate(test_configs):
      # Merge the per-op info with the extra config; the merged test case is
      # disambiguated by suffixing the config index to the op's name.
      case = {**op_info, **extra_kwargs}
      case['testcase_name'] = '%s_%d' % (base_name, index)
      expanded.append(case)
  return expanded


# Disable pyformat for this block to force compact style.
# pyformat: disable
#
# Disable g-long-lambda to make the unit test suits compact (avoid def new func)
# pylint: disable=g-long-lambda

# Elementwise ops of a single tensor argument. Each entry is a
# `parameterized.named_parameters`-style dict keyed by 'testcase_name'/'op'.
UNARY_OPS = [
    {'testcase_name': 'Identity', 'op': array_ops.identity},
    {'testcase_name': 'ZerosLike', 'op': array_ops.zeros_like_v2},
    {'testcase_name': 'Abs', 'op': math_ops.abs},
    {'testcase_name': 'Negative', 'op': gen_math_ops.neg},
    {'testcase_name': 'Cast', 'op': lambda x: math_ops.cast(x, dtypes.int32)},
    {'testcase_name': 'ErfOp', 'op': gen_math_ops.erf},
    {'testcase_name': 'Softmax', 'op': nn_ops.softmax_v2},
    {'testcase_name': 'LogSoftmax', 'op': nn_ops.log_softmax_v2},
    {'testcase_name': 'StopGradient', 'op': array_ops.stop_gradient},
    {'testcase_name': 'Exp', 'op': math_ops.exp},
    {'testcase_name': 'Sqrt', 'op': math_ops.sqrt},
    {'testcase_name': 'Rsqrt', 'op': math_ops.rsqrt},
    {'testcase_name': 'Reciprocal', 'op': gen_math_ops.reciprocal},
    {'testcase_name': 'Relu', 'op': gen_nn_ops.relu},
    {'testcase_name': 'Square', 'op': gen_math_ops.square},
    {'testcase_name': 'Tanh', 'op': gen_math_ops.tanh},
    {'testcase_name': 'Cos', 'op': gen_math_ops.cos},
    {'testcase_name': 'Sigmoid', 'op': math_ops.sigmoid},
    {'testcase_name': 'Acos', 'op': math_ops.acos},
    {'testcase_name': 'Acosh', 'op': gen_math_ops.acosh},
    {'testcase_name': 'Angle', 'op': math_ops.angle},
    {'testcase_name': 'Asin', 'op': gen_math_ops.asin},
    {'testcase_name': 'Asinh', 'op': gen_math_ops.asinh},
    {'testcase_name': 'Atan', 'op': gen_math_ops.atan},
    {'testcase_name': 'Bessel0e', 'op': special_math_ops.bessel_i0e},
    {'testcase_name': 'Bessel1e', 'op': special_math_ops.bessel_i1e},
    {'testcase_name': 'Bitcast',
     'op': lambda x: gen_array_ops.bitcast(x, type=dtypes.int32)},
    {'testcase_name': 'Ceil', 'op': math_ops.ceil},
    {'testcase_name': 'CheckNumbers',
     'op': lambda x: gen_array_ops.check_numerics(x, message='bug')},
    {'testcase_name': 'ClipByValue',
     'op': lambda x: clip_ops.clip_by_value(x, 1.5, 2.5)},
    {'testcase_name': 'Conj', 'op': math_ops.conj},
    {'testcase_name': 'Cosh', 'op': gen_math_ops.cosh},
    {'testcase_name': 'Digamma', 'op': gen_math_ops.digamma},
    {'testcase_name': 'ComplexAbs',
     'op': lambda x: gen_math_ops.complex_abs(
         x=math_ops.cast(x, dtypes.complex64), Tout=float, name='raw')},
    {'testcase_name': 'Sign', 'op': math_ops.sign},
    {'testcase_name': 'Elu', 'op': gen_nn_ops.elu},
    {'testcase_name': 'Erfc', 'op': gen_math_ops.erfc},
    {'testcase_name': 'Expm1', 'op': gen_math_ops.expm1},
    {'testcase_name': 'Floor', 'op': math_ops.floor},
    {'testcase_name': 'Imag', 'op': math_ops.imag},
    {'testcase_name': 'Inv', 'op': lambda x: gen_math_ops.inv(x=x, name='Inv')},
    {'testcase_name': 'IsInf', 'op': gen_math_ops.is_inf},
    {'testcase_name': 'IsNan', 'op': gen_math_ops.is_nan},
    {'testcase_name': 'LeakyRelu',
     'op': lambda x: nn_ops.leaky_relu((x - 2), alpha=0.3)},
    {'testcase_name': 'Lgamma', 'op': gen_math_ops.lgamma},
    {'testcase_name': 'Log1p', 'op': gen_math_ops.log1p},
    {'testcase_name': 'Ndtri', 'op': lambda x: math_ops.ndtri(x / 100)},
    {'testcase_name': 'Selu', 'op': gen_nn_ops.selu},
    {'testcase_name': 'Sin', 'op': gen_math_ops.sin},
    {'testcase_name': 'Sinh', 'op': gen_math_ops.sinh},
    {'testcase_name': 'Softplus', 'op': math_ops.softplus},
    {'testcase_name': 'Softsign', 'op': gen_nn_ops.softsign},
    {'testcase_name': 'Tan', 'op': gen_math_ops.tan},
    {'testcase_name': 'Round', 'op': math_ops.round},
    {'testcase_name': 'Rint', 'op': gen_math_ops.rint},
    {'testcase_name': 'Relu6', 'op': nn_ops.relu6},
    {'testcase_name': 'Real', 'op': math_ops.real},
    {'testcase_name': 'PreventGradient',
     'op': lambda x: gen_array_ops.prevent_gradient(input=x)},
]

# Binary elementwise ops valid for any dtype; all support broadcasting.
BINARY_ANY_TYPE_OPS_WITH_BROADCASTING_SUPPORT = [
    {'testcase_name': 'Add', 'op': math_ops.add},
    {'testcase_name': 'Subtract', 'op': math_ops.subtract},
    {'testcase_name': 'Multiply', 'op': math_ops.multiply},
    {'testcase_name': 'Maximum', 'op': gen_math_ops.maximum},
    {'testcase_name': 'Minimum', 'op': gen_math_ops.minimum},
    {'testcase_name': 'Squared_Difference',
     'op': gen_math_ops.squared_difference},
    {'testcase_name': 'GreaterEqual', 'op': gen_math_ops.greater_equal},
    {'testcase_name': 'Equal', 'op': math_ops.equal},
    {'testcase_name': 'NotEqual', 'op': math_ops.not_equal},
    {'testcase_name': 'LessEqual', 'op': gen_math_ops.less_equal},
    {'testcase_name': 'Less', 'op': gen_math_ops.less},
    {'testcase_name': 'Pow', 'op': math_ops.pow},
]

# Float-only broadcasting binary ops, plus the any-dtype ones.
BINARY_FLOAT_OPS_WITH_BROADCASTING_SUPPORT = [
    {'testcase_name': 'Real_Divide', 'op': math_ops.divide},
    {'testcase_name': 'DivNoNan', 'op': math_ops.div_no_nan},
] + BINARY_ANY_TYPE_OPS_WITH_BROADCASTING_SUPPORT

# Integer-only broadcasting binary ops, plus the any-dtype ones.
BINARY_INT_OPS_WITH_BROADCASTING_SUPPORT = [
    {'testcase_name': 'LeftShift', 'op': gen_bitwise_ops.left_shift},
    {'testcase_name': 'RightShift', 'op': gen_bitwise_ops.right_shift},
    {'testcase_name': 'BitwiseOr', 'op': gen_bitwise_ops.bitwise_or},
    {'testcase_name': 'BitwiseAnd', 'op': gen_bitwise_ops.bitwise_and},
    {'testcase_name': 'BitwiseXor', 'op': gen_bitwise_ops.bitwise_xor},
    {'testcase_name': 'TruncateDiv', 'op': gen_math_ops.truncate_div},
    {'testcase_name': 'TruncateMod', 'op': gen_math_ops.truncate_mod},
] + BINARY_ANY_TYPE_OPS_WITH_BROADCASTING_SUPPORT

BINARY_BOOL_OPS = [
    {'testcase_name': 'LogicalOr', 'op': gen_math_ops.logical_or},
]

# Float binary ops (largely gradient kernels), plus the broadcasting float ops.
BINARY_FLOAT_OPS = [
    {'testcase_name': 'RsqrtGrad',
     'op': lambda y, dy: gen_math_ops.rsqrt_grad(y=y, dy=dy)},
    {'testcase_name': 'SqrtGrad',
     'op': lambda y, dy: gen_math_ops.sqrt_grad(y=y, dy=dy)},
    {'testcase_name': 'Atan2', 'op': gen_math_ops.atan2},
    {'testcase_name': 'Betainc',
     'op': lambda a, b: gen_math_ops.betainc(a, b, 1.0)},
    {'testcase_name': 'Complex', 'op': math_ops.complex},
    {'testcase_name': 'EluGrad',
     'op': lambda x, y: gen_nn_ops.elu_grad(
         gradients=x, outputs=y, name='op_elugrad')},
    {'testcase_name': 'Igamma', 'op': gen_math_ops.igamma},
    {'testcase_name': 'IgammaGradA',
     'op': lambda a, x: gen_math_ops.igamma_grad_a(
         a=a, x=x, name='IgammaGradA')},
    {'testcase_name': 'LeakyReluGrad',
     'op': lambda x, y: gen_nn_ops.leaky_relu_grad(gradients=x, features=y)},
    {'testcase_name': 'MulNoNan',
     'op': lambda x, y: gen_math_ops.mul_no_nan(x=x, y=y)},
    {'testcase_name': 'NextAfter', 'op': gen_math_ops.next_after},
    {'testcase_name': 'PolyGamma', 'op': gen_math_ops.polygamma},
    {'testcase_name': 'SeluGrad',
     'op': lambda x, y: gen_nn_ops.selu_grad(gradients=x, outputs=y)},
    {'testcase_name': 'Relu6Grad',
     'op': lambda x, y: gen_nn_ops.relu6_grad(gradients=x, features=y)},
    {'testcase_name': 'ReciprocalGrad',
     'op': lambda x, y: gen_math_ops.reciprocal_grad(y=x, dy=y)},
    {'testcase_name': 'Xdivy', 'op': math_ops.xdivy},
    {'testcase_name': 'Xlog1py', 'op': math_ops.xlog1py},
    {'testcase_name': 'Xlogy', 'op': gen_math_ops.xlogy},
    {'testcase_name': 'Zeta', 'op': gen_math_ops.zeta},
] + BINARY_FLOAT_OPS_WITH_BROADCASTING_SUPPORT

BINARY_INT_OPS = [] + BINARY_INT_OPS_WITH_BROADCASTING_SUPPORT

REDUCTION_OPS = [
    {'testcase_name': 'Sum', 'op': math_ops.reduce_sum},
    {'testcase_name': 'Mean', 'op': math_ops.reduce_mean},
    {'testcase_name': 'Prod', 'op': math_ops.reduce_prod},
    {'testcase_name': 'Max', 'op': math_ops.reduce_max},
    {'testcase_name': 'Min', 'op': math_ops.reduce_min},
]

# TODO(b/171746536): added v2 rng ops here once supported.
RANDOM_OPS = [
    {'testcase_name': 'StatelessNorm',
     'op': gen_stateless_random_ops.stateless_random_normal,
     'dtype': dtypes.float32, 'op_version': 'V1'},
    {'testcase_name': 'StatelessTruncatedNorm',
     'op': gen_stateless_random_ops.stateless_truncated_normal,
     'dtype': dtypes.float32, 'op_version': 'V1'},
    {'testcase_name': 'StatelessUniform',
     'op': gen_stateless_random_ops.stateless_random_uniform,
     'dtype': dtypes.float32, 'op_version': 'V1'},
    {'testcase_name': 'StatelessUniformFullInt',
     'op': gen_stateless_random_ops.stateless_random_uniform_full_int,
     'dtype': dtypes.int32, 'op_version': 'V1'},
    {'testcase_name': 'StatelessRandomUniformFullIntV2',
     'op': gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2,
     'dtype': dtypes.int32, 'op_version': 'V2'},
    {'testcase_name': 'StatelessRandomNormalV2',
     'op': gen_stateless_random_ops_v2.stateless_random_normal_v2,
     'dtype': dtypes.float32, 'op_version': 'V2'},
    {'testcase_name': 'StatelessTruncatedNormalV2',
     'op': gen_stateless_random_ops_v2.stateless_truncated_normal_v2,
     'dtype': dtypes.float32, 'op_version': 'V2'},
    {'testcase_name': 'StatelessRandomUniformV2',
     'op': gen_stateless_random_ops_v2.stateless_random_uniform_v2,
     'dtype': dtypes.float32, 'op_version': 'V2'},
    {'testcase_name': 'StatelessRandomUniformIntV2',
     'op': gen_stateless_random_ops_v2.stateless_random_uniform_int_v2,
     'dtype': dtypes.int32, 'op_version': 'V2_RANGE'},
]

# op(inputs()) is expected to return an NxM tensor (N, M both even) with a
# flexible output sharding, depending on the context `op` runs in.
EXPANSION_OPS = [
    {'testcase_name': 'TileFrom1x1Array',
     'inputs': lambda: (constant_op.constant([[1.]]), [4, 4]),
     'op': gen_array_ops.tile},
    {'testcase_name': 'TileFrom2x2Array',
     'inputs': lambda: (constant_op.constant([[1., 2.], [3., 4.]]), [2, 4]),
     'op': gen_array_ops.tile},
    {'testcase_name': 'Fill',
     'inputs': lambda: ([2, 4], constant_op.constant(1.)),
     'op': array_ops.fill},
]

# (name, op) pairs of pooling ops over 2-D windows.
BATCH_PARALLEL_2D_WINDOW_OPS = [
    ('AvgPool', nn_ops.avg_pool_v2),
]

# (name, op) pairs of pooling ops over 3-D windows.
BATCH_PARALLEL_3D_WINDOW_OPS = [
    ('MaxPool3D', nn_ops.max_pool3d),
    ('AvgPool3D', nn_ops.avg_pool3d),
]

# (name, op, num_dims) triples for complex-to-complex FFT kernels; num_dims
# matches the nD suffix in the op name.
FFT_OPS = [
    ('FFT', gen_spectral_ops.fft, 1),
    ('FFT2D', gen_spectral_ops.fft2d, 2),
    ('FFT3D', gen_spectral_ops.fft3d, 3),
    ('IFFT', gen_spectral_ops.ifft, 1),
    ('IFFT2D', gen_spectral_ops.ifft2d, 2),
    ('IFFT3D', gen_spectral_ops.ifft3d, 3),
]

# (name, op, num_dims, input dtype) tuples for real<->complex FFT kernels.
RFFT_OPS = [
    ('IRFFT', gen_spectral_ops.irfft, 1, dtypes.complex64),
    ('IRFFT2D', gen_spectral_ops.irfft2d, 2, dtypes.complex64),
    ('IRFFT3D', gen_spectral_ops.irfft3d, 3, dtypes.complex64),
    ('RFFT', gen_spectral_ops.rfft, 1, dtypes.float32),
    ('RFFT2D', gen_spectral_ops.rfft2d, 2, dtypes.float32),
    ('RFFT3D', gen_spectral_ops.rfft3d, 3, dtypes.float32),
]

PADDINGS = [
    {'testcase_name': 'SamePadding', 'padding': 'SAME'},
    {'testcase_name': 'ValidPadding', 'padding': 'VALID'},
]
# pylint: enable=g-long-lambda
# pyformat: enable