Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/INSTALLER +1 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/METADATA +150 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/RECORD +1167 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/WHEEL +5 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/top_level.txt +1 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__init__.py +78 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__init__.py +20 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/api_export.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/version.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/api_export.py +49 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/backend_utils.py +140 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/code_stats.py +49 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/config.py +182 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dataset_utils.py +763 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dtype_utils.py +51 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/file_utils.py +518 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/image_dataset_utils.py +459 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/image_utils.py +457 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/io_utils.py +138 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_layer.py +677 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_utils.py +11 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/model_visualization.py +487 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/module_utils.py +61 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/naming.py +73 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/numerical_utils.py +225 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/progbar.py +269 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/python_utils.py +177 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/rng_utils.py +56 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/sequence_utils.py +139 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/summary_utils.py +443 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/text_dataset_utils.py +291 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/tf_utils.py +157 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/timeseries_dataset_utils.py +261 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/torch_utils.py +166 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py +241 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/tracking.py +290 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/version.py +9 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__init__.py +2 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/draw_bounding_boxes.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/draw_segmentation_masks.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_bounding_box_gallery.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_image_gallery.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_segmentation_mask_gallery.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/draw_bounding_boxes.py +177 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/draw_segmentation_masks.py +109 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_bounding_box_gallery.py +165 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_image_gallery.py +165 -0
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: keras
|
| 3 |
+
Version: 3.8.0
|
| 4 |
+
Summary: Multi-backend Keras
|
| 5 |
+
Author-email: Keras team <keras-users@googlegroups.com>
|
| 6 |
+
License: Apache License 2.0
|
| 7 |
+
Project-URL: Home, https://keras.io/
|
| 8 |
+
Project-URL: Repository, https://github.com/keras-team/keras
|
| 9 |
+
Classifier: Development Status :: 4 - Beta
|
| 10 |
+
Classifier: Programming Language :: Python :: 3
|
| 11 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 12 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 13 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 14 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 15 |
+
Classifier: Operating System :: Unix
|
| 16 |
+
Classifier: Operating System :: MacOS
|
| 17 |
+
Classifier: Intended Audience :: Science/Research
|
| 18 |
+
Classifier: Topic :: Scientific/Engineering
|
| 19 |
+
Classifier: Topic :: Software Development
|
| 20 |
+
Requires-Python: >=3.9
|
| 21 |
+
Description-Content-Type: text/markdown
|
| 22 |
+
Requires-Dist: absl-py
|
| 23 |
+
Requires-Dist: numpy
|
| 24 |
+
Requires-Dist: rich
|
| 25 |
+
Requires-Dist: namex
|
| 26 |
+
Requires-Dist: h5py
|
| 27 |
+
Requires-Dist: optree
|
| 28 |
+
Requires-Dist: ml-dtypes
|
| 29 |
+
Requires-Dist: packaging
|
| 30 |
+
|
| 31 |
+
# Keras 3: Deep Learning for Humans
|
| 32 |
+
|
| 33 |
+
Keras 3 is a multi-backend deep learning framework, with support for JAX, TensorFlow, and PyTorch.
|
| 34 |
+
Effortlessly build and train models for computer vision, natural language processing, audio processing,
|
| 35 |
+
timeseries forecasting, recommender systems, etc.
|
| 36 |
+
|
| 37 |
+
- **Accelerated model development**: Ship deep learning solutions faster thanks to the high-level UX of Keras
|
| 38 |
+
and the availability of easy-to-debug runtimes like PyTorch or JAX eager execution.
|
| 39 |
+
- **State-of-the-art performance**: By picking the backend that is the fastest for your model architecture (often JAX!),
|
| 40 |
+
leverage speedups ranging from 20% to 350% compared to other frameworks. [Benchmark here](https://keras.io/getting_started/benchmarks/).
|
| 41 |
+
- **Datacenter-scale training**: Scale confidently from your laptop to large clusters of GPUs or TPUs.
|
| 42 |
+
|
| 43 |
+
Join nearly three million developers, from burgeoning startups to global enterprises, in harnessing the power of Keras 3.
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
## Installation
|
| 47 |
+
|
| 48 |
+
### Install with pip
|
| 49 |
+
|
| 50 |
+
Keras 3 is available on PyPI as `keras`. Note that Keras 2 remains available as the `tf-keras` package.
|
| 51 |
+
|
| 52 |
+
1. Install `keras`:
|
| 53 |
+
|
| 54 |
+
```
|
| 55 |
+
pip install keras --upgrade
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
2. Install backend package(s).
|
| 59 |
+
|
| 60 |
+
To use `keras`, you should also install the backend of choice: `tensorflow`, `jax`, or `torch`.
|
| 61 |
+
Note that `tensorflow` is required for using certain Keras 3 features: certain preprocessing layers
|
| 62 |
+
as well as `tf.data` pipelines.
|
| 63 |
+
|
| 64 |
+
### Local installation
|
| 65 |
+
|
| 66 |
+
#### Minimal installation
|
| 67 |
+
|
| 68 |
+
Keras 3 is compatible with Linux and MacOS systems. For Windows users, we recommend using WSL2 to run Keras.
|
| 69 |
+
To install a local development version:
|
| 70 |
+
|
| 71 |
+
1. Install dependencies:
|
| 72 |
+
|
| 73 |
+
```
|
| 74 |
+
pip install -r requirements.txt
|
| 75 |
+
```
|
| 76 |
+
|
| 77 |
+
2. Run installation command from the root directory.
|
| 78 |
+
|
| 79 |
+
```
|
| 80 |
+
python pip_build.py --install
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
3. Run API generation script when creating PRs that update `keras_export` public APIs:
|
| 84 |
+
|
| 85 |
+
```
|
| 86 |
+
./shell/api_gen.sh
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
#### Adding GPU support
|
| 90 |
+
|
| 91 |
+
The `requirements.txt` file will install a CPU-only version of TensorFlow, JAX, and PyTorch. For GPU support, we also
|
| 92 |
+
provide a separate `requirements-{backend}-cuda.txt` for TensorFlow, JAX, and PyTorch. These install all CUDA
|
| 93 |
+
dependencies via `pip` and expect a NVIDIA driver to be pre-installed. We recommend a clean python environment for each
|
| 94 |
+
backend to avoid CUDA version mismatches. As an example, here is how to create a Jax GPU environment with `conda`:
|
| 95 |
+
|
| 96 |
+
```shell
|
| 97 |
+
conda create -y -n keras-jax python=3.10
|
| 98 |
+
conda activate keras-jax
|
| 99 |
+
pip install -r requirements-jax-cuda.txt
|
| 100 |
+
python pip_build.py --install
|
| 101 |
+
```
|
| 102 |
+
|
| 103 |
+
## Configuring your backend
|
| 104 |
+
|
| 105 |
+
You can export the environment variable `KERAS_BACKEND` or you can edit your local config file at `~/.keras/keras.json`
|
| 106 |
+
to configure your backend. Available backend options are: `"tensorflow"`, `"jax"`, `"torch"`. Example:
|
| 107 |
+
|
| 108 |
+
```
|
| 109 |
+
export KERAS_BACKEND="jax"
|
| 110 |
+
```
|
| 111 |
+
|
| 112 |
+
In Colab, you can do:
|
| 113 |
+
|
| 114 |
+
```python
|
| 115 |
+
import os
|
| 116 |
+
os.environ["KERAS_BACKEND"] = "jax"
|
| 117 |
+
|
| 118 |
+
import keras
|
| 119 |
+
```
|
| 120 |
+
|
| 121 |
+
**Note:** The backend must be configured before importing `keras`, and the backend cannot be changed after
|
| 122 |
+
the package has been imported.
|
| 123 |
+
|
| 124 |
+
## Backwards compatibility
|
| 125 |
+
|
| 126 |
+
Keras 3 is intended to work as a drop-in replacement for `tf.keras` (when using the TensorFlow backend). Just take your
|
| 127 |
+
existing `tf.keras` code, make sure that your calls to `model.save()` are using the up-to-date `.keras` format, and you're
|
| 128 |
+
done.
|
| 129 |
+
|
| 130 |
+
If your `tf.keras` model does not include custom components, you can start running it on top of JAX or PyTorch immediately.
|
| 131 |
+
|
| 132 |
+
If it does include custom components (e.g. custom layers or a custom `train_step()`), it is usually possible to convert it
|
| 133 |
+
to a backend-agnostic implementation in just a few minutes.
|
| 134 |
+
|
| 135 |
+
In addition, Keras models can consume datasets in any format, regardless of the backend you're using:
|
| 136 |
+
you can train your models with your existing `tf.data.Dataset` pipelines or PyTorch `DataLoaders`.
|
| 137 |
+
|
| 138 |
+
## Why use Keras 3?
|
| 139 |
+
|
| 140 |
+
- Run your high-level Keras workflows on top of any framework -- benefiting at will from the advantages of each framework,
|
| 141 |
+
e.g. the scalability and performance of JAX or the production ecosystem options of TensorFlow.
|
| 142 |
+
- Write custom components (e.g. layers, models, metrics) that you can use in low-level workflows in any framework.
|
| 143 |
+
- You can take a Keras model and train it in a training loop written from scratch in native TF, JAX, or PyTorch.
|
| 144 |
+
- You can take a Keras model and use it as part of a PyTorch-native `Module` or as part of a JAX-native model function.
|
| 145 |
+
- Make your ML code future-proof by avoiding framework lock-in.
|
| 146 |
+
- As a PyTorch user: get access to power and usability of Keras, at last!
|
| 147 |
+
- As a JAX user: get access to a fully-featured, battle-tested, well-documented modeling and training library.
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
Read more in the [Keras 3 release announcement](https://keras.io/keras_3/).
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,1167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
keras-3.8.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
keras-3.8.0.dist-info/METADATA,sha256=82_B-ikTuR7ULzYBq-XYOHikwgR5cbLdLAoMdjcuyis,5800
|
| 3 |
+
keras-3.8.0.dist-info/RECORD,,
|
| 4 |
+
keras-3.8.0.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
|
| 5 |
+
keras-3.8.0.dist-info/top_level.txt,sha256=ptcw_-QuGZ4ZDjMdwi_Z0clZm8QAqFdvzzFnDEOTs9o,6
|
| 6 |
+
keras/__init__.py,sha256=4RIBKrI0DyyQMR1WoeFHOw4MIl9Z7mnWNjwxU4bZJpk,2248
|
| 7 |
+
keras/__pycache__/__init__.cpython-310.pyc,,
|
| 8 |
+
keras/_tf_keras/__init__.py,sha256=KxzM_FebWUeTe1MInWSNmhQkhezwkhdgj9nIZruK_U4,34
|
| 9 |
+
keras/_tf_keras/__pycache__/__init__.cpython-310.pyc,,
|
| 10 |
+
keras/_tf_keras/keras/__init__.py,sha256=zZoJBLzgTf1Cnq_knTQeJW9nNsr3QPSx3d_9Gmftz38,2197
|
| 11 |
+
keras/_tf_keras/keras/__pycache__/__init__.cpython-310.pyc,,
|
| 12 |
+
keras/_tf_keras/keras/activations/__init__.py,sha256=4iq05YrT-fwYhwmVot_RntIXQw_0W0-t-G1hwXFFmDg,1963
|
| 13 |
+
keras/_tf_keras/keras/activations/__pycache__/__init__.cpython-310.pyc,,
|
| 14 |
+
keras/_tf_keras/keras/applications/__init__.py,sha256=OYAgbbrtjh7qLKOpcPHhC069N8lzyOVUvfx7SbTUPac,3295
|
| 15 |
+
keras/_tf_keras/keras/applications/__pycache__/__init__.cpython-310.pyc,,
|
| 16 |
+
keras/_tf_keras/keras/applications/convnext/__init__.py,sha256=CJtQ7VaafEq_qCfXqg803Wwgp7j63rIQl2KnVeWPnAI,535
|
| 17 |
+
keras/_tf_keras/keras/applications/convnext/__pycache__/__init__.cpython-310.pyc,,
|
| 18 |
+
keras/_tf_keras/keras/applications/densenet/__init__.py,sha256=P2LYU-mrO7j2cD0USIaDHAclR7WFSRte1A3Egw9ZNp0,414
|
| 19 |
+
keras/_tf_keras/keras/applications/densenet/__pycache__/__init__.cpython-310.pyc,,
|
| 20 |
+
keras/_tf_keras/keras/applications/efficientnet/__init__.py,sha256=8yc3DWUG029LT0SSLUS52fCqlKiIaJKWuLBFR9kRyKw,758
|
| 21 |
+
keras/_tf_keras/keras/applications/efficientnet/__pycache__/__init__.cpython-310.pyc,,
|
| 22 |
+
keras/_tf_keras/keras/applications/efficientnet_v2/__init__.py,sha256=vkOu63V-Dx3onrbE1ildBWdTuIMm6YP5YBXmKIwt8OE,733
|
| 23 |
+
keras/_tf_keras/keras/applications/efficientnet_v2/__pycache__/__init__.cpython-310.pyc,,
|
| 24 |
+
keras/_tf_keras/keras/applications/imagenet_utils/__init__.py,sha256=7UiZ1k9p_1KKJ3WhpzTw5IEzNKCA_HkjyA9HUT-P56c,258
|
| 25 |
+
keras/_tf_keras/keras/applications/imagenet_utils/__pycache__/__init__.cpython-310.pyc,,
|
| 26 |
+
keras/_tf_keras/keras/applications/inception_resnet_v2/__init__.py,sha256=z3b_vdJA3cgiXTC1YHOh1XECxr7gx5cTA9p7EHX57-o,341
|
| 27 |
+
keras/_tf_keras/keras/applications/inception_resnet_v2/__pycache__/__init__.cpython-310.pyc,,
|
| 28 |
+
keras/_tf_keras/keras/applications/inception_v3/__init__.py,sha256=UqUJ30KDEAKEoMjOX_JvuF_ZFdGUF3hZ1HxTnaJQi7Y,314
|
| 29 |
+
keras/_tf_keras/keras/applications/inception_v3/__pycache__/__init__.cpython-310.pyc,,
|
| 30 |
+
keras/_tf_keras/keras/applications/mobilenet/__init__.py,sha256=aZ-UclrXa5y18flDccJcXoQU_uiEqe36QFITVJKISZg,303
|
| 31 |
+
keras/_tf_keras/keras/applications/mobilenet/__pycache__/__init__.cpython-310.pyc,,
|
| 32 |
+
keras/_tf_keras/keras/applications/mobilenet_v2/__init__.py,sha256=QqEsqsY0XS7l7y16K8tDLnxTKDPztPg9Lquu8aOBtqk,314
|
| 33 |
+
keras/_tf_keras/keras/applications/mobilenet_v2/__pycache__/__init__.cpython-310.pyc,,
|
| 34 |
+
keras/_tf_keras/keras/applications/mobilenet_v3/__init__.py,sha256=dGfqu2jDH072Irmv9XhByY1hLGJSJnZPFjuSzzXtP3M,254
|
| 35 |
+
keras/_tf_keras/keras/applications/mobilenet_v3/__pycache__/__init__.cpython-310.pyc,,
|
| 36 |
+
keras/_tf_keras/keras/applications/nasnet/__init__.py,sha256=kKSKE7oLUQBlafjx-yZqltzqBVK3ZzSRUe_pdM5yMJI,351
|
| 37 |
+
keras/_tf_keras/keras/applications/nasnet/__pycache__/__init__.cpython-310.pyc,,
|
| 38 |
+
keras/_tf_keras/keras/applications/resnet/__init__.py,sha256=Jb8J5nfhmlM5OoXbeS8te-e9WcdcVThauyDgYjO41hI,397
|
| 39 |
+
keras/_tf_keras/keras/applications/resnet/__pycache__/__init__.cpython-310.pyc,,
|
| 40 |
+
keras/_tf_keras/keras/applications/resnet50/__init__.py,sha256=FDSlA76kxMfGbengnUhswy9wrXOcooytKwqVQlAiHCU,293
|
| 41 |
+
keras/_tf_keras/keras/applications/resnet50/__pycache__/__init__.cpython-310.pyc,,
|
| 42 |
+
keras/_tf_keras/keras/applications/resnet_v2/__init__.py,sha256=ZpnMiE2sXOmGHP4dh5coXw6Bw-gAG9Q4AFk3QeXNJAs,418
|
| 43 |
+
keras/_tf_keras/keras/applications/resnet_v2/__pycache__/__init__.cpython-310.pyc,,
|
| 44 |
+
keras/_tf_keras/keras/applications/vgg16/__init__.py,sha256=5zCCcsQpFp3_n0pxixssCtqt1J05ijsVOLeHL3nN_BA,287
|
| 45 |
+
keras/_tf_keras/keras/applications/vgg16/__pycache__/__init__.cpython-310.pyc,,
|
| 46 |
+
keras/_tf_keras/keras/applications/vgg19/__init__.py,sha256=EcXy9vHifqPxWA9FtBsFVyBE4-cGJk0kuYWqpN16VsA,287
|
| 47 |
+
keras/_tf_keras/keras/applications/vgg19/__pycache__/__init__.cpython-310.pyc,,
|
| 48 |
+
keras/_tf_keras/keras/applications/xception/__init__.py,sha256=7rCbsyBRr4q75NEAJXha_afJ9XQlVThvjiq1z1rUCMc,299
|
| 49 |
+
keras/_tf_keras/keras/applications/xception/__pycache__/__init__.cpython-310.pyc,,
|
| 50 |
+
keras/_tf_keras/keras/backend/__init__.py,sha256=kD-WLxwoqq9YPpBG4rFzbpqe43z1mDJCA-JcGOxOnws,6684
|
| 51 |
+
keras/_tf_keras/keras/backend/__pycache__/__init__.cpython-310.pyc,,
|
| 52 |
+
keras/_tf_keras/keras/callbacks/__init__.py,sha256=3hQGFKvDGo_GxxKAg7962P8sOjkAK3gYLAMoApk6NYQ,1044
|
| 53 |
+
keras/_tf_keras/keras/callbacks/__pycache__/__init__.cpython-310.pyc,,
|
| 54 |
+
keras/_tf_keras/keras/config/__init__.py,sha256=wdYtkLTOd4vDa3pWuTrWF6Je-xvPh_myrnzapx2gs20,1328
|
| 55 |
+
keras/_tf_keras/keras/config/__pycache__/__init__.cpython-310.pyc,,
|
| 56 |
+
keras/_tf_keras/keras/constraints/__init__.py,sha256=IwUc3HQMwy8RS0ylJwjNLSYEV6CCN5cy5vVL2OEouTY,797
|
| 57 |
+
keras/_tf_keras/keras/constraints/__pycache__/__init__.cpython-310.pyc,,
|
| 58 |
+
keras/_tf_keras/keras/datasets/__init__.py,sha256=TMj3G88kHwCFfFg0IP4OxTD1gCMOfZNscm97YJnX9_0,454
|
| 59 |
+
keras/_tf_keras/keras/datasets/__pycache__/__init__.cpython-310.pyc,,
|
| 60 |
+
keras/_tf_keras/keras/datasets/boston_housing/__init__.py,sha256=m-JFgF4Wg83j9kUIwG4WwwYxJuqx2X20su0cms-4AvQ,178
|
| 61 |
+
keras/_tf_keras/keras/datasets/boston_housing/__pycache__/__init__.cpython-310.pyc,,
|
| 62 |
+
keras/_tf_keras/keras/datasets/california_housing/__init__.py,sha256=ZTdBD-p_s7NUQ72-YvVu6zhpFbhogz7Dx2TC_F0wT6o,182
|
| 63 |
+
keras/_tf_keras/keras/datasets/california_housing/__pycache__/__init__.cpython-310.pyc,,
|
| 64 |
+
keras/_tf_keras/keras/datasets/cifar10/__init__.py,sha256=zE6qAroVmT1N-graOKMme7pMKd3pa-gXoE2YiA71G-k,171
|
| 65 |
+
keras/_tf_keras/keras/datasets/cifar10/__pycache__/__init__.cpython-310.pyc,,
|
| 66 |
+
keras/_tf_keras/keras/datasets/cifar100/__init__.py,sha256=ry24rVuxL-fMGlvTm_69E8BoWqs5RA4PBXc18q9_3nE,172
|
| 67 |
+
keras/_tf_keras/keras/datasets/cifar100/__pycache__/__init__.cpython-310.pyc,,
|
| 68 |
+
keras/_tf_keras/keras/datasets/fashion_mnist/__init__.py,sha256=XdTBzHTGNyjnROyxHbTnsPvM4aah7pW7OS7fdA7NDK4,177
|
| 69 |
+
keras/_tf_keras/keras/datasets/fashion_mnist/__pycache__/__init__.cpython-310.pyc,,
|
| 70 |
+
keras/_tf_keras/keras/datasets/imdb/__init__.py,sha256=UbWIDX0g49ou0oKn52cX9XO3GoxaSoAY8yTTayMhgBI,219
|
| 71 |
+
keras/_tf_keras/keras/datasets/imdb/__pycache__/__init__.cpython-310.pyc,,
|
| 72 |
+
keras/_tf_keras/keras/datasets/mnist/__init__.py,sha256=LtzLQyEHikIwIHBnHQpipXIoIzBBlD4ZymIYHFQsbXM,169
|
| 73 |
+
keras/_tf_keras/keras/datasets/mnist/__pycache__/__init__.cpython-310.pyc,,
|
| 74 |
+
keras/_tf_keras/keras/datasets/reuters/__init__.py,sha256=nveC8af7Nf1U11DgSGOXud9OG0OTMLmOzLKj_5meuv8,280
|
| 75 |
+
keras/_tf_keras/keras/datasets/reuters/__pycache__/__init__.cpython-310.pyc,,
|
| 76 |
+
keras/_tf_keras/keras/distribution/__init__.py,sha256=nMMN_Whe_0UBUxBi7hrgAmF58dKLRTJdHPNovets_TU,775
|
| 77 |
+
keras/_tf_keras/keras/distribution/__pycache__/__init__.cpython-310.pyc,,
|
| 78 |
+
keras/_tf_keras/keras/dtype_policies/__init__.py,sha256=5hre38f1WKuUCs0fVSrLdnWVmFGl9hjeSHMDcYGsXC8,605
|
| 79 |
+
keras/_tf_keras/keras/dtype_policies/__pycache__/__init__.cpython-310.pyc,,
|
| 80 |
+
keras/_tf_keras/keras/export/__init__.py,sha256=fDKQSQMFuL-o1L82NJ6oj4_EVZlERXIH1xUG6Z2NPq8,177
|
| 81 |
+
keras/_tf_keras/keras/export/__pycache__/__init__.cpython-310.pyc,,
|
| 82 |
+
keras/_tf_keras/keras/initializers/__init__.py,sha256=AC2ADbB6GI106OtDS-rkoyQISqDSANBkEv74ipc3PVo,3026
|
| 83 |
+
keras/_tf_keras/keras/initializers/__pycache__/__init__.cpython-310.pyc,,
|
| 84 |
+
keras/_tf_keras/keras/layers/__init__.py,sha256=JWGBZ3I5pbBplrrYlcf0RJwoozYdSJmmPKIgEsY_A-U,11908
|
| 85 |
+
keras/_tf_keras/keras/layers/__pycache__/__init__.cpython-310.pyc,,
|
| 86 |
+
keras/_tf_keras/keras/legacy/__init__.py,sha256=jnXr7nfdi2bWIIPHkecb4V8kJgWHlbLLGONQNtVIyoE,158
|
| 87 |
+
keras/_tf_keras/keras/legacy/__pycache__/__init__.cpython-310.pyc,,
|
| 88 |
+
keras/_tf_keras/keras/legacy/saving/__init__.py,sha256=Lo-SrioJr4I2Lwg7BtCBUrXhehEUi3PjA6XKC1RUTD0,270
|
| 89 |
+
keras/_tf_keras/keras/legacy/saving/__pycache__/__init__.cpython-310.pyc,,
|
| 90 |
+
keras/_tf_keras/keras/losses/__init__.py,sha256=97dwQVgjT9I25KZNWwYwaw7Xsee6VjiGtbdOPhheBMM,2974
|
| 91 |
+
keras/_tf_keras/keras/losses/__pycache__/__init__.cpython-310.pyc,,
|
| 92 |
+
keras/_tf_keras/keras/metrics/__init__.py,sha256=vdZLkgKKkjWnmikTXm4z6h2z_GD6ifqsGGVpJhYjjUM,4986
|
| 93 |
+
keras/_tf_keras/keras/metrics/__pycache__/__init__.cpython-310.pyc,,
|
| 94 |
+
keras/_tf_keras/keras/mixed_precision/__init__.py,sha256=5SAdEHsp61WWzpzR7LDAEVwEXDc10iSfICJ5X4fBOU4,636
|
| 95 |
+
keras/_tf_keras/keras/mixed_precision/__pycache__/__init__.cpython-310.pyc,,
|
| 96 |
+
keras/_tf_keras/keras/models/__init__.py,sha256=CoOZmRsB75JTFWmaAG2ozUkPod3tqKINHwZfz1vGc74,416
|
| 97 |
+
keras/_tf_keras/keras/models/__pycache__/__init__.cpython-310.pyc,,
|
| 98 |
+
keras/_tf_keras/keras/ops/__init__.py,sha256=o3d-0VDc_ROhfmylT2txfYM5kuIKSMu9ih4HF7xB5i0,10452
|
| 99 |
+
keras/_tf_keras/keras/ops/__pycache__/__init__.cpython-310.pyc,,
|
| 100 |
+
keras/_tf_keras/keras/ops/image/__init__.py,sha256=sR-AgA7J4SoQ9A2uasL2eVATbJF7tA93L4Bjc90djC0,528
|
| 101 |
+
keras/_tf_keras/keras/ops/image/__pycache__/__init__.cpython-310.pyc,,
|
| 102 |
+
keras/_tf_keras/keras/ops/linalg/__init__.py,sha256=S5FbsvccCzV39KKSuRiHi4NstVhHpQBiTJRF-I6H6Y8,595
|
| 103 |
+
keras/_tf_keras/keras/ops/linalg/__pycache__/__init__.cpython-310.pyc,,
|
| 104 |
+
keras/_tf_keras/keras/ops/nn/__init__.py,sha256=7t_xXtIzXauLbbghLVwdoJcM2n-ZP2hy9I0vJ2OZXwM,1937
|
| 105 |
+
keras/_tf_keras/keras/ops/nn/__pycache__/__init__.cpython-310.pyc,,
|
| 106 |
+
keras/_tf_keras/keras/ops/numpy/__init__.py,sha256=f5bKYDOsWCg_2kcv9h64niH_b6gGPiAtNA3jk9WJgeY,6426
|
| 107 |
+
keras/_tf_keras/keras/ops/numpy/__pycache__/__init__.cpython-310.pyc,,
|
| 108 |
+
keras/_tf_keras/keras/optimizers/__init__.py,sha256=vFg0VYhMqrF46b8DnZJPECQGTSLo2_JHIk_N88IESpk,1008
|
| 109 |
+
keras/_tf_keras/keras/optimizers/__pycache__/__init__.cpython-310.pyc,,
|
| 110 |
+
keras/_tf_keras/keras/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
|
| 111 |
+
keras/_tf_keras/keras/optimizers/legacy/__pycache__/__init__.cpython-310.pyc,,
|
| 112 |
+
keras/_tf_keras/keras/optimizers/schedules/__init__.py,sha256=Wj5RdkBgCZlb83cmMFLvXPMy3bWfi65DT-n6mc_jEm8,918
|
| 113 |
+
keras/_tf_keras/keras/optimizers/schedules/__pycache__/__init__.cpython-310.pyc,,
|
| 114 |
+
keras/_tf_keras/keras/preprocessing/__init__.py,sha256=S7kyp2DIP5zEHLu109zfMetURJ3Tphmp86i6v5dzv_Q,530
|
| 115 |
+
keras/_tf_keras/keras/preprocessing/__pycache__/__init__.cpython-310.pyc,,
|
| 116 |
+
keras/_tf_keras/keras/preprocessing/image/__init__.py,sha256=HxDuQEt6Fvk8LPHdU3ZIHK6rPI4HBnrO1iWWPv2vlLg,1240
|
| 117 |
+
keras/_tf_keras/keras/preprocessing/image/__pycache__/__init__.cpython-310.pyc,,
|
| 118 |
+
keras/_tf_keras/keras/preprocessing/sequence/__init__.py,sha256=SuRkLzfPAxJE9mJrOJUHIS1F49wHjKRRMTigoGzHnuw,385
|
| 119 |
+
keras/_tf_keras/keras/preprocessing/sequence/__pycache__/__init__.cpython-310.pyc,,
|
| 120 |
+
keras/_tf_keras/keras/preprocessing/text/__init__.py,sha256=ENE088kHs8mA7vSTt8OVn8EbRNmJB8pH_mHuyYLNSEI,436
|
| 121 |
+
keras/_tf_keras/keras/preprocessing/text/__pycache__/__init__.cpython-310.pyc,,
|
| 122 |
+
keras/_tf_keras/keras/quantizers/__init__.py,sha256=1uMzyYRCEZmbKf35VvtF7HPmqooNhHgxNgll--Ot21E,627
|
| 123 |
+
keras/_tf_keras/keras/quantizers/__pycache__/__init__.cpython-310.pyc,,
|
| 124 |
+
keras/_tf_keras/keras/random/__init__.py,sha256=Vp0WMSatNORPtXBd9PgL9czZCDJj-3EpS_vzDGBaq7U,628
|
| 125 |
+
keras/_tf_keras/keras/random/__pycache__/__init__.cpython-310.pyc,,
|
| 126 |
+
keras/_tf_keras/keras/regularizers/__init__.py,sha256=Dlz92XwBnM5yXdcWsrRYCLij3iHtmeVLrkWOF6G_9Sk,819
|
| 127 |
+
keras/_tf_keras/keras/regularizers/__pycache__/__init__.cpython-310.pyc,,
|
| 128 |
+
keras/_tf_keras/keras/saving/__init__.py,sha256=T-ae1TghLi2qL3ws-qCNVBSCriVJp6Obp3b1lXZcKZ8,980
|
| 129 |
+
keras/_tf_keras/keras/saving/__pycache__/__init__.cpython-310.pyc,,
|
| 130 |
+
keras/_tf_keras/keras/tree/__init__.py,sha256=LeyEGEQky2QkIpz2gaBdWmX0a_XbI1hVx-1N6T4SlPA,738
|
| 131 |
+
keras/_tf_keras/keras/tree/__pycache__/__init__.cpython-310.pyc,,
|
| 132 |
+
keras/_tf_keras/keras/utils/__init__.py,sha256=YhX48UKVikdCxjvy8ncVTbafMWMgZ7yo-KSJB1I-1po,2737
|
| 133 |
+
keras/_tf_keras/keras/utils/__pycache__/__init__.cpython-310.pyc,,
|
| 134 |
+
keras/_tf_keras/keras/utils/bounding_boxes/__init__.py,sha256=Cg2G9tvv1gf8QfmqAnPDFGUSp_qFx2HGCAl2l9j74dg,1119
|
| 135 |
+
keras/_tf_keras/keras/utils/bounding_boxes/__pycache__/__init__.cpython-310.pyc,,
|
| 136 |
+
keras/_tf_keras/keras/utils/legacy/__init__.py,sha256=Lo-SrioJr4I2Lwg7BtCBUrXhehEUi3PjA6XKC1RUTD0,270
|
| 137 |
+
keras/_tf_keras/keras/utils/legacy/__pycache__/__init__.cpython-310.pyc,,
|
| 138 |
+
keras/_tf_keras/keras/visualization/__init__.py,sha256=25wRQnlYw_E6GYMOUs-62PYrFko3VeYOy-QeoOKEFoQ,569
|
| 139 |
+
keras/_tf_keras/keras/visualization/__pycache__/__init__.cpython-310.pyc,,
|
| 140 |
+
keras/_tf_keras/keras/wrappers/__init__.py,sha256=Uytau8DAb7y3iDA2Hrf3LyOcA6HuhSk6CZZs0YkDnfo,317
|
| 141 |
+
keras/_tf_keras/keras/wrappers/__pycache__/__init__.cpython-310.pyc,,
|
| 142 |
+
keras/api/__init__.py,sha256=_j2pS8vM-cp0jlc6d4xv6gN7dqEEPkiQNi4s0vFQsFI,2167
|
| 143 |
+
keras/api/__pycache__/__init__.cpython-310.pyc,,
|
| 144 |
+
keras/api/activations/__init__.py,sha256=4iq05YrT-fwYhwmVot_RntIXQw_0W0-t-G1hwXFFmDg,1963
|
| 145 |
+
keras/api/activations/__pycache__/__init__.cpython-310.pyc,,
|
| 146 |
+
keras/api/applications/__init__.py,sha256=OYAgbbrtjh7qLKOpcPHhC069N8lzyOVUvfx7SbTUPac,3295
|
| 147 |
+
keras/api/applications/__pycache__/__init__.cpython-310.pyc,,
|
| 148 |
+
keras/api/applications/convnext/__init__.py,sha256=CJtQ7VaafEq_qCfXqg803Wwgp7j63rIQl2KnVeWPnAI,535
|
| 149 |
+
keras/api/applications/convnext/__pycache__/__init__.cpython-310.pyc,,
|
| 150 |
+
keras/api/applications/densenet/__init__.py,sha256=P2LYU-mrO7j2cD0USIaDHAclR7WFSRte1A3Egw9ZNp0,414
|
| 151 |
+
keras/api/applications/densenet/__pycache__/__init__.cpython-310.pyc,,
|
| 152 |
+
keras/api/applications/efficientnet/__init__.py,sha256=8yc3DWUG029LT0SSLUS52fCqlKiIaJKWuLBFR9kRyKw,758
|
| 153 |
+
keras/api/applications/efficientnet/__pycache__/__init__.cpython-310.pyc,,
|
| 154 |
+
keras/api/applications/efficientnet_v2/__init__.py,sha256=vkOu63V-Dx3onrbE1ildBWdTuIMm6YP5YBXmKIwt8OE,733
|
| 155 |
+
keras/api/applications/efficientnet_v2/__pycache__/__init__.cpython-310.pyc,,
|
| 156 |
+
keras/api/applications/imagenet_utils/__init__.py,sha256=7UiZ1k9p_1KKJ3WhpzTw5IEzNKCA_HkjyA9HUT-P56c,258
|
| 157 |
+
keras/api/applications/imagenet_utils/__pycache__/__init__.cpython-310.pyc,,
|
| 158 |
+
keras/api/applications/inception_resnet_v2/__init__.py,sha256=z3b_vdJA3cgiXTC1YHOh1XECxr7gx5cTA9p7EHX57-o,341
|
| 159 |
+
keras/api/applications/inception_resnet_v2/__pycache__/__init__.cpython-310.pyc,,
|
| 160 |
+
keras/api/applications/inception_v3/__init__.py,sha256=UqUJ30KDEAKEoMjOX_JvuF_ZFdGUF3hZ1HxTnaJQi7Y,314
|
| 161 |
+
keras/api/applications/inception_v3/__pycache__/__init__.cpython-310.pyc,,
|
| 162 |
+
keras/api/applications/mobilenet/__init__.py,sha256=aZ-UclrXa5y18flDccJcXoQU_uiEqe36QFITVJKISZg,303
|
| 163 |
+
keras/api/applications/mobilenet/__pycache__/__init__.cpython-310.pyc,,
|
| 164 |
+
keras/api/applications/mobilenet_v2/__init__.py,sha256=QqEsqsY0XS7l7y16K8tDLnxTKDPztPg9Lquu8aOBtqk,314
|
| 165 |
+
keras/api/applications/mobilenet_v2/__pycache__/__init__.cpython-310.pyc,,
|
| 166 |
+
keras/api/applications/mobilenet_v3/__init__.py,sha256=dGfqu2jDH072Irmv9XhByY1hLGJSJnZPFjuSzzXtP3M,254
|
| 167 |
+
keras/api/applications/mobilenet_v3/__pycache__/__init__.cpython-310.pyc,,
|
| 168 |
+
keras/api/applications/nasnet/__init__.py,sha256=kKSKE7oLUQBlafjx-yZqltzqBVK3ZzSRUe_pdM5yMJI,351
|
| 169 |
+
keras/api/applications/nasnet/__pycache__/__init__.cpython-310.pyc,,
|
| 170 |
+
keras/api/applications/resnet/__init__.py,sha256=Jb8J5nfhmlM5OoXbeS8te-e9WcdcVThauyDgYjO41hI,397
|
| 171 |
+
keras/api/applications/resnet/__pycache__/__init__.cpython-310.pyc,,
|
| 172 |
+
keras/api/applications/resnet50/__init__.py,sha256=FDSlA76kxMfGbengnUhswy9wrXOcooytKwqVQlAiHCU,293
|
| 173 |
+
keras/api/applications/resnet50/__pycache__/__init__.cpython-310.pyc,,
|
| 174 |
+
keras/api/applications/resnet_v2/__init__.py,sha256=ZpnMiE2sXOmGHP4dh5coXw6Bw-gAG9Q4AFk3QeXNJAs,418
|
| 175 |
+
keras/api/applications/resnet_v2/__pycache__/__init__.cpython-310.pyc,,
|
| 176 |
+
keras/api/applications/vgg16/__init__.py,sha256=5zCCcsQpFp3_n0pxixssCtqt1J05ijsVOLeHL3nN_BA,287
|
| 177 |
+
keras/api/applications/vgg16/__pycache__/__init__.cpython-310.pyc,,
|
| 178 |
+
keras/api/applications/vgg19/__init__.py,sha256=EcXy9vHifqPxWA9FtBsFVyBE4-cGJk0kuYWqpN16VsA,287
|
| 179 |
+
keras/api/applications/vgg19/__pycache__/__init__.cpython-310.pyc,,
|
| 180 |
+
keras/api/applications/xception/__init__.py,sha256=7rCbsyBRr4q75NEAJXha_afJ9XQlVThvjiq1z1rUCMc,299
|
| 181 |
+
keras/api/applications/xception/__pycache__/__init__.cpython-310.pyc,,
|
| 182 |
+
keras/api/backend/__init__.py,sha256=Iz8yqN_VWI6G8SzGW351lqwpY2aMEC_1-BazSReN_s4,883
|
| 183 |
+
keras/api/backend/__pycache__/__init__.cpython-310.pyc,,
|
| 184 |
+
keras/api/callbacks/__init__.py,sha256=3hQGFKvDGo_GxxKAg7962P8sOjkAK3gYLAMoApk6NYQ,1044
|
| 185 |
+
keras/api/callbacks/__pycache__/__init__.cpython-310.pyc,,
|
| 186 |
+
keras/api/config/__init__.py,sha256=wdYtkLTOd4vDa3pWuTrWF6Je-xvPh_myrnzapx2gs20,1328
|
| 187 |
+
keras/api/config/__pycache__/__init__.cpython-310.pyc,,
|
| 188 |
+
keras/api/constraints/__init__.py,sha256=IwUc3HQMwy8RS0ylJwjNLSYEV6CCN5cy5vVL2OEouTY,797
|
| 189 |
+
keras/api/constraints/__pycache__/__init__.cpython-310.pyc,,
|
| 190 |
+
keras/api/datasets/__init__.py,sha256=TMj3G88kHwCFfFg0IP4OxTD1gCMOfZNscm97YJnX9_0,454
|
| 191 |
+
keras/api/datasets/__pycache__/__init__.cpython-310.pyc,,
|
| 192 |
+
keras/api/datasets/boston_housing/__init__.py,sha256=m-JFgF4Wg83j9kUIwG4WwwYxJuqx2X20su0cms-4AvQ,178
|
| 193 |
+
keras/api/datasets/boston_housing/__pycache__/__init__.cpython-310.pyc,,
|
| 194 |
+
keras/api/datasets/california_housing/__init__.py,sha256=ZTdBD-p_s7NUQ72-YvVu6zhpFbhogz7Dx2TC_F0wT6o,182
|
| 195 |
+
keras/api/datasets/california_housing/__pycache__/__init__.cpython-310.pyc,,
|
| 196 |
+
keras/api/datasets/cifar10/__init__.py,sha256=zE6qAroVmT1N-graOKMme7pMKd3pa-gXoE2YiA71G-k,171
|
| 197 |
+
keras/api/datasets/cifar10/__pycache__/__init__.cpython-310.pyc,,
|
| 198 |
+
keras/api/datasets/cifar100/__init__.py,sha256=ry24rVuxL-fMGlvTm_69E8BoWqs5RA4PBXc18q9_3nE,172
|
| 199 |
+
keras/api/datasets/cifar100/__pycache__/__init__.cpython-310.pyc,,
|
| 200 |
+
keras/api/datasets/fashion_mnist/__init__.py,sha256=XdTBzHTGNyjnROyxHbTnsPvM4aah7pW7OS7fdA7NDK4,177
|
| 201 |
+
keras/api/datasets/fashion_mnist/__pycache__/__init__.cpython-310.pyc,,
|
| 202 |
+
keras/api/datasets/imdb/__init__.py,sha256=UbWIDX0g49ou0oKn52cX9XO3GoxaSoAY8yTTayMhgBI,219
|
| 203 |
+
keras/api/datasets/imdb/__pycache__/__init__.cpython-310.pyc,,
|
| 204 |
+
keras/api/datasets/mnist/__init__.py,sha256=LtzLQyEHikIwIHBnHQpipXIoIzBBlD4ZymIYHFQsbXM,169
|
| 205 |
+
keras/api/datasets/mnist/__pycache__/__init__.cpython-310.pyc,,
|
| 206 |
+
keras/api/datasets/reuters/__init__.py,sha256=nveC8af7Nf1U11DgSGOXud9OG0OTMLmOzLKj_5meuv8,280
|
| 207 |
+
keras/api/datasets/reuters/__pycache__/__init__.cpython-310.pyc,,
|
| 208 |
+
keras/api/distribution/__init__.py,sha256=nMMN_Whe_0UBUxBi7hrgAmF58dKLRTJdHPNovets_TU,775
|
| 209 |
+
keras/api/distribution/__pycache__/__init__.cpython-310.pyc,,
|
| 210 |
+
keras/api/dtype_policies/__init__.py,sha256=5hre38f1WKuUCs0fVSrLdnWVmFGl9hjeSHMDcYGsXC8,605
|
| 211 |
+
keras/api/dtype_policies/__pycache__/__init__.cpython-310.pyc,,
|
| 212 |
+
keras/api/export/__init__.py,sha256=fDKQSQMFuL-o1L82NJ6oj4_EVZlERXIH1xUG6Z2NPq8,177
|
| 213 |
+
keras/api/export/__pycache__/__init__.cpython-310.pyc,,
|
| 214 |
+
keras/api/initializers/__init__.py,sha256=AC2ADbB6GI106OtDS-rkoyQISqDSANBkEv74ipc3PVo,3026
|
| 215 |
+
keras/api/initializers/__pycache__/__init__.cpython-310.pyc,,
|
| 216 |
+
keras/api/layers/__init__.py,sha256=9wIEB_qtIVXxBZGjKUCo9CXZHghSGx-QIT-QFTXkR_8,11781
|
| 217 |
+
keras/api/layers/__pycache__/__init__.cpython-310.pyc,,
|
| 218 |
+
keras/api/legacy/__init__.py,sha256=jnXr7nfdi2bWIIPHkecb4V8kJgWHlbLLGONQNtVIyoE,158
|
| 219 |
+
keras/api/legacy/__pycache__/__init__.cpython-310.pyc,,
|
| 220 |
+
keras/api/legacy/saving/__init__.py,sha256=Lo-SrioJr4I2Lwg7BtCBUrXhehEUi3PjA6XKC1RUTD0,270
|
| 221 |
+
keras/api/legacy/saving/__pycache__/__init__.cpython-310.pyc,,
|
| 222 |
+
keras/api/losses/__init__.py,sha256=h0DLNCOHhzkXHFJIss9LjjAJ5wSaajI255xfSaPkzhY,2467
|
| 223 |
+
keras/api/losses/__pycache__/__init__.cpython-310.pyc,,
|
| 224 |
+
keras/api/metrics/__init__.py,sha256=_4b3MjwnTgp3am78le7zd7sJVmUaPdzs8PT18GqUMXU,4525
|
| 225 |
+
keras/api/metrics/__pycache__/__init__.cpython-310.pyc,,
|
| 226 |
+
keras/api/mixed_precision/__init__.py,sha256=5SAdEHsp61WWzpzR7LDAEVwEXDc10iSfICJ5X4fBOU4,636
|
| 227 |
+
keras/api/mixed_precision/__pycache__/__init__.cpython-310.pyc,,
|
| 228 |
+
keras/api/models/__init__.py,sha256=CoOZmRsB75JTFWmaAG2ozUkPod3tqKINHwZfz1vGc74,416
|
| 229 |
+
keras/api/models/__pycache__/__init__.cpython-310.pyc,,
|
| 230 |
+
keras/api/ops/__init__.py,sha256=o3d-0VDc_ROhfmylT2txfYM5kuIKSMu9ih4HF7xB5i0,10452
|
| 231 |
+
keras/api/ops/__pycache__/__init__.cpython-310.pyc,,
|
| 232 |
+
keras/api/ops/image/__init__.py,sha256=sR-AgA7J4SoQ9A2uasL2eVATbJF7tA93L4Bjc90djC0,528
|
| 233 |
+
keras/api/ops/image/__pycache__/__init__.cpython-310.pyc,,
|
| 234 |
+
keras/api/ops/linalg/__init__.py,sha256=S5FbsvccCzV39KKSuRiHi4NstVhHpQBiTJRF-I6H6Y8,595
|
| 235 |
+
keras/api/ops/linalg/__pycache__/__init__.cpython-310.pyc,,
|
| 236 |
+
keras/api/ops/nn/__init__.py,sha256=7t_xXtIzXauLbbghLVwdoJcM2n-ZP2hy9I0vJ2OZXwM,1937
|
| 237 |
+
keras/api/ops/nn/__pycache__/__init__.cpython-310.pyc,,
|
| 238 |
+
keras/api/ops/numpy/__init__.py,sha256=f5bKYDOsWCg_2kcv9h64niH_b6gGPiAtNA3jk9WJgeY,6426
|
| 239 |
+
keras/api/ops/numpy/__pycache__/__init__.cpython-310.pyc,,
|
| 240 |
+
keras/api/optimizers/__init__.py,sha256=vFg0VYhMqrF46b8DnZJPECQGTSLo2_JHIk_N88IESpk,1008
|
| 241 |
+
keras/api/optimizers/__pycache__/__init__.cpython-310.pyc,,
|
| 242 |
+
keras/api/optimizers/legacy/__init__.py,sha256=uIMQESCV80Q0FY-9ikQUjXYPyZqmTfAM3dfohQ5DzYs,516
|
| 243 |
+
keras/api/optimizers/legacy/__pycache__/__init__.cpython-310.pyc,,
|
| 244 |
+
keras/api/optimizers/schedules/__init__.py,sha256=Wj5RdkBgCZlb83cmMFLvXPMy3bWfi65DT-n6mc_jEm8,918
|
| 245 |
+
keras/api/optimizers/schedules/__pycache__/__init__.cpython-310.pyc,,
|
| 246 |
+
keras/api/preprocessing/__init__.py,sha256=mVbAwXBZ5UxJmrKdUFKQjwdN_DBPf9wNVac_XURBmSI,453
|
| 247 |
+
keras/api/preprocessing/__pycache__/__init__.cpython-310.pyc,,
|
| 248 |
+
keras/api/preprocessing/image/__init__.py,sha256=61dPt1CFgoX52QMmaA11D1RHvA3hpSf4I4_sDsS1zuc,379
|
| 249 |
+
keras/api/preprocessing/image/__pycache__/__init__.cpython-310.pyc,,
|
| 250 |
+
keras/api/preprocessing/sequence/__init__.py,sha256=nqJbuy_w9GxqAlk5i_gGwzQAodLu0gpPCsYCOXzQYXQ,179
|
| 251 |
+
keras/api/preprocessing/sequence/__pycache__/__init__.cpython-310.pyc,,
|
| 252 |
+
keras/api/quantizers/__init__.py,sha256=1uMzyYRCEZmbKf35VvtF7HPmqooNhHgxNgll--Ot21E,627
|
| 253 |
+
keras/api/quantizers/__pycache__/__init__.cpython-310.pyc,,
|
| 254 |
+
keras/api/random/__init__.py,sha256=Vp0WMSatNORPtXBd9PgL9czZCDJj-3EpS_vzDGBaq7U,628
|
| 255 |
+
keras/api/random/__pycache__/__init__.cpython-310.pyc,,
|
| 256 |
+
keras/api/regularizers/__init__.py,sha256=Dlz92XwBnM5yXdcWsrRYCLij3iHtmeVLrkWOF6G_9Sk,819
|
| 257 |
+
keras/api/regularizers/__pycache__/__init__.cpython-310.pyc,,
|
| 258 |
+
keras/api/saving/__init__.py,sha256=T-ae1TghLi2qL3ws-qCNVBSCriVJp6Obp3b1lXZcKZ8,980
|
| 259 |
+
keras/api/saving/__pycache__/__init__.cpython-310.pyc,,
|
| 260 |
+
keras/api/tree/__init__.py,sha256=LeyEGEQky2QkIpz2gaBdWmX0a_XbI1hVx-1N6T4SlPA,738
|
| 261 |
+
keras/api/tree/__pycache__/__init__.cpython-310.pyc,,
|
| 262 |
+
keras/api/utils/__init__.py,sha256=YhX48UKVikdCxjvy8ncVTbafMWMgZ7yo-KSJB1I-1po,2737
|
| 263 |
+
keras/api/utils/__pycache__/__init__.cpython-310.pyc,,
|
| 264 |
+
keras/api/utils/bounding_boxes/__init__.py,sha256=Cg2G9tvv1gf8QfmqAnPDFGUSp_qFx2HGCAl2l9j74dg,1119
|
| 265 |
+
keras/api/utils/bounding_boxes/__pycache__/__init__.cpython-310.pyc,,
|
| 266 |
+
keras/api/utils/legacy/__init__.py,sha256=Lo-SrioJr4I2Lwg7BtCBUrXhehEUi3PjA6XKC1RUTD0,270
|
| 267 |
+
keras/api/utils/legacy/__pycache__/__init__.cpython-310.pyc,,
|
| 268 |
+
keras/api/visualization/__init__.py,sha256=25wRQnlYw_E6GYMOUs-62PYrFko3VeYOy-QeoOKEFoQ,569
|
| 269 |
+
keras/api/visualization/__pycache__/__init__.cpython-310.pyc,,
|
| 270 |
+
keras/api/wrappers/__init__.py,sha256=Uytau8DAb7y3iDA2Hrf3LyOcA6HuhSk6CZZs0YkDnfo,317
|
| 271 |
+
keras/api/wrappers/__pycache__/__init__.cpython-310.pyc,,
|
| 272 |
+
keras/src/__init__.py,sha256=Gi4S7EiCMkE03PbdGNpFdaUYySWDs_FcAJ8Taz9Y1BE,684
|
| 273 |
+
keras/src/__pycache__/__init__.cpython-310.pyc,,
|
| 274 |
+
keras/src/__pycache__/api_export.cpython-310.pyc,,
|
| 275 |
+
keras/src/__pycache__/version.cpython-310.pyc,,
|
| 276 |
+
keras/src/activations/__init__.py,sha256=SgIXIccbRm8TFHfo8XNd_ClNOF5BxoXuq4p9sVqsBos,4321
|
| 277 |
+
keras/src/activations/__pycache__/__init__.cpython-310.pyc,,
|
| 278 |
+
keras/src/activations/__pycache__/activations.cpython-310.pyc,,
|
| 279 |
+
keras/src/activations/activations.py,sha256=F6nckJVrzd4BHGLw-DP1a83m-SUhvFgnR6scUZ73DPY,17114
|
| 280 |
+
keras/src/api_export.py,sha256=gXOkBOnmscV013WAc75lc4Up01-Kkg9EylIAT_QWctg,1173
|
| 281 |
+
keras/src/applications/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 282 |
+
keras/src/applications/__pycache__/__init__.cpython-310.pyc,,
|
| 283 |
+
keras/src/applications/__pycache__/convnext.cpython-310.pyc,,
|
| 284 |
+
keras/src/applications/__pycache__/densenet.cpython-310.pyc,,
|
| 285 |
+
keras/src/applications/__pycache__/efficientnet.cpython-310.pyc,,
|
| 286 |
+
keras/src/applications/__pycache__/efficientnet_v2.cpython-310.pyc,,
|
| 287 |
+
keras/src/applications/__pycache__/imagenet_utils.cpython-310.pyc,,
|
| 288 |
+
keras/src/applications/__pycache__/inception_resnet_v2.cpython-310.pyc,,
|
| 289 |
+
keras/src/applications/__pycache__/inception_v3.cpython-310.pyc,,
|
| 290 |
+
keras/src/applications/__pycache__/mobilenet.cpython-310.pyc,,
|
| 291 |
+
keras/src/applications/__pycache__/mobilenet_v2.cpython-310.pyc,,
|
| 292 |
+
keras/src/applications/__pycache__/mobilenet_v3.cpython-310.pyc,,
|
| 293 |
+
keras/src/applications/__pycache__/nasnet.cpython-310.pyc,,
|
| 294 |
+
keras/src/applications/__pycache__/resnet.cpython-310.pyc,,
|
| 295 |
+
keras/src/applications/__pycache__/resnet_v2.cpython-310.pyc,,
|
| 296 |
+
keras/src/applications/__pycache__/vgg16.cpython-310.pyc,,
|
| 297 |
+
keras/src/applications/__pycache__/vgg19.cpython-310.pyc,,
|
| 298 |
+
keras/src/applications/__pycache__/xception.cpython-310.pyc,,
|
| 299 |
+
keras/src/applications/convnext.py,sha256=Eqq8_J-7rHl36ihhVOb3S-rg3tOuS7KJg7F7mF7GgqA,25015
|
| 300 |
+
keras/src/applications/densenet.py,sha256=wE6Kz0KQJaRrJMVO3NSzek5QANqxCVOXrR9Lko6jrYM,17094
|
| 301 |
+
keras/src/applications/efficientnet.py,sha256=4ncUeMVCI4Opqi6ioZJOg6bw62JCcXCHSR4OvSUC3dw,25342
|
| 302 |
+
keras/src/applications/efficientnet_v2.py,sha256=zVhG7ovNXNpqmqOEDqZiFATdXHIlb24SI52Qtz6TAAg,40735
|
| 303 |
+
keras/src/applications/imagenet_utils.py,sha256=4zh4jPOYQPyTbs3vOHrAixqVWeqhbTjM-vkaCDatwVg,16034
|
| 304 |
+
keras/src/applications/inception_resnet_v2.py,sha256=zrwLxezhUigqj2x6ELkHkeKs_KmN0wscs_mlF8EwsVw,14570
|
| 305 |
+
keras/src/applications/inception_v3.py,sha256=Qcr_KFFvyTFsib4NKxUu2HcC61mG2aQeBkdVXT6pz3Q,15581
|
| 306 |
+
keras/src/applications/mobilenet.py,sha256=KQoFt1AL4JLkOsIBwdnSr9tcz1woZdNG9k3eVSX2Ths,17269
|
| 307 |
+
keras/src/applications/mobilenet_v2.py,sha256=Ftmh5-PM9BjNUujAdjxa2Z0LQU9loUksztEOwlkAvM0,18035
|
| 308 |
+
keras/src/applications/mobilenet_v3.py,sha256=iVwPqK66wfsBac-KwOW_p5LO1hS7w7mCIL1PyLj1MKg,23651
|
| 309 |
+
keras/src/applications/nasnet.py,sha256=W_yZZ84O7X2nSTbPAfV4MoyiJKV6jWiu7xGrF8d9ysE,30917
|
| 310 |
+
keras/src/applications/resnet.py,sha256=9QixLDppBqWlDlhzPGut_F_BjJ2rZeHbVnKDAMEVvdg,19521
|
| 311 |
+
keras/src/applications/resnet_v2.py,sha256=Lkcm5C052RAGJ814Ff_LFbFJ9EMvOGBmmIRcWFSvVs0,6755
|
| 312 |
+
keras/src/applications/vgg16.py,sha256=hQwypxWhnRTjACW29m0eR560MrwPtATXOa7d8q9GQtc,9173
|
| 313 |
+
keras/src/applications/vgg19.py,sha256=MmcoMicENz4_5rrtIBX-7NuzqEAYBsQxePF_P5zPCuI,9494
|
| 314 |
+
keras/src/applications/xception.py,sha256=tsIVYzsc2LJ_NSMXE7xclM44beibDSXGNrR6URucoL4,12786
|
| 315 |
+
keras/src/backend/__init__.py,sha256=b9xUJiQjfk-0_HzuCHpUn26u-_F_TDFHf31RduG2KAc,3088
|
| 316 |
+
keras/src/backend/__pycache__/__init__.cpython-310.pyc,,
|
| 317 |
+
keras/src/backend/__pycache__/config.cpython-310.pyc,,
|
| 318 |
+
keras/src/backend/common/__init__.py,sha256=q_z_xvW-5LnR7n8cVKPCPWVefEFpHTqTRKnteLYTovk,595
|
| 319 |
+
keras/src/backend/common/__pycache__/__init__.cpython-310.pyc,,
|
| 320 |
+
keras/src/backend/common/__pycache__/backend_utils.cpython-310.pyc,,
|
| 321 |
+
keras/src/backend/common/__pycache__/dtypes.cpython-310.pyc,,
|
| 322 |
+
keras/src/backend/common/__pycache__/global_state.cpython-310.pyc,,
|
| 323 |
+
keras/src/backend/common/__pycache__/keras_tensor.cpython-310.pyc,,
|
| 324 |
+
keras/src/backend/common/__pycache__/masking.cpython-310.pyc,,
|
| 325 |
+
keras/src/backend/common/__pycache__/name_scope.cpython-310.pyc,,
|
| 326 |
+
keras/src/backend/common/__pycache__/stateless_scope.cpython-310.pyc,,
|
| 327 |
+
keras/src/backend/common/__pycache__/symbolic_scope.cpython-310.pyc,,
|
| 328 |
+
keras/src/backend/common/__pycache__/tensor_attributes.cpython-310.pyc,,
|
| 329 |
+
keras/src/backend/common/__pycache__/variables.cpython-310.pyc,,
|
| 330 |
+
keras/src/backend/common/backend_utils.py,sha256=I_UdvvRl2E4VJvyPd8G8QFnkJpc5VZraN7IqmvN44H0,17509
|
| 331 |
+
keras/src/backend/common/dtypes.py,sha256=SP7UwC0_4Nz00ye4XtnTfD3mbq98OsU-vriOQDfuqUA,10227
|
| 332 |
+
keras/src/backend/common/global_state.py,sha256=0xWtrdgw_VOgtzH3Xl9D0qJJYYeP1AaqE9u2GHXwcu0,3412
|
| 333 |
+
keras/src/backend/common/keras_tensor.py,sha256=pc40I6xqHS_gmwkbsFh2809kkfFbvCf3RLjqBXAZD4s,10537
|
| 334 |
+
keras/src/backend/common/masking.py,sha256=JiC1uvxF_4psCMlaiawfAA_7UQEhF123xxFAnRyNg98,727
|
| 335 |
+
keras/src/backend/common/name_scope.py,sha256=p0kBTcaAhueiQEeOI-5--YJUrVsdInpwyEjTjS43dTQ,2545
|
| 336 |
+
keras/src/backend/common/stateless_scope.py,sha256=sRZvWOwMM6BWqhaB9v4mqIRwKXdWh2LTBAMFtBUHjes,3667
|
| 337 |
+
keras/src/backend/common/symbolic_scope.py,sha256=RfrfOAv2cbiZai-L6tHwir2WUpJhS6gGj0R2YjxDMVk,683
|
| 338 |
+
keras/src/backend/common/tensor_attributes.py,sha256=X5sYeGDu9YmVBIn8oX31IeE-v-bxjq2ovmIjLrVOa8g,1161
|
| 339 |
+
keras/src/backend/common/variables.py,sha256=FUZ6Ru38JsLsiuaoaIU6jDCN0pbmruYDkP_UUc8pI40,22743
|
| 340 |
+
keras/src/backend/config.py,sha256=katPyLBXsgGmUU8KSCEonKL88j-Mrw7EKgkHBBqBUgs,9526
|
| 341 |
+
keras/src/backend/jax/__init__.py,sha256=VvHxm5iubKY-DnNEl7CUmPvWjbGLXufTCxyk8UJC2lk,1380
|
| 342 |
+
keras/src/backend/jax/__pycache__/__init__.cpython-310.pyc,,
|
| 343 |
+
keras/src/backend/jax/__pycache__/core.cpython-310.pyc,,
|
| 344 |
+
keras/src/backend/jax/__pycache__/distribution_lib.cpython-310.pyc,,
|
| 345 |
+
keras/src/backend/jax/__pycache__/export.cpython-310.pyc,,
|
| 346 |
+
keras/src/backend/jax/__pycache__/image.cpython-310.pyc,,
|
| 347 |
+
keras/src/backend/jax/__pycache__/layer.cpython-310.pyc,,
|
| 348 |
+
keras/src/backend/jax/__pycache__/linalg.cpython-310.pyc,,
|
| 349 |
+
keras/src/backend/jax/__pycache__/math.cpython-310.pyc,,
|
| 350 |
+
keras/src/backend/jax/__pycache__/nn.cpython-310.pyc,,
|
| 351 |
+
keras/src/backend/jax/__pycache__/numpy.cpython-310.pyc,,
|
| 352 |
+
keras/src/backend/jax/__pycache__/optimizer.cpython-310.pyc,,
|
| 353 |
+
keras/src/backend/jax/__pycache__/random.cpython-310.pyc,,
|
| 354 |
+
keras/src/backend/jax/__pycache__/rnn.cpython-310.pyc,,
|
| 355 |
+
keras/src/backend/jax/__pycache__/sparse.cpython-310.pyc,,
|
| 356 |
+
keras/src/backend/jax/__pycache__/tensorboard.cpython-310.pyc,,
|
| 357 |
+
keras/src/backend/jax/__pycache__/trainer.cpython-310.pyc,,
|
| 358 |
+
keras/src/backend/jax/core.py,sha256=bwvmkcZ-1zgFyEWHCnoodGI1_i14wPeFClEgONV5EQE,13635
|
| 359 |
+
keras/src/backend/jax/distribution_lib.py,sha256=yOrp3aWzfsjwMaSlIU4Q6srNcJMUcruMpsZQl1bUMBM,9595
|
| 360 |
+
keras/src/backend/jax/export.py,sha256=BZImEaY54YYRQLw0GQa2IGN_x78YO-cNWY-gqoaxBpk,7852
|
| 361 |
+
keras/src/backend/jax/image.py,sha256=TUfpFg25_sPML8Qos52T3XvkvKSpksDRo2TEdfW7NpY,18873
|
| 362 |
+
keras/src/backend/jax/layer.py,sha256=kfxiy810I30GcAgDaxsODxHY3CY4V8yMNKi4pGBTNlg,25
|
| 363 |
+
keras/src/backend/jax/linalg.py,sha256=F2smqTVuZhDtLUpPLG1aQd89tEhDgt6hWEEUXicNok0,2188
|
| 364 |
+
keras/src/backend/jax/math.py,sha256=1IEDpdoF8e5ltu3D4wbDQuihzvJHhMXz8W9Z_E-eJqU,9391
|
| 365 |
+
keras/src/backend/jax/nn.py,sha256=_IQ4rL-2P75WsozQ9N5Ms8JcMy-Mc_fZ1LTD13YXAx8,37042
|
| 366 |
+
keras/src/backend/jax/numpy.py,sha256=N3XJBD98FlKHua_2mSxQUAV2Nh6fC_3dcQHO0o4wQqY,33472
|
| 367 |
+
keras/src/backend/jax/optimizer.py,sha256=JgYtDGitcRqDXesmGQCOye7za8NiuDnsxnDmBNM8Z0c,4142
|
| 368 |
+
keras/src/backend/jax/random.py,sha256=Uk2huGIk_dlzMrx5eDVrrr2TeCEMitn2vr4yzA0NXjs,3594
|
| 369 |
+
keras/src/backend/jax/rnn.py,sha256=bSnLID-CP2Pr-Xi-a0jT4NJbWwAh0JFcYP5YwiaEZws,7552
|
| 370 |
+
keras/src/backend/jax/sparse.py,sha256=yuxMCxssWj6dn0IC1FMfWZoZ8OkMDIc_uULZ_HR3lPo,13804
|
| 371 |
+
keras/src/backend/jax/tensorboard.py,sha256=48fhQ7hpP8vlL6WJ1-_YQ89VY1cVAmATO9YOqKjSvck,490
|
| 372 |
+
keras/src/backend/jax/trainer.py,sha256=GvNOqNNSL8vWNr9ForbtRMqaa72qWdbS_7n0qrFcSMA,38527
|
| 373 |
+
keras/src/backend/numpy/__init__.py,sha256=MmFlbB7yNLSJmb3KVAtczLhP48PgQ7cldXcXArY2oeQ,1240
|
| 374 |
+
keras/src/backend/numpy/__pycache__/__init__.cpython-310.pyc,,
|
| 375 |
+
keras/src/backend/numpy/__pycache__/core.cpython-310.pyc,,
|
| 376 |
+
keras/src/backend/numpy/__pycache__/export.cpython-310.pyc,,
|
| 377 |
+
keras/src/backend/numpy/__pycache__/image.cpython-310.pyc,,
|
| 378 |
+
keras/src/backend/numpy/__pycache__/layer.cpython-310.pyc,,
|
| 379 |
+
keras/src/backend/numpy/__pycache__/linalg.cpython-310.pyc,,
|
| 380 |
+
keras/src/backend/numpy/__pycache__/math.cpython-310.pyc,,
|
| 381 |
+
keras/src/backend/numpy/__pycache__/nn.cpython-310.pyc,,
|
| 382 |
+
keras/src/backend/numpy/__pycache__/numpy.cpython-310.pyc,,
|
| 383 |
+
keras/src/backend/numpy/__pycache__/random.cpython-310.pyc,,
|
| 384 |
+
keras/src/backend/numpy/__pycache__/rnn.cpython-310.pyc,,
|
| 385 |
+
keras/src/backend/numpy/__pycache__/trainer.cpython-310.pyc,,
|
| 386 |
+
keras/src/backend/numpy/core.py,sha256=u9XXXpnNwMZt6MFzjbHXTKJRDll8acwVKprbER6EH_M,13136
|
| 387 |
+
keras/src/backend/numpy/export.py,sha256=mXJ8egC2Rl_I-ggYOTe-NbPeeWiv55od39aWZimUheo,351
|
| 388 |
+
keras/src/backend/numpy/image.py,sha256=9aSFGNqqxsv5rmK-YywzKtliWtrKoBL1t03v6m1wWYA,17250
|
| 389 |
+
keras/src/backend/numpy/layer.py,sha256=dTk7W7ql7vRgll7JbOXK5PlIhQw5VHdpSjKciHd8vec,27
|
| 390 |
+
keras/src/backend/numpy/linalg.py,sha256=oCeHcCnqm7jJvT2Pt75vlSApFAQi0X85jo5h8hsVP6s,2102
|
| 391 |
+
keras/src/backend/numpy/math.py,sha256=gZ5ozBT5E5SwwY-le1oz5-Rh5emChDdrM9CQ_zqoIaQ,10170
|
| 392 |
+
keras/src/backend/numpy/nn.py,sha256=oupZ8T9WiWUT5YBeQnmrmPX_EUzW3HQkh0zYOahrjAg,36048
|
| 393 |
+
keras/src/backend/numpy/numpy.py,sha256=5FLDT07269ghQtH467DCxgW2G0gLqgWJyC41cmNKv3o,30760
|
| 394 |
+
keras/src/backend/numpy/random.py,sha256=wx2nE75q7L2cBMjtQlQx8yKMj4Ie3puFMDQsbrZO8SA,3961
|
| 395 |
+
keras/src/backend/numpy/rnn.py,sha256=_3QChpBwSdvjSNsSi2zD2ljXsM5vAFBnXuwxwBbA4b4,7652
|
| 396 |
+
keras/src/backend/numpy/trainer.py,sha256=SBvvtYQxmCOKmuUjlKHMNv-DOFMijf9Jf5MakRgeweQ,11139
|
| 397 |
+
keras/src/backend/openvino/__init__.py,sha256=xzkB1NlX2Gy__RUrD45bM1qXJr7pcF1wxFN-SjcwLHA,1255
|
| 398 |
+
keras/src/backend/openvino/__pycache__/__init__.cpython-310.pyc,,
|
| 399 |
+
keras/src/backend/openvino/__pycache__/core.cpython-310.pyc,,
|
| 400 |
+
keras/src/backend/openvino/__pycache__/export.cpython-310.pyc,,
|
| 401 |
+
keras/src/backend/openvino/__pycache__/image.cpython-310.pyc,,
|
| 402 |
+
keras/src/backend/openvino/__pycache__/layer.cpython-310.pyc,,
|
| 403 |
+
keras/src/backend/openvino/__pycache__/linalg.cpython-310.pyc,,
|
| 404 |
+
keras/src/backend/openvino/__pycache__/math.cpython-310.pyc,,
|
| 405 |
+
keras/src/backend/openvino/__pycache__/nn.cpython-310.pyc,,
|
| 406 |
+
keras/src/backend/openvino/__pycache__/numpy.cpython-310.pyc,,
|
| 407 |
+
keras/src/backend/openvino/__pycache__/random.cpython-310.pyc,,
|
| 408 |
+
keras/src/backend/openvino/__pycache__/rnn.cpython-310.pyc,,
|
| 409 |
+
keras/src/backend/openvino/__pycache__/trainer.cpython-310.pyc,,
|
| 410 |
+
keras/src/backend/openvino/core.py,sha256=8-0-DylNrE207E025BO5gw8qzzQC1prN5Hn2_INKZB8,19474
|
| 411 |
+
keras/src/backend/openvino/export.py,sha256=eDDZmCTXIyii3YXEPMEDXYVUI_z07BlHJaD0NovEoXE,360
|
| 412 |
+
keras/src/backend/openvino/image.py,sha256=mJzfbUE-WW6xLFZUO5APR8Un4jkSxjChA-YEeV_WC3M,938
|
| 413 |
+
keras/src/backend/openvino/layer.py,sha256=5RdvaH1yOyPAphjKiuQAK1H_yZFYKE1Hp7c5bZ1pkRk,30
|
| 414 |
+
keras/src/backend/openvino/linalg.py,sha256=7PtMY_-R94bcBE2xgCGZoXqi_6q8AX9805L3yOfXOs4,1326
|
| 415 |
+
keras/src/backend/openvino/math.py,sha256=T0zSJ3lvnS41tPhw_sPyenM1xn0jTSF4n3sp5gYV9BI,3492
|
| 416 |
+
keras/src/backend/openvino/nn.py,sha256=R1g-OMaQvnwYu9b1w_iNyG10JWOBEsac9ph1I8sk1do,14838
|
| 417 |
+
keras/src/backend/openvino/numpy.py,sha256=6qJLgTlRedWyJN53kc9b_zG10VHXAAXtT3-eXSmXQJ4,31837
|
| 418 |
+
keras/src/backend/openvino/random.py,sha256=MviLk8kg0h-DcgyqkjSLjVuQBaT55iQWJR6lr-Acxvo,3651
|
| 419 |
+
keras/src/backend/openvino/rnn.py,sha256=ErmuZLPSgG9qU-NfYPPvBZ6Ysy8k-fA4g19Vhqq7OVQ,866
|
| 420 |
+
keras/src/backend/openvino/trainer.py,sha256=wx2bdW71RMkOzJa9mNk5aLjjqq70qALpnfYE8jTRWbE,9069
|
| 421 |
+
keras/src/backend/tensorflow/__init__.py,sha256=DDqA8UAqSH9tEtx0mz5ib2PqrqVm3neQ1IPiSmjTDKg,1583
|
| 422 |
+
keras/src/backend/tensorflow/__pycache__/__init__.cpython-310.pyc,,
|
| 423 |
+
keras/src/backend/tensorflow/__pycache__/core.cpython-310.pyc,,
|
| 424 |
+
keras/src/backend/tensorflow/__pycache__/distribution_lib.cpython-310.pyc,,
|
| 425 |
+
keras/src/backend/tensorflow/__pycache__/export.cpython-310.pyc,,
|
| 426 |
+
keras/src/backend/tensorflow/__pycache__/image.cpython-310.pyc,,
|
| 427 |
+
keras/src/backend/tensorflow/__pycache__/layer.cpython-310.pyc,,
|
| 428 |
+
keras/src/backend/tensorflow/__pycache__/linalg.cpython-310.pyc,,
|
| 429 |
+
keras/src/backend/tensorflow/__pycache__/math.cpython-310.pyc,,
|
| 430 |
+
keras/src/backend/tensorflow/__pycache__/nn.cpython-310.pyc,,
|
| 431 |
+
keras/src/backend/tensorflow/__pycache__/numpy.cpython-310.pyc,,
|
| 432 |
+
keras/src/backend/tensorflow/__pycache__/optimizer.cpython-310.pyc,,
|
| 433 |
+
keras/src/backend/tensorflow/__pycache__/random.cpython-310.pyc,,
|
| 434 |
+
keras/src/backend/tensorflow/__pycache__/rnn.cpython-310.pyc,,
|
| 435 |
+
keras/src/backend/tensorflow/__pycache__/sparse.cpython-310.pyc,,
|
| 436 |
+
keras/src/backend/tensorflow/__pycache__/tensorboard.cpython-310.pyc,,
|
| 437 |
+
keras/src/backend/tensorflow/__pycache__/trackable.cpython-310.pyc,,
|
| 438 |
+
keras/src/backend/tensorflow/__pycache__/trainer.cpython-310.pyc,,
|
| 439 |
+
keras/src/backend/tensorflow/core.py,sha256=_tPIg4o-JC3hQ_vMDzVv5_HUwvg7ZPfG7HynhXXWtfY,21828
|
| 440 |
+
keras/src/backend/tensorflow/distribution_lib.py,sha256=blbl6frgrsdhxZTIXO88rq9drNtaqo_gE5rk7k8Qdzc,2747
|
| 441 |
+
keras/src/backend/tensorflow/export.py,sha256=pyynwLWfGdDBaHFq8Nq4wZ1ihn1Mmp8zAikXhS2ADaI,1326
|
| 442 |
+
keras/src/backend/tensorflow/image.py,sha256=dCCTezxx0_0bJwTM1Kw1lOBUKwqaRbEfTkU4azSGWyM,16997
|
| 443 |
+
keras/src/backend/tensorflow/layer.py,sha256=iE6XYSZENEoTpNhoXrEOm7gnIOHwOjETZd_p9J_16f0,4334
|
| 444 |
+
keras/src/backend/tensorflow/linalg.py,sha256=SOskCo6JJSjEP9xjbCSCh-CCu6_DRbl9QCobXXZ0P-Y,7624
|
| 445 |
+
keras/src/backend/tensorflow/math.py,sha256=eZSrriwIW9AC3R5ZNCMuLOV1uiyxGFwFdjA5T7f6iI0,12368
|
| 446 |
+
keras/src/backend/tensorflow/nn.py,sha256=lVBidWmieuqtFwnQ-vPXGwvorcODDTnxprnSeJR6IvM,34138
|
| 447 |
+
keras/src/backend/tensorflow/numpy.py,sha256=hBxEt21bfOBbIhJEgSA9Pn0JYsXBuVBQ1fWzy6QrPU4,84300
|
| 448 |
+
keras/src/backend/tensorflow/optimizer.py,sha256=kFlyEOnGjEYdLpd8mpwhUeku78__xBfZbbrDWpJrq60,9307
|
| 449 |
+
keras/src/backend/tensorflow/random.py,sha256=iO8V_soaDXZm9ewyAVbjudhsMj08C348c9Bz64nxXC4,6475
|
| 450 |
+
keras/src/backend/tensorflow/rnn.py,sha256=SwKOW9j4CYcSYmrlm1vYK34xU0TcVgBcz52fRUT50aM,34600
|
| 451 |
+
keras/src/backend/tensorflow/sparse.py,sha256=8oriKe2vp1GqbemKo0F4bkVIbb0tIcDTsgq3er1K4mo,32268
|
| 452 |
+
keras/src/backend/tensorflow/tensorboard.py,sha256=e7pXicuMfQjuCmq1wOmixWhWt2EbjLMBo_JPAqCbZRk,504
|
| 453 |
+
keras/src/backend/tensorflow/trackable.py,sha256=QZn0JvpBJ7Kx4e6zM2IVIWz9ADcWDB-dHN6vjoQBa9Q,1993
|
| 454 |
+
keras/src/backend/tensorflow/trainer.py,sha256=Y8VUyzvUoARe_T223Ux839PDkdCVtYBDjnx4jSKdEiM,36087
|
| 455 |
+
keras/src/backend/torch/__init__.py,sha256=NFqFuuDvd9Vq2nQR3oVe48ULQjJbGjFdp4b9jNX5W_g,2066
|
| 456 |
+
keras/src/backend/torch/__pycache__/__init__.cpython-310.pyc,,
|
| 457 |
+
keras/src/backend/torch/__pycache__/core.cpython-310.pyc,,
|
| 458 |
+
keras/src/backend/torch/__pycache__/export.cpython-310.pyc,,
|
| 459 |
+
keras/src/backend/torch/__pycache__/image.cpython-310.pyc,,
|
| 460 |
+
keras/src/backend/torch/__pycache__/layer.cpython-310.pyc,,
|
| 461 |
+
keras/src/backend/torch/__pycache__/linalg.cpython-310.pyc,,
|
| 462 |
+
keras/src/backend/torch/__pycache__/math.cpython-310.pyc,,
|
| 463 |
+
keras/src/backend/torch/__pycache__/nn.cpython-310.pyc,,
|
| 464 |
+
keras/src/backend/torch/__pycache__/numpy.cpython-310.pyc,,
|
| 465 |
+
keras/src/backend/torch/__pycache__/random.cpython-310.pyc,,
|
| 466 |
+
keras/src/backend/torch/__pycache__/rnn.cpython-310.pyc,,
|
| 467 |
+
keras/src/backend/torch/__pycache__/trainer.cpython-310.pyc,,
|
| 468 |
+
keras/src/backend/torch/core.py,sha256=dhTAEXD9IUnxef-aaZHp0wmGPTY9baB_WGqFJiLJ_fI,23764
|
| 469 |
+
keras/src/backend/torch/export.py,sha256=XPlZxynbCPY5iSA8StzT560Mra_CmLy7gHoART7V5VU,4855
|
| 470 |
+
keras/src/backend/torch/image.py,sha256=Rn24Z7mRbHWT-57dwsfCuxpwVrmzTrdCEmOEZxb8jWo,17862
|
| 471 |
+
keras/src/backend/torch/layer.py,sha256=vwPiyCMmF0Z_IlauaNkJEuyCY4jG7eirSWi-r2UEUPQ,2205
|
| 472 |
+
keras/src/backend/torch/linalg.py,sha256=5jmtd1oOfTlnf1_qVHVUG_I0QatYoesANETQ6FNZF7s,1875
|
| 473 |
+
keras/src/backend/torch/math.py,sha256=gXYOCjLPF6W1H3fzLEi3RPLfiJgvUApkDCh7Q43UACg,14316
|
| 474 |
+
keras/src/backend/torch/nn.py,sha256=wHpPvcA6uj95g79XBSFCmq2JwFD3UjjRseBo0ZsoLhM,32327
|
| 475 |
+
keras/src/backend/torch/numpy.py,sha256=w7J5ffuvqirekg-gMooM5UHUjOI8Q2uA3SW71s50bXQ,50797
|
| 476 |
+
keras/src/backend/torch/optimizers/__init__.py,sha256=yvqiyKgMEh-nGpacssdpsMySujyYB6lPy-Wil3onXvo,78
|
| 477 |
+
keras/src/backend/torch/optimizers/__pycache__/__init__.cpython-310.pyc,,
|
| 478 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_adadelta.cpython-310.pyc,,
|
| 479 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_adagrad.cpython-310.pyc,,
|
| 480 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_adam.cpython-310.pyc,,
|
| 481 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_adamax.cpython-310.pyc,,
|
| 482 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_adamw.cpython-310.pyc,,
|
| 483 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_lion.cpython-310.pyc,,
|
| 484 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_nadam.cpython-310.pyc,,
|
| 485 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_optimizer.cpython-310.pyc,,
|
| 486 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_parallel_optimizer.cpython-310.pyc,,
|
| 487 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_rmsprop.cpython-310.pyc,,
|
| 488 |
+
keras/src/backend/torch/optimizers/__pycache__/torch_sgd.cpython-310.pyc,,
|
| 489 |
+
keras/src/backend/torch/optimizers/torch_adadelta.py,sha256=iPjGHvD7q_VD0WaMNxuNcvz8uIWd0smRyEMzMqryUD4,1672
|
| 490 |
+
keras/src/backend/torch/optimizers/torch_adagrad.py,sha256=Mg0jEGVur0fXFGm9LjPxi55qMQFoaVPfOFtnkliZeXA,1041
|
| 491 |
+
keras/src/backend/torch/optimizers/torch_adam.py,sha256=qwbiK7OZS2OhxRXd-EaS5xJDxShQnVFNAL8OqHLF60E,1889
|
| 492 |
+
keras/src/backend/torch/optimizers/torch_adamax.py,sha256=8nkMw4dYj7agkigmFBpePb6nSNhJKrRVVtIjqLA0J1M,1483
|
| 493 |
+
keras/src/backend/torch/optimizers/torch_adamw.py,sha256=JcAtOdadgNPLH5cAlHkw_OSJ_wkGCyK5pQE3MQNk_Ps,150
|
| 494 |
+
keras/src/backend/torch/optimizers/torch_lion.py,sha256=JMik6y-n4FWgv6Ug5y8rGyl_eCHMQ7OXAFBNE9p5GC8,1041
|
| 495 |
+
keras/src/backend/torch/optimizers/torch_nadam.py,sha256=L7jC1fxvZOcAN7VxA1bi0WYpe_JVyfP5l1bfNKmj62k,2421
|
| 496 |
+
keras/src/backend/torch/optimizers/torch_optimizer.py,sha256=yiCcsZcbRY3HEtiXADDUJxqS74iRmrMwnEFtX5GFh9Q,1803
|
| 497 |
+
keras/src/backend/torch/optimizers/torch_parallel_optimizer.py,sha256=MXlJzuE7GKF_a6A0qspRorM2bQCSBAE2BOfKw9a5mnw,783
|
| 498 |
+
keras/src/backend/torch/optimizers/torch_rmsprop.py,sha256=BkxPLHL_8Qq-rt-CYLp4MO0L8hMjAKfrcKSgfgPA-_E,2053
|
| 499 |
+
keras/src/backend/torch/optimizers/torch_sgd.py,sha256=7BUKY8HtoWG_gdaTk_8SDUM9hR4Tbcld68qSLcFItiQ,1175
|
| 500 |
+
keras/src/backend/torch/random.py,sha256=YhLfC7qkGpzlU_i6gGPVormo3BMSo7OUA3TC3GCehrA,8292
|
| 501 |
+
keras/src/backend/torch/rnn.py,sha256=faunVsKvNOUehdYywLoMMAHXDVDYYLinXRnjA7u5Id0,13704
|
| 502 |
+
keras/src/backend/torch/trainer.py,sha256=-QMfgTrxGUX_AH2misXCc9nLgW1z-Nwksz005zS7rEk,17556
|
| 503 |
+
keras/src/callbacks/__init__.py,sha256=1W0PW4onBURqIZOth1ZU0KWXv-ZJQVcSdjh6fNdpz2A,922
|
| 504 |
+
keras/src/callbacks/__pycache__/__init__.cpython-310.pyc,,
|
| 505 |
+
keras/src/callbacks/__pycache__/backup_and_restore.cpython-310.pyc,,
|
| 506 |
+
keras/src/callbacks/__pycache__/callback.cpython-310.pyc,,
|
| 507 |
+
keras/src/callbacks/__pycache__/callback_list.cpython-310.pyc,,
|
| 508 |
+
keras/src/callbacks/__pycache__/csv_logger.cpython-310.pyc,,
|
| 509 |
+
keras/src/callbacks/__pycache__/early_stopping.cpython-310.pyc,,
|
| 510 |
+
keras/src/callbacks/__pycache__/history.cpython-310.pyc,,
|
| 511 |
+
keras/src/callbacks/__pycache__/lambda_callback.cpython-310.pyc,,
|
| 512 |
+
keras/src/callbacks/__pycache__/learning_rate_scheduler.cpython-310.pyc,,
|
| 513 |
+
keras/src/callbacks/__pycache__/model_checkpoint.cpython-310.pyc,,
|
| 514 |
+
keras/src/callbacks/__pycache__/progbar_logger.cpython-310.pyc,,
|
| 515 |
+
keras/src/callbacks/__pycache__/reduce_lr_on_plateau.cpython-310.pyc,,
|
| 516 |
+
keras/src/callbacks/__pycache__/remote_monitor.cpython-310.pyc,,
|
| 517 |
+
keras/src/callbacks/__pycache__/swap_ema_weights.cpython-310.pyc,,
|
| 518 |
+
keras/src/callbacks/__pycache__/tensorboard.cpython-310.pyc,,
|
| 519 |
+
keras/src/callbacks/__pycache__/terminate_on_nan.cpython-310.pyc,,
|
| 520 |
+
keras/src/callbacks/backup_and_restore.py,sha256=QsF_8rJIh9s-6g91fsgVYDJBXmCtse5MPqasUf4SXCM,9361
|
| 521 |
+
keras/src/callbacks/callback.py,sha256=SzoISmhmI4OWwslTmh-ROGpNU93fCEcf4JaENGudtt8,10153
|
| 522 |
+
keras/src/callbacks/callback_list.py,sha256=352U3o-oFLiM5TfmOLdgmpWPMuvvx9fkR6foOH2nsZI,8736
|
| 523 |
+
keras/src/callbacks/csv_logger.py,sha256=SX0vUniaMSrlBOVCLCZmiDYD-LM0kGH0fynVBQCom-A,3206
|
| 524 |
+
keras/src/callbacks/early_stopping.py,sha256=tIkxCQGsfs7VEv6MTTiHe1L8dj21_At2WDgF-cp0y0c,8910
|
| 525 |
+
keras/src/callbacks/history.py,sha256=Ed2lKv0Z-JgTZpS4PKKA7vkBP1EFzbLJXmsH_tXZ3_s,1301
|
| 526 |
+
keras/src/callbacks/lambda_callback.py,sha256=UWzsVV5zqPq034SALBg-jpWNIvnmzrXqPmX_9FWbRbs,3441
|
| 527 |
+
keras/src/callbacks/learning_rate_scheduler.py,sha256=II0SLxltUX3omRbGTYffd9KTWLRKtzW57SDRe70_t7E,2965
|
| 528 |
+
keras/src/callbacks/model_checkpoint.py,sha256=QdaTYF2HYiQRPU23IsJ8ch7OmEiOOKXpmTJ9sSb1ARk,18615
|
| 529 |
+
keras/src/callbacks/progbar_logger.py,sha256=BqddKoOyc8vxxtKriq5QD3n5JhVPUxkuWF2u1UlCriQ,3104
|
| 530 |
+
keras/src/callbacks/reduce_lr_on_plateau.py,sha256=IIn633i7saAFKla7Qf1OEdBggNKnYinQ1hW_lp65ITo,5340
|
| 531 |
+
keras/src/callbacks/remote_monitor.py,sha256=VDbNzCdddCDe_ZoeVvwV50oJkwOehhT_IDDYD8LzFOg,2727
|
| 532 |
+
keras/src/callbacks/swap_ema_weights.py,sha256=JFp0E2BDTBWxVMdsGgVFuArfX3OaNKdtD9pG9wnFV6o,6843
|
| 533 |
+
keras/src/callbacks/tensorboard.py,sha256=SnlWocoHpgTOmW7yrguBAkPHsHf3-UU9jMjhgvRyAsE,26973
|
| 534 |
+
keras/src/callbacks/terminate_on_nan.py,sha256=WWrXVVa927N7-vwzegcORMFAP3rk4eVqPzL8XvfSaHw,669
|
| 535 |
+
keras/src/constraints/__init__.py,sha256=3bDz814Sz2haFYT3puoLzv1Nqm9Uf2AwQqqamgqULPk,1715
|
| 536 |
+
keras/src/constraints/__pycache__/__init__.cpython-310.pyc,,
|
| 537 |
+
keras/src/constraints/__pycache__/constraints.cpython-310.pyc,,
|
| 538 |
+
keras/src/constraints/constraints.py,sha256=bn9uGKb-GuOoEd3SGJfFqc7SDS0ziGUeggozc5Yna_0,7333
|
| 539 |
+
keras/src/datasets/__init__.py,sha256=ivEFJkqLxwU5BEYqWsWTd66kJ96YMKFKiYQGHm2CX68,383
|
| 540 |
+
keras/src/datasets/__pycache__/__init__.cpython-310.pyc,,
|
| 541 |
+
keras/src/datasets/__pycache__/boston_housing.cpython-310.pyc,,
|
| 542 |
+
keras/src/datasets/__pycache__/california_housing.cpython-310.pyc,,
|
| 543 |
+
keras/src/datasets/__pycache__/cifar.cpython-310.pyc,,
|
| 544 |
+
keras/src/datasets/__pycache__/cifar10.cpython-310.pyc,,
|
| 545 |
+
keras/src/datasets/__pycache__/cifar100.cpython-310.pyc,,
|
| 546 |
+
keras/src/datasets/__pycache__/fashion_mnist.cpython-310.pyc,,
|
| 547 |
+
keras/src/datasets/__pycache__/imdb.cpython-310.pyc,,
|
| 548 |
+
keras/src/datasets/__pycache__/mnist.cpython-310.pyc,,
|
| 549 |
+
keras/src/datasets/__pycache__/reuters.cpython-310.pyc,,
|
| 550 |
+
keras/src/datasets/boston_housing.py,sha256=tWTEhV2LHaBaNviUU72ZIa7nr_nAEuSu_bXFh4kvkG0,2644
|
| 551 |
+
keras/src/datasets/california_housing.py,sha256=d7cceyP0hnKDaHYUF_VP5GWLJznxAPEqMuMkhnugVns,3850
|
| 552 |
+
keras/src/datasets/cifar.py,sha256=nnv0GQKypj68qnK8gMEjTY4h6orkO1g70huKQqdJmAQ,704
|
| 553 |
+
keras/src/datasets/cifar10.py,sha256=wnX2QW5UnMYaH931H-YZ6fdijiQQjtjJtj_z5K6MVkA,3189
|
| 554 |
+
keras/src/datasets/cifar100.py,sha256=XbPTtVIiYVsRXWI8sQxksf7nPEB9tMv7qyGMuHTiTLs,2973
|
| 555 |
+
keras/src/datasets/fashion_mnist.py,sha256=iAQoY3e7ln15BZ7nNIEWU4rT7ORsMiltDZdFgvC-dcI,2929
|
| 556 |
+
keras/src/datasets/imdb.py,sha256=0y7AHRu7p-9FyHqo9cjmm1zkRZJrgS716xm5h_zDXDg,7201
|
| 557 |
+
keras/src/datasets/mnist.py,sha256=VjVTM4Q8iucAS2hTXsUtjT6hktGDUHBfaGu4kNUwUYc,2393
|
| 558 |
+
keras/src/datasets/reuters.py,sha256=q7lveC4NfeBcTJrM0qBYXJTlafpVoonEGyMkLY8GubU,7214
|
| 559 |
+
keras/src/distribution/__init__.py,sha256=pseLHx387oTmXROr95tU7kNWjPL8-JB4kZs8nUHsOiU,718
|
| 560 |
+
keras/src/distribution/__pycache__/__init__.cpython-310.pyc,,
|
| 561 |
+
keras/src/distribution/__pycache__/distribution_lib.cpython-310.pyc,,
|
| 562 |
+
keras/src/distribution/distribution_lib.py,sha256=_b5ZJejY2LsRuGoEpaRagYhtEgd2bkbhOGTnbZSE69g,31552
|
| 563 |
+
keras/src/dtype_policies/__init__.py,sha256=qYQQC3MvU0BujZcP0IN7_0awcu926rtSRukjcV2TU5w,3545
|
| 564 |
+
keras/src/dtype_policies/__pycache__/__init__.cpython-310.pyc,,
|
| 565 |
+
keras/src/dtype_policies/__pycache__/dtype_policy.cpython-310.pyc,,
|
| 566 |
+
keras/src/dtype_policies/__pycache__/dtype_policy_map.cpython-310.pyc,,
|
| 567 |
+
keras/src/dtype_policies/dtype_policy.py,sha256=RNjKHjdTZeHJpf51crSr2TwLz_fi59YN8p_7k3UabVw,12745
|
| 568 |
+
keras/src/dtype_policies/dtype_policy_map.py,sha256=23Rm2NZlZ4DK8TESGKzQAbr1gwc4jJsyCVc1KBXUt-A,7902
|
| 569 |
+
keras/src/export/__init__.py,sha256=Mhd9QeM1sMbm316M8Gr9bEBInVzhWZZIEy3lFlD66eQ,211
|
| 570 |
+
keras/src/export/__pycache__/__init__.cpython-310.pyc,,
|
| 571 |
+
keras/src/export/__pycache__/export_utils.cpython-310.pyc,,
|
| 572 |
+
keras/src/export/__pycache__/onnx.cpython-310.pyc,,
|
| 573 |
+
keras/src/export/__pycache__/saved_model.cpython-310.pyc,,
|
| 574 |
+
keras/src/export/__pycache__/tf2onnx_lib.cpython-310.pyc,,
|
| 575 |
+
keras/src/export/__pycache__/tfsm_layer.cpython-310.pyc,,
|
| 576 |
+
keras/src/export/export_utils.py,sha256=wxiObhc7R07UqsKHWwK_KlDlN3iBFOaY1zUI3WKz2uw,4041
|
| 577 |
+
keras/src/export/onnx.py,sha256=d8NuR1xipci7wVCubPQac18BRO59Gj4rky5PKsXAGhI,5843
|
| 578 |
+
keras/src/export/saved_model.py,sha256=YpAfbC6RNs8DdxMz9mudAEY3KI40vLDDRP0d5yw5k-o,27334
|
| 579 |
+
keras/src/export/tf2onnx_lib.py,sha256=u3AP1458GHvFHIFNnqyu_yEgTKlgUYhmbYBc9YKQKKE,7210
|
| 580 |
+
keras/src/export/tfsm_layer.py,sha256=5psADcAXkJN_AR1A8nmIxh8kjaTAuAo8u-bGnEkK98c,5701
|
| 581 |
+
keras/src/initializers/__init__.py,sha256=tG7qxC2J0PDhO_L2W95sJXNIduL7F5lqHvUuJ7EIhXE,5662
|
| 582 |
+
keras/src/initializers/__pycache__/__init__.cpython-310.pyc,,
|
| 583 |
+
keras/src/initializers/__pycache__/constant_initializers.cpython-310.pyc,,
|
| 584 |
+
keras/src/initializers/__pycache__/initializer.cpython-310.pyc,,
|
| 585 |
+
keras/src/initializers/__pycache__/random_initializers.cpython-310.pyc,,
|
| 586 |
+
keras/src/initializers/constant_initializers.py,sha256=celz5tGkp2opqyuORykexWkMIQJe0AenJ9dVcGbf-ZY,9960
|
| 587 |
+
keras/src/initializers/initializer.py,sha256=kNAyRA8CzBdtknT6ZUt5XIO2_Z9NzpN119CId7wT1Vg,2632
|
| 588 |
+
keras/src/initializers/random_initializers.py,sha256=AuUeQ3YZGakDKTCs8njQLhozE6iWYHwP6-VstnEMOaQ,23631
|
| 589 |
+
keras/src/layers/__init__.py,sha256=8yLpcxh_y_AzXGAmsKinZdL5uGoEXOcq_SPFd3-AOPk,10716
|
| 590 |
+
keras/src/layers/__pycache__/__init__.cpython-310.pyc,,
|
| 591 |
+
keras/src/layers/__pycache__/input_spec.cpython-310.pyc,,
|
| 592 |
+
keras/src/layers/__pycache__/layer.cpython-310.pyc,,
|
| 593 |
+
keras/src/layers/activations/__init__.py,sha256=MhPBye8WWLSf_iDel3BuuqYk4nx6Sym8s4dZKb1KTqQ,272
|
| 594 |
+
keras/src/layers/activations/__pycache__/__init__.cpython-310.pyc,,
|
| 595 |
+
keras/src/layers/activations/__pycache__/activation.cpython-310.pyc,,
|
| 596 |
+
keras/src/layers/activations/__pycache__/elu.cpython-310.pyc,,
|
| 597 |
+
keras/src/layers/activations/__pycache__/leaky_relu.cpython-310.pyc,,
|
| 598 |
+
keras/src/layers/activations/__pycache__/prelu.cpython-310.pyc,,
|
| 599 |
+
keras/src/layers/activations/__pycache__/relu.cpython-310.pyc,,
|
| 600 |
+
keras/src/layers/activations/__pycache__/softmax.cpython-310.pyc,,
|
| 601 |
+
keras/src/layers/activations/activation.py,sha256=JOnb1NjMUcaccpxVBslnQkvCBbAKgbwOrJIpkELdNMo,1282
|
| 602 |
+
keras/src/layers/activations/elu.py,sha256=rhRvrQzgWO2d4D4UlwD05g-PwkorYKHefh__cbY4uto,835
|
| 603 |
+
keras/src/layers/activations/leaky_relu.py,sha256=M2l1H1-iYU_Rkhkc9WTnzuwgQxmOXTDD59bvXYZ651k,1926
|
| 604 |
+
keras/src/layers/activations/prelu.py,sha256=39aa5muLXt_eO_aLyE_z0_3aNed94wCKHmtDc7IGzdo,3454
|
| 605 |
+
keras/src/layers/activations/relu.py,sha256=7pWkLOzTbMdbBAboCJPHXMm1GlFJ4sW0AH7RMdmxwWs,2684
|
| 606 |
+
keras/src/layers/activations/softmax.py,sha256=XMbpJdvtRGnOf7QKHGrIH2cnrQMs6kGB2un98Ddm37I,2264
|
| 607 |
+
keras/src/layers/attention/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 608 |
+
keras/src/layers/attention/__pycache__/__init__.cpython-310.pyc,,
|
| 609 |
+
keras/src/layers/attention/__pycache__/additive_attention.cpython-310.pyc,,
|
| 610 |
+
keras/src/layers/attention/__pycache__/attention.cpython-310.pyc,,
|
| 611 |
+
keras/src/layers/attention/__pycache__/grouped_query_attention.cpython-310.pyc,,
|
| 612 |
+
keras/src/layers/attention/__pycache__/multi_head_attention.cpython-310.pyc,,
|
| 613 |
+
keras/src/layers/attention/additive_attention.py,sha256=Es5Ca_IV06G67xy3v5T1Kt2sjM70L_P5Fmiy9FfTE28,4335
|
| 614 |
+
keras/src/layers/attention/attention.py,sha256=-P7wDcua_AdHLjWyc8rW9dstYTAjjW9K1XBm9km7y9s,13494
|
| 615 |
+
keras/src/layers/attention/grouped_query_attention.py,sha256=6c-z2qWlkhX52Kjz6KShHfL6No_dMGaqgD76wTKUfRg,21042
|
| 616 |
+
keras/src/layers/attention/multi_head_attention.py,sha256=ph4R_K9Fxcir1iZJFx3AgKuokR_1eQkBF45I_BFIWh0,32006
|
| 617 |
+
keras/src/layers/convolutional/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 618 |
+
keras/src/layers/convolutional/__pycache__/__init__.cpython-310.pyc,,
|
| 619 |
+
keras/src/layers/convolutional/__pycache__/base_conv.cpython-310.pyc,,
|
| 620 |
+
keras/src/layers/convolutional/__pycache__/base_conv_transpose.cpython-310.pyc,,
|
| 621 |
+
keras/src/layers/convolutional/__pycache__/base_depthwise_conv.cpython-310.pyc,,
|
| 622 |
+
keras/src/layers/convolutional/__pycache__/base_separable_conv.cpython-310.pyc,,
|
| 623 |
+
keras/src/layers/convolutional/__pycache__/conv1d.cpython-310.pyc,,
|
| 624 |
+
keras/src/layers/convolutional/__pycache__/conv1d_transpose.cpython-310.pyc,,
|
| 625 |
+
keras/src/layers/convolutional/__pycache__/conv2d.cpython-310.pyc,,
|
| 626 |
+
keras/src/layers/convolutional/__pycache__/conv2d_transpose.cpython-310.pyc,,
|
| 627 |
+
keras/src/layers/convolutional/__pycache__/conv3d.cpython-310.pyc,,
|
| 628 |
+
keras/src/layers/convolutional/__pycache__/conv3d_transpose.cpython-310.pyc,,
|
| 629 |
+
keras/src/layers/convolutional/__pycache__/depthwise_conv1d.cpython-310.pyc,,
|
| 630 |
+
keras/src/layers/convolutional/__pycache__/depthwise_conv2d.cpython-310.pyc,,
|
| 631 |
+
keras/src/layers/convolutional/__pycache__/separable_conv1d.cpython-310.pyc,,
|
| 632 |
+
keras/src/layers/convolutional/__pycache__/separable_conv2d.cpython-310.pyc,,
|
| 633 |
+
keras/src/layers/convolutional/base_conv.py,sha256=OJXpjrTfqkYMANwsLYMQ7WKeaePWEaCuI6Bkwbrb9U0,17280
|
| 634 |
+
keras/src/layers/convolutional/base_conv_transpose.py,sha256=Z2CF1hiOu6k1bbThliIeag9vQQeLo89PIrDRaAOVxe8,10712
|
| 635 |
+
keras/src/layers/convolutional/base_depthwise_conv.py,sha256=0LLxZif9xy7jf5UgZpN9y2n1riesdjrQbhw5-X4LgDo,11634
|
| 636 |
+
keras/src/layers/convolutional/base_separable_conv.py,sha256=qoD03-NxthlkpHhd6DVMx57wvPt7tcx8lJFnR9Bg-tA,12660
|
| 637 |
+
keras/src/layers/convolutional/conv1d.py,sha256=2RV1hjQi7A4oj-issZ6_kRoWEA-J9WXqON7N_mbhifA,7321
|
| 638 |
+
keras/src/layers/convolutional/conv1d_transpose.py,sha256=Mg4g5cd-RNf4QTCHMaUUQoZrJDLrzEukcO9UwK2CBN0,5575
|
| 639 |
+
keras/src/layers/convolutional/conv2d.py,sha256=c1VaoYr8YSZj5YCTa-1zKOXDQANryBrJptqWi5sbZOE,5689
|
| 640 |
+
keras/src/layers/convolutional/conv2d_transpose.py,sha256=mlGI66wHkEUIdhExtGgShFN2xPrGm79xkrYoLwQ58k8,5695
|
| 641 |
+
keras/src/layers/convolutional/conv3d.py,sha256=ZVHcutPZBEeGB9fV88B7yEZD21VqlnBIovFP3lvviX8,5918
|
| 642 |
+
keras/src/layers/convolutional/conv3d_transpose.py,sha256=TB3oGatWi9PQHPsLNuD_NChS3UvuWhiYDimbwSntcD4,5901
|
| 643 |
+
keras/src/layers/convolutional/depthwise_conv1d.py,sha256=ekylnBEKTDUgPB3OkoqZx3M7xgrHabzCA-ww_wEqVFY,6003
|
| 644 |
+
keras/src/layers/convolutional/depthwise_conv2d.py,sha256=rnCd_S3UVeNdVotjKW1WloTEZIGY2diNhKuQmmpnjxM,6100
|
| 645 |
+
keras/src/layers/convolutional/separable_conv1d.py,sha256=vL5qzdaSOOTgyn1A6y9IZZbQOEeB6FedPk9JJI5wqSY,6452
|
| 646 |
+
keras/src/layers/convolutional/separable_conv2d.py,sha256=ZkLOnA6l5UV3GuJufwlOHMOm1S-xkt6sdF-qmP4PDjw,6533
|
| 647 |
+
keras/src/layers/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 648 |
+
keras/src/layers/core/__pycache__/__init__.cpython-310.pyc,,
|
| 649 |
+
keras/src/layers/core/__pycache__/dense.cpython-310.pyc,,
|
| 650 |
+
keras/src/layers/core/__pycache__/einsum_dense.cpython-310.pyc,,
|
| 651 |
+
keras/src/layers/core/__pycache__/embedding.cpython-310.pyc,,
|
| 652 |
+
keras/src/layers/core/__pycache__/identity.cpython-310.pyc,,
|
| 653 |
+
keras/src/layers/core/__pycache__/input_layer.cpython-310.pyc,,
|
| 654 |
+
keras/src/layers/core/__pycache__/lambda_layer.cpython-310.pyc,,
|
| 655 |
+
keras/src/layers/core/__pycache__/masking.cpython-310.pyc,,
|
| 656 |
+
keras/src/layers/core/__pycache__/wrapper.cpython-310.pyc,,
|
| 657 |
+
keras/src/layers/core/dense.py,sha256=A3VfqlPzQjChSVXzTo1o2OIUM-H1dZWtphCqA_-64Rk,23572
|
| 658 |
+
keras/src/layers/core/einsum_dense.py,sha256=etMN2e-EPWxOPCKCAXBUWtCL8b60hxIjfnf2R6XkvF4,41261
|
| 659 |
+
keras/src/layers/core/embedding.py,sha256=qAE_J9owpNPXMwHbMmj2Gvad5B19guGB4niXwU9AdEA,16183
|
| 660 |
+
keras/src/layers/core/identity.py,sha256=jI9teEM3ZMT8blcC8d_3yCBaj8CbTuMELez5H39gkbM,843
|
| 661 |
+
keras/src/layers/core/input_layer.py,sha256=sW3GdZ1pWdMZtTG5H9458qnf8AMR4ko6tF-qsRMUqW8,7369
|
| 662 |
+
keras/src/layers/core/lambda_layer.py,sha256=wCb8VFqwlO0iWwTAEs2wQIQIJW27l1xfybFfhUbNSzw,9194
|
| 663 |
+
keras/src/layers/core/masking.py,sha256=-EBbTAjeCBw-BPWwg1-imyKzAeRxfO-YYwM2AzAMzGE,2574
|
| 664 |
+
keras/src/layers/core/wrapper.py,sha256=nhgyWdLqHfxWhYDQZ1mU7Fw9lmXZRKHIknBDaywLbeU,1535
|
| 665 |
+
keras/src/layers/input_spec.py,sha256=M52SiBu_4uogdrMYW8BoyeWSElb4ahwa5X04yDkpbs0,9849
|
| 666 |
+
keras/src/layers/layer.py,sha256=iYmZiEAbtrewh0z7kFFHp7mRc2X8bdsT0NOkIi6wmRc,68738
|
| 667 |
+
keras/src/layers/merging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 668 |
+
keras/src/layers/merging/__pycache__/__init__.cpython-310.pyc,,
|
| 669 |
+
keras/src/layers/merging/__pycache__/add.cpython-310.pyc,,
|
| 670 |
+
keras/src/layers/merging/__pycache__/average.cpython-310.pyc,,
|
| 671 |
+
keras/src/layers/merging/__pycache__/base_merge.cpython-310.pyc,,
|
| 672 |
+
keras/src/layers/merging/__pycache__/concatenate.cpython-310.pyc,,
|
| 673 |
+
keras/src/layers/merging/__pycache__/dot.cpython-310.pyc,,
|
| 674 |
+
keras/src/layers/merging/__pycache__/maximum.cpython-310.pyc,,
|
| 675 |
+
keras/src/layers/merging/__pycache__/minimum.cpython-310.pyc,,
|
| 676 |
+
keras/src/layers/merging/__pycache__/multiply.cpython-310.pyc,,
|
| 677 |
+
keras/src/layers/merging/__pycache__/subtract.cpython-310.pyc,,
|
| 678 |
+
keras/src/layers/merging/add.py,sha256=icbh3RwZ3QUP3bFNCi7GbrHj2hFdKu1Dsv8djSa13co,2150
|
| 679 |
+
keras/src/layers/merging/average.py,sha256=RPW8Lpj0U3ebMdvhyI451Iw_Qn7p6tKAEgdgDds19Co,2214
|
| 680 |
+
keras/src/layers/merging/base_merge.py,sha256=NpkijhQvcCpU_Wq3OOK6bhZZv2HqHAx6WX-Alm3WHgc,10800
|
| 681 |
+
keras/src/layers/merging/concatenate.py,sha256=WZGtrV863hawY3JkTbElPRPkAZA8G3oK6XVcrUEEq5A,6798
|
| 682 |
+
keras/src/layers/merging/dot.py,sha256=XR3KiuhdEF6tatDndYWvfngwJj2MWXHb4NprLZWQWJ0,12807
|
| 683 |
+
keras/src/layers/merging/maximum.py,sha256=5lF8X0raVikM8YimdXJlZlbwT6-BGFD3O61sDsPidcw,2142
|
| 684 |
+
keras/src/layers/merging/minimum.py,sha256=f8RN1O5yYzDqJbXuVTBKC0TKdEw_VU4bC4pZX2zE35A,2140
|
| 685 |
+
keras/src/layers/merging/multiply.py,sha256=WvBX5gOpouqfQYnpioKMw2Tj6HRQQ2LNBuvKsRo_6P0,3185
|
| 686 |
+
keras/src/layers/merging/subtract.py,sha256=ijpJDomo1JSMCw97Rn55LXiVLsI50lcvUxmZiv_HIzo,2684
|
| 687 |
+
keras/src/layers/normalization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 688 |
+
keras/src/layers/normalization/__pycache__/__init__.cpython-310.pyc,,
|
| 689 |
+
keras/src/layers/normalization/__pycache__/batch_normalization.cpython-310.pyc,,
|
| 690 |
+
keras/src/layers/normalization/__pycache__/group_normalization.cpython-310.pyc,,
|
| 691 |
+
keras/src/layers/normalization/__pycache__/layer_normalization.cpython-310.pyc,,
|
| 692 |
+
keras/src/layers/normalization/__pycache__/spectral_normalization.cpython-310.pyc,,
|
| 693 |
+
keras/src/layers/normalization/__pycache__/unit_normalization.cpython-310.pyc,,
|
| 694 |
+
keras/src/layers/normalization/batch_normalization.py,sha256=PzT-Ucj5p-qLGrgN0RxbqL_ICVViaJR15PsFVNVEi60,14161
|
| 695 |
+
keras/src/layers/normalization/group_normalization.py,sha256=S8w40kMCi_aEN079vwDPxaV7K02Ny0HocZJ1ATX4SpA,9367
|
| 696 |
+
keras/src/layers/normalization/layer_normalization.py,sha256=68Al0piqjcxBWWb-9q_AiorPuguXRMapE06CM5mwp8w,10265
|
| 697 |
+
keras/src/layers/normalization/spectral_normalization.py,sha256=HTzypVIzBID26wsB80OSkyvBR0IO48XGRrMSF-u6rdE,4304
|
| 698 |
+
keras/src/layers/normalization/unit_normalization.py,sha256=7YJphfXGpXGrZcaUj6RKYDpgd0EqPUs2tgK3DbybCAI,2059
|
| 699 |
+
keras/src/layers/pooling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 700 |
+
keras/src/layers/pooling/__pycache__/__init__.cpython-310.pyc,,
|
| 701 |
+
keras/src/layers/pooling/__pycache__/average_pooling1d.cpython-310.pyc,,
|
| 702 |
+
keras/src/layers/pooling/__pycache__/average_pooling2d.cpython-310.pyc,,
|
| 703 |
+
keras/src/layers/pooling/__pycache__/average_pooling3d.cpython-310.pyc,,
|
| 704 |
+
keras/src/layers/pooling/__pycache__/base_global_pooling.cpython-310.pyc,,
|
| 705 |
+
keras/src/layers/pooling/__pycache__/base_pooling.cpython-310.pyc,,
|
| 706 |
+
keras/src/layers/pooling/__pycache__/global_average_pooling1d.cpython-310.pyc,,
|
| 707 |
+
keras/src/layers/pooling/__pycache__/global_average_pooling2d.cpython-310.pyc,,
|
| 708 |
+
keras/src/layers/pooling/__pycache__/global_average_pooling3d.cpython-310.pyc,,
|
| 709 |
+
keras/src/layers/pooling/__pycache__/global_max_pooling1d.cpython-310.pyc,,
|
| 710 |
+
keras/src/layers/pooling/__pycache__/global_max_pooling2d.cpython-310.pyc,,
|
| 711 |
+
keras/src/layers/pooling/__pycache__/global_max_pooling3d.cpython-310.pyc,,
|
| 712 |
+
keras/src/layers/pooling/__pycache__/max_pooling1d.cpython-310.pyc,,
|
| 713 |
+
keras/src/layers/pooling/__pycache__/max_pooling2d.cpython-310.pyc,,
|
| 714 |
+
keras/src/layers/pooling/__pycache__/max_pooling3d.cpython-310.pyc,,
|
| 715 |
+
keras/src/layers/pooling/average_pooling1d.py,sha256=bFtZmRxvmQCUMA5vLE2SOHc8wQO0acKa7WJu8lTNgfU,3347
|
| 716 |
+
keras/src/layers/pooling/average_pooling2d.py,sha256=SAihWGF5q0YNsxHVX8lYF3RrGt3RWuZesgMIVUqfRDI,4153
|
| 717 |
+
keras/src/layers/pooling/average_pooling3d.py,sha256=ukbXgxotazAuDec_RsuKWj8khmmmCEnZdPQKg8J6SNA,3238
|
| 718 |
+
keras/src/layers/pooling/base_global_pooling.py,sha256=_d1a2c2twJxzLJ0ULAXf5444Prr8SDsFomoTRxs4vwI,1486
|
| 719 |
+
keras/src/layers/pooling/base_pooling.py,sha256=KNyul-L6f3UnIueC_04OQAf-c1JvpL_S1BpwxEZNV4E,2451
|
| 720 |
+
keras/src/layers/pooling/global_average_pooling1d.py,sha256=h9zAVA0Dpxwk_-tn15v1NS-E0YZ_d4YGBS-IqOPxF94,3131
|
| 721 |
+
keras/src/layers/pooling/global_average_pooling2d.py,sha256=hVzDSoG7VLExX1N0YZ_kTAvONRSr5UVsjqpvvCpFZmI,2469
|
| 722 |
+
keras/src/layers/pooling/global_average_pooling3d.py,sha256=jyL1rQmuoUcynfqhEAxyB1Y83WcTasAZ9pZHoWB8ER8,2603
|
| 723 |
+
keras/src/layers/pooling/global_max_pooling1d.py,sha256=1RpUDPbnvHCltb0DZY38FHqg9_ruWgLT4G-FZUsy4H4,2357
|
| 724 |
+
keras/src/layers/pooling/global_max_pooling2d.py,sha256=9d5ELOYLxeWyxp-PxSBo8AKIOoh0Vcv8FAGs0Xd87k0,2451
|
| 725 |
+
keras/src/layers/pooling/global_max_pooling3d.py,sha256=NfsKoJHgKiEnCd8yMia6VyjRJXQIH1d-WnfIZIYqDRE,2585
|
| 726 |
+
keras/src/layers/pooling/max_pooling1d.py,sha256=tcUlxUaxW-TWSO_XLcc1_ObDHCMNUADDZ993pwYmDAc,3346
|
| 727 |
+
keras/src/layers/pooling/max_pooling2d.py,sha256=c8-EZmzYZRLgwE8TiWb3HRMiJiI_fplOELjrFUH5x2c,4128
|
| 728 |
+
keras/src/layers/pooling/max_pooling3d.py,sha256=xVsJd6KPyu1m9jCVuwT3MZwpwT27TSx0k9cI_PhB2_8,3228
|
| 729 |
+
keras/src/layers/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 730 |
+
keras/src/layers/preprocessing/__pycache__/__init__.cpython-310.pyc,,
|
| 731 |
+
keras/src/layers/preprocessing/__pycache__/category_encoding.cpython-310.pyc,,
|
| 732 |
+
keras/src/layers/preprocessing/__pycache__/discretization.cpython-310.pyc,,
|
| 733 |
+
keras/src/layers/preprocessing/__pycache__/feature_space.cpython-310.pyc,,
|
| 734 |
+
keras/src/layers/preprocessing/__pycache__/hashed_crossing.cpython-310.pyc,,
|
| 735 |
+
keras/src/layers/preprocessing/__pycache__/hashing.cpython-310.pyc,,
|
| 736 |
+
keras/src/layers/preprocessing/__pycache__/index_lookup.cpython-310.pyc,,
|
| 737 |
+
keras/src/layers/preprocessing/__pycache__/integer_lookup.cpython-310.pyc,,
|
| 738 |
+
keras/src/layers/preprocessing/__pycache__/mel_spectrogram.cpython-310.pyc,,
|
| 739 |
+
keras/src/layers/preprocessing/__pycache__/normalization.cpython-310.pyc,,
|
| 740 |
+
keras/src/layers/preprocessing/__pycache__/pipeline.cpython-310.pyc,,
|
| 741 |
+
keras/src/layers/preprocessing/__pycache__/rescaling.cpython-310.pyc,,
|
| 742 |
+
keras/src/layers/preprocessing/__pycache__/stft_spectrogram.cpython-310.pyc,,
|
| 743 |
+
keras/src/layers/preprocessing/__pycache__/string_lookup.cpython-310.pyc,,
|
| 744 |
+
keras/src/layers/preprocessing/__pycache__/text_vectorization.cpython-310.pyc,,
|
| 745 |
+
keras/src/layers/preprocessing/__pycache__/tf_data_layer.cpython-310.pyc,,
|
| 746 |
+
keras/src/layers/preprocessing/category_encoding.py,sha256=_8VZN-AoH07m0wOCmDB_Bf2xuQT7bZjUtVhI-MULU6o,6922
|
| 747 |
+
keras/src/layers/preprocessing/discretization.py,sha256=2KvXZ2NSTaUP3IBMDydCANK7RNa3EwxvW9S5s4kIPsM,13080
|
| 748 |
+
keras/src/layers/preprocessing/feature_space.py,sha256=-uA-gxQpCccwXuoC6LLcQIShoa_2DymnUspl89zRPTg,30196
|
| 749 |
+
keras/src/layers/preprocessing/hashed_crossing.py,sha256=4ajEp1MHtLc0UKTbpO6f4wFGAZZIMjdPMCYm6qFZJA4,8488
|
| 750 |
+
keras/src/layers/preprocessing/hashing.py,sha256=CtVKFmvr11tRTslGZ2q8PsHVrfK94BoVzlq_Z1keQyw,11189
|
| 751 |
+
keras/src/layers/preprocessing/image_preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 752 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/__init__.cpython-310.pyc,,
|
| 753 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/auto_contrast.cpython-310.pyc,,
|
| 754 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/base_image_preprocessing_layer.cpython-310.pyc,,
|
| 755 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/center_crop.cpython-310.pyc,,
|
| 756 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/equalization.cpython-310.pyc,,
|
| 757 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/max_num_bounding_box.cpython-310.pyc,,
|
| 758 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/mix_up.cpython-310.pyc,,
|
| 759 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/rand_augment.cpython-310.pyc,,
|
| 760 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_brightness.cpython-310.pyc,,
|
| 761 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_degeneration.cpython-310.pyc,,
|
| 762 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_color_jitter.cpython-310.pyc,,
|
| 763 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_contrast.cpython-310.pyc,,
|
| 764 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_crop.cpython-310.pyc,,
|
| 765 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_flip.cpython-310.pyc,,
|
| 766 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_grayscale.cpython-310.pyc,,
|
| 767 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_hue.cpython-310.pyc,,
|
| 768 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_posterization.cpython-310.pyc,,
|
| 769 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_rotation.cpython-310.pyc,,
|
| 770 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_saturation.cpython-310.pyc,,
|
| 771 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_sharpness.cpython-310.pyc,,
|
| 772 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_shear.cpython-310.pyc,,
|
| 773 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_translation.cpython-310.pyc,,
|
| 774 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/random_zoom.cpython-310.pyc,,
|
| 775 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/resizing.cpython-310.pyc,,
|
| 776 |
+
keras/src/layers/preprocessing/image_preprocessing/__pycache__/solarization.cpython-310.pyc,,
|
| 777 |
+
keras/src/layers/preprocessing/image_preprocessing/auto_contrast.py,sha256=GF23qTCPknYYTJzYeC136vRosZls432YbJe8S_YdpPg,3799
|
| 778 |
+
keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py,sha256=vK9tLyfuxXYVfoH8tTMmN_tH5joToe1ctqfv7jZLpB8,13860
|
| 779 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 780 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/__init__.cpython-310.pyc,,
|
| 781 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/bounding_box.cpython-310.pyc,,
|
| 782 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/converters.cpython-310.pyc,,
|
| 783 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/formats.cpython-310.pyc,,
|
| 784 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/iou.cpython-310.pyc,,
|
| 785 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__pycache__/validation.cpython-310.pyc,,
|
| 786 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/bounding_box.py,sha256=aI9u1OvCcnEPBUqMFHS-49xwFLZnhuKgFgB1b3NCAUQ,16270
|
| 787 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py,sha256=opXlmX5SRiWEU2_M1PqkiBVi8LRNfIIDMPMfMY_2Yp0,15999
|
| 788 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py,sha256=b4v7nskUauUvk7Ub4rgImPUysJrDl4m5oBTGD1MEnTI,3377
|
| 789 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/iou.py,sha256=dJPC9FL3ViQSItURsiFKu6IZ5oelUQ91KFguWdBu8qA,10144
|
| 790 |
+
keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/validation.py,sha256=aNeC8VBG2xY7qvNAcbTpb824SDyOf88iGMNIohwsjQk,7189
|
| 791 |
+
keras/src/layers/preprocessing/image_preprocessing/center_crop.py,sha256=67il9tcOnt69j55lA4hYDrJJboppFCgnQptBL2dvF38,10022
|
| 792 |
+
keras/src/layers/preprocessing/image_preprocessing/equalization.py,sha256=V5flpM63vc--L7lMMKfEDXQZ8hu-no0qXNudkUTeCms,8648
|
| 793 |
+
keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box.py,sha256=VJ3eq2GwSeFUWJgQLqe2lHqrVn1qJyoJLUycOv4Xqjo,3304
|
| 794 |
+
keras/src/layers/preprocessing/image_preprocessing/mix_up.py,sha256=wtT7wvlKaVRWu7XbpToESNVNI0KDNmuUCdM8RRTnSms,6520
|
| 795 |
+
keras/src/layers/preprocessing/image_preprocessing/rand_augment.py,sha256=zQRxI_F_9GvKoOQ6LRFyLZ2nuzeRDIEelbNjaHyQi2I,7625
|
| 796 |
+
keras/src/layers/preprocessing/image_preprocessing/random_brightness.py,sha256=-I9ovcx_0Ok0XS9NdtY4Q0MBo-izSpChAVqy8rCWlkE,6072
|
| 797 |
+
keras/src/layers/preprocessing/image_preprocessing/random_color_degeneration.py,sha256=JAItpxwEXaZibMCjna56fj8aciCcmmz95zT3HeVFSu0,4765
|
| 798 |
+
keras/src/layers/preprocessing/image_preprocessing/random_color_jitter.py,sha256=dqgQiCVXLYKIlS4mf3krrdYiZMzDrLqsz60vex0bi6I,9333
|
| 799 |
+
keras/src/layers/preprocessing/image_preprocessing/random_contrast.py,sha256=cMVHg9LC0NGHDk1PYL7UMDyZpIbTixX4Rmm32Wr3_7g,5463
|
| 800 |
+
keras/src/layers/preprocessing/image_preprocessing/random_crop.py,sha256=3DPcHVyYj3g8SZ8hV6eTJlVt41Nv6pMGVjLf_Wca5kc,10542
|
| 801 |
+
keras/src/layers/preprocessing/image_preprocessing/random_flip.py,sha256=fQurnkSGbehqFhfyVHQCxpfidU-hnq3mx9nhBEK21Eg,8046
|
| 802 |
+
keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py,sha256=STScLoNMZtwr_54jj8rcQcmVU_SLUBUXSQna85hVvAU,4260
|
| 803 |
+
keras/src/layers/preprocessing/image_preprocessing/random_hue.py,sha256=XdlmKw81K9z6YUFmEKgu9kYQtFtdT1zJelXX_TxtN_c,6335
|
| 804 |
+
keras/src/layers/preprocessing/image_preprocessing/random_posterization.py,sha256=w_MxVjDPqVjQgFiPmbRWzGDZg_BvZEYV5-KPA6Dg9Ik,5036
|
| 805 |
+
keras/src/layers/preprocessing/image_preprocessing/random_rotation.py,sha256=uhZiuLac4VHbrUO9Uyk0bt4yfulw-LZ8Z177z-6yfXM,9624
|
| 806 |
+
keras/src/layers/preprocessing/image_preprocessing/random_saturation.py,sha256=3CqJ5-kvWaENMrsiTL8TbImL3ToOiGRD3C5gfoz3J24,5924
|
| 807 |
+
keras/src/layers/preprocessing/image_preprocessing/random_sharpness.py,sha256=qczPlJ_wl5636T1oqrlp_dE9jSn4xpfXNROOhmF3zKI,6021
|
| 808 |
+
keras/src/layers/preprocessing/image_preprocessing/random_shear.py,sha256=fS33rneM_r4UAqfdtGXi9Tki6ooUh9Il_qXqIwxkRXI,14889
|
| 809 |
+
keras/src/layers/preprocessing/image_preprocessing/random_translation.py,sha256=4hOk8oynwzTNPNIHGlGjOey7yaQQ8xn1Ac8R9Wbd3nI,14921
|
| 810 |
+
keras/src/layers/preprocessing/image_preprocessing/random_zoom.py,sha256=g62ZOZkf_Vv3g9eC92X4Vk8NBnh0pQ1HP6Qu6Xrl1RY,16462
|
| 811 |
+
keras/src/layers/preprocessing/image_preprocessing/resizing.py,sha256=VjVOiLolCfZ-i2OTaLuiPndPGKskMrqTM6FxtWfUn2Q,11812
|
| 812 |
+
keras/src/layers/preprocessing/image_preprocessing/solarization.py,sha256=xfOlqPf_CA2t4dP1rFw1tcUP1mhDyliJTipCajYR5u0,7884
|
| 813 |
+
keras/src/layers/preprocessing/index_lookup.py,sha256=DCf_TKmJx8wftMfjJ_ETpKz6Tq3RsDUXR7gbwIhcvT8,41996
|
| 814 |
+
keras/src/layers/preprocessing/integer_lookup.py,sha256=4rlZ03HLx3g-t7r9u0K9gymKYo1-iDw8NYRjkQmL23o,18458
|
| 815 |
+
keras/src/layers/preprocessing/mel_spectrogram.py,sha256=siDkgfjItBQlq0ZxDwuyVFWUEWfxK-_4OV-ePVDvINU,14572
|
| 816 |
+
keras/src/layers/preprocessing/normalization.py,sha256=qtJAzfr6JH2fsigGGydbV_tuY-JVlffqB45cPOgxNgc,14973
|
| 817 |
+
keras/src/layers/preprocessing/pipeline.py,sha256=D6dd1LQTW9m9jUaeorTn29rY19gRmkSXXaUxj02kUxc,2533
|
| 818 |
+
keras/src/layers/preprocessing/rescaling.py,sha256=OkjATRt1n3ncO2FL26zM2kj8NC3bu3fJGORT4nAyG8I,2798
|
| 819 |
+
keras/src/layers/preprocessing/stft_spectrogram.py,sha256=r02Qko8raSF1vQrlL_SNlXW7Rjt8UiZOF-Y68-WkmGU,15059
|
| 820 |
+
keras/src/layers/preprocessing/string_lookup.py,sha256=a5r6C7Y39M58JCkMd2851HmQYjagKdAltve2NExsawU,17745
|
| 821 |
+
keras/src/layers/preprocessing/text_vectorization.py,sha256=kqCXKpRBkqrrX5SZPBrwwruw7ByOxew-xQ6OZoREuRA,27816
|
| 822 |
+
keras/src/layers/preprocessing/tf_data_layer.py,sha256=ps0Az4BbFcxdwdZ2dYzOPFQQ8tYTOzKyiNSpu5dwAFU,2628
|
| 823 |
+
keras/src/layers/regularization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 824 |
+
keras/src/layers/regularization/__pycache__/__init__.cpython-310.pyc,,
|
| 825 |
+
keras/src/layers/regularization/__pycache__/activity_regularization.cpython-310.pyc,,
|
| 826 |
+
keras/src/layers/regularization/__pycache__/alpha_dropout.cpython-310.pyc,,
|
| 827 |
+
keras/src/layers/regularization/__pycache__/dropout.cpython-310.pyc,,
|
| 828 |
+
keras/src/layers/regularization/__pycache__/gaussian_dropout.cpython-310.pyc,,
|
| 829 |
+
keras/src/layers/regularization/__pycache__/gaussian_noise.cpython-310.pyc,,
|
| 830 |
+
keras/src/layers/regularization/__pycache__/spatial_dropout.cpython-310.pyc,,
|
| 831 |
+
keras/src/layers/regularization/activity_regularization.py,sha256=m7E0xA2dqT0m-qLj1LBNAitszaaqtLlCuScc587-BpA,1278
|
| 832 |
+
keras/src/layers/regularization/alpha_dropout.py,sha256=KSJOFE249x0XUrdXmotAPjwEHrFvw7o6Q6X6D6Eg2OQ,3620
|
| 833 |
+
keras/src/layers/regularization/dropout.py,sha256=j4ludUTtz804NxscjvavNb2aTQbdNjIMsFcIYDtJzWY,3004
|
| 834 |
+
keras/src/layers/regularization/gaussian_dropout.py,sha256=_iTmmmSK0qCKXdtHYRIK6zSE3G9DRfGH67zT3EAx9D4,2067
|
| 835 |
+
keras/src/layers/regularization/gaussian_noise.py,sha256=KQ0Z8MWzVb5iuM8eTtTxOUF5TcYX2rAcqy4S55s6klY,2115
|
| 836 |
+
keras/src/layers/regularization/spatial_dropout.py,sha256=8SORBywkWwdM-id_xnFquDCrRKhiLqNrMtXlyll-AR0,7300
|
| 837 |
+
keras/src/layers/reshaping/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 838 |
+
keras/src/layers/reshaping/__pycache__/__init__.cpython-310.pyc,,
|
| 839 |
+
keras/src/layers/reshaping/__pycache__/cropping1d.cpython-310.pyc,,
|
| 840 |
+
keras/src/layers/reshaping/__pycache__/cropping2d.cpython-310.pyc,,
|
| 841 |
+
keras/src/layers/reshaping/__pycache__/cropping3d.cpython-310.pyc,,
|
| 842 |
+
keras/src/layers/reshaping/__pycache__/flatten.cpython-310.pyc,,
|
| 843 |
+
keras/src/layers/reshaping/__pycache__/permute.cpython-310.pyc,,
|
| 844 |
+
keras/src/layers/reshaping/__pycache__/repeat_vector.cpython-310.pyc,,
|
| 845 |
+
keras/src/layers/reshaping/__pycache__/reshape.cpython-310.pyc,,
|
| 846 |
+
keras/src/layers/reshaping/__pycache__/up_sampling1d.cpython-310.pyc,,
|
| 847 |
+
keras/src/layers/reshaping/__pycache__/up_sampling2d.cpython-310.pyc,,
|
| 848 |
+
keras/src/layers/reshaping/__pycache__/up_sampling3d.cpython-310.pyc,,
|
| 849 |
+
keras/src/layers/reshaping/__pycache__/zero_padding1d.cpython-310.pyc,,
|
| 850 |
+
keras/src/layers/reshaping/__pycache__/zero_padding2d.cpython-310.pyc,,
|
| 851 |
+
keras/src/layers/reshaping/__pycache__/zero_padding3d.cpython-310.pyc,,
|
| 852 |
+
keras/src/layers/reshaping/cropping1d.py,sha256=jrSIsn5Zvwe8R73YyC1fhF3mDZTOC5ymhvkGKH2M75g,2760
|
| 853 |
+
keras/src/layers/reshaping/cropping2d.py,sha256=N7r1-tuAkhC9QWH0Tt005iZnHimWT6cQBMbbWR5-tUQ,9044
|
| 854 |
+
keras/src/layers/reshaping/cropping3d.py,sha256=Hm176o-duFkIXiAYjvjRAY6mWypY_vSEmGpQU1Eh8yU,11265
|
| 855 |
+
keras/src/layers/reshaping/flatten.py,sha256=La8OFnWq0UisPjTsMMGNyFuzxJlnpqGCYX9kLgLg92Q,3059
|
| 856 |
+
keras/src/layers/reshaping/permute.py,sha256=F3BxIPmPBnQGSmK2CxW4udFRRAuGKuZaomt-C2luUTs,2090
|
| 857 |
+
keras/src/layers/reshaping/repeat_vector.py,sha256=Gv8DRO145ooHBriDLvzitmKQJtx-ek0o7EPStPx_Pac,1335
|
| 858 |
+
keras/src/layers/reshaping/reshape.py,sha256=aAgYnt-rs_rqu2SppXZW6KkyBkCX2w1amBG9PhGDavY,2322
|
| 859 |
+
keras/src/layers/reshaping/up_sampling1d.py,sha256=xJUqfpYUyc9x461UV_TMPDaCcy1_whKAknIHLkCcbhI,1591
|
| 860 |
+
keras/src/layers/reshaping/up_sampling2d.py,sha256=exYZP8lo_lLVLsIgdlbyRVv_h8N9NHOXQ6SkY6nOSVQ,6035
|
| 861 |
+
keras/src/layers/reshaping/up_sampling3d.py,sha256=nlK1wE5UCuTUsCGJKYkZixOGvxVE20f-H26hTnCyUU4,4910
|
| 862 |
+
keras/src/layers/reshaping/zero_padding1d.py,sha256=t_WxXso0weqfouc-3Ij06YPi3r-9WYDLly_JPfIcHBM,3362
|
| 863 |
+
keras/src/layers/reshaping/zero_padding2d.py,sha256=tDz2m1cfQaxvak2XbOWw7YDkOzUmM5SsaejDOBSMvt4,4646
|
| 864 |
+
keras/src/layers/reshaping/zero_padding3d.py,sha256=XaorgfwHCjgaVtdiQWW6wrwHpoz-c2nkjWW5Ww6nTfE,5060
|
| 865 |
+
keras/src/layers/rnn/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 866 |
+
keras/src/layers/rnn/__pycache__/__init__.cpython-310.pyc,,
|
| 867 |
+
keras/src/layers/rnn/__pycache__/bidirectional.cpython-310.pyc,,
|
| 868 |
+
keras/src/layers/rnn/__pycache__/conv_lstm.cpython-310.pyc,,
|
| 869 |
+
keras/src/layers/rnn/__pycache__/conv_lstm1d.cpython-310.pyc,,
|
| 870 |
+
keras/src/layers/rnn/__pycache__/conv_lstm2d.cpython-310.pyc,,
|
| 871 |
+
keras/src/layers/rnn/__pycache__/conv_lstm3d.cpython-310.pyc,,
|
| 872 |
+
keras/src/layers/rnn/__pycache__/dropout_rnn_cell.cpython-310.pyc,,
|
| 873 |
+
keras/src/layers/rnn/__pycache__/gru.cpython-310.pyc,,
|
| 874 |
+
keras/src/layers/rnn/__pycache__/lstm.cpython-310.pyc,,
|
| 875 |
+
keras/src/layers/rnn/__pycache__/rnn.cpython-310.pyc,,
|
| 876 |
+
keras/src/layers/rnn/__pycache__/simple_rnn.cpython-310.pyc,,
|
| 877 |
+
keras/src/layers/rnn/__pycache__/stacked_rnn_cells.cpython-310.pyc,,
|
| 878 |
+
keras/src/layers/rnn/__pycache__/time_distributed.cpython-310.pyc,,
|
| 879 |
+
keras/src/layers/rnn/bidirectional.py,sha256=Jbce73SzJteMd3NNCrjwrymz_lWF03Qr1ejrAtzERrQ,13235
|
| 880 |
+
keras/src/layers/rnn/conv_lstm.py,sha256=Tc6hjC_Z2WwQzZNB0XyZ2SU-gwylNP1OhDMdHN1-lTA,27621
|
| 881 |
+
keras/src/layers/rnn/conv_lstm1d.py,sha256=7Al9iXoc5CbdywW8O4CIP_HeRQD4fTZ0Ph_3a_lx4So,8296
|
| 882 |
+
keras/src/layers/rnn/conv_lstm2d.py,sha256=N9qTryL8AgNZxOhbqt8YgFYXeb88qGn0CTgKICXlRpw,8381
|
| 883 |
+
keras/src/layers/rnn/conv_lstm3d.py,sha256=khYSWkfVqI3RGrQuthK93TqlWX13itKCjpi0I6CPKkU,8289
|
| 884 |
+
keras/src/layers/rnn/dropout_rnn_cell.py,sha256=S9TM2G9n1I9xsOSoS3ZKHhPbq_-0xh2P__sBNfYE98E,2524
|
| 885 |
+
keras/src/layers/rnn/gru.py,sha256=Isofd5zrFOvzP341MQ2ZbYXMWkY82hAqtJYQ_PsxwWU,28798
|
| 886 |
+
keras/src/layers/rnn/lstm.py,sha256=tHSDDprfhyZbczEbRIxRKKD3eS3d7QrlPtBrHgQ87jw,27686
|
| 887 |
+
keras/src/layers/rnn/rnn.py,sha256=-U1H8rFM6TMCPBKCdyI1NOUtjYO__4EMILv5C6OI1uU,18984
|
| 888 |
+
keras/src/layers/rnn/simple_rnn.py,sha256=w8veFLz2qsZbFJpZyyDrWfeRu4wjWnmRAD6-Im9rXTo,17542
|
| 889 |
+
keras/src/layers/rnn/stacked_rnn_cells.py,sha256=RQU16cJjGZcyUTh5GqEJUUxmydNNXsR06K5kycrks5Y,4943
|
| 890 |
+
keras/src/layers/rnn/time_distributed.py,sha256=BUYeXP_RslRhq_k-VZ6t65n2bQKq_pQImXFTh4d4emc,4800
|
| 891 |
+
keras/src/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 892 |
+
keras/src/legacy/__pycache__/__init__.cpython-310.pyc,,
|
| 893 |
+
keras/src/legacy/__pycache__/backend.cpython-310.pyc,,
|
| 894 |
+
keras/src/legacy/__pycache__/layers.cpython-310.pyc,,
|
| 895 |
+
keras/src/legacy/__pycache__/losses.cpython-310.pyc,,
|
| 896 |
+
keras/src/legacy/backend.py,sha256=9EJkBgzhUvSXZPN9vX9i58g3AOTtGIqutYVC_SwLo_A,70277
|
| 897 |
+
keras/src/legacy/layers.py,sha256=oOaFtRtroSZpKL0z4tDWOpUbsrJhmuef6twESrSOmx8,8396
|
| 898 |
+
keras/src/legacy/losses.py,sha256=pprb6guwHwBv-5zo2qZhLkji4z-L0plE5k6CoS7tsr8,523
|
| 899 |
+
keras/src/legacy/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 900 |
+
keras/src/legacy/preprocessing/__pycache__/__init__.cpython-310.pyc,,
|
| 901 |
+
keras/src/legacy/preprocessing/__pycache__/image.cpython-310.pyc,,
|
| 902 |
+
keras/src/legacy/preprocessing/__pycache__/sequence.cpython-310.pyc,,
|
| 903 |
+
keras/src/legacy/preprocessing/__pycache__/text.cpython-310.pyc,,
|
| 904 |
+
keras/src/legacy/preprocessing/image.py,sha256=zxY_utToHOHn4RYaX_qGB-BcLnnWr5o6nrK-nHJhuGk,65545
|
| 905 |
+
keras/src/legacy/preprocessing/sequence.py,sha256=jyot2KR3652vRxuzmLkWjRd5MivMysH_3jZ1HgGvF80,11172
|
| 906 |
+
keras/src/legacy/preprocessing/text.py,sha256=1NCgRIVZhZoWPSv0GKPGZ2r0D6SvcnHQsLpvFSnVals,11103
|
| 907 |
+
keras/src/legacy/saving/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 908 |
+
keras/src/legacy/saving/__pycache__/__init__.cpython-310.pyc,,
|
| 909 |
+
keras/src/legacy/saving/__pycache__/json_utils.cpython-310.pyc,,
|
| 910 |
+
keras/src/legacy/saving/__pycache__/legacy_h5_format.cpython-310.pyc,,
|
| 911 |
+
keras/src/legacy/saving/__pycache__/saving_options.cpython-310.pyc,,
|
| 912 |
+
keras/src/legacy/saving/__pycache__/saving_utils.cpython-310.pyc,,
|
| 913 |
+
keras/src/legacy/saving/__pycache__/serialization.cpython-310.pyc,,
|
| 914 |
+
keras/src/legacy/saving/json_utils.py,sha256=JIGZu1OJylkP71N6h3IBLoG_e9qnCQAC9H4GdDdUIOc,7296
|
| 915 |
+
keras/src/legacy/saving/legacy_h5_format.py,sha256=Vcw71ftgO9e_0e14XG5opxVwGquKUjVQW_3asiWkdOI,22605
|
| 916 |
+
keras/src/legacy/saving/saving_options.py,sha256=ZUyOHYsTf0rBLBAOlSaeqVNv9tGjWA9LsNyPk5WTXRI,485
|
| 917 |
+
keras/src/legacy/saving/saving_utils.py,sha256=Mk4wGzXa4B_9CrjRdPFQkWuQGm5ySg5aKybXnLzsj1c,9275
|
| 918 |
+
keras/src/legacy/saving/serialization.py,sha256=s4qrdywzIRnMccfXRmxbSqqfquQyohIIf7TdjRQCsBc,21808
|
| 919 |
+
keras/src/losses/__init__.py,sha256=rt63Ye0f7YdAR0eV0EOj2J61DI6xNdp2ojonx6rB3wE,6595
|
| 920 |
+
keras/src/losses/__pycache__/__init__.cpython-310.pyc,,
|
| 921 |
+
keras/src/losses/__pycache__/loss.cpython-310.pyc,,
|
| 922 |
+
keras/src/losses/__pycache__/losses.cpython-310.pyc,,
|
| 923 |
+
keras/src/losses/loss.py,sha256=BjtYoghA3jfpJ4_bG7c3NRK3rk7omzMSCuK9ZNlaYGs,8787
|
| 924 |
+
keras/src/losses/losses.py,sha256=vrN_LMRIWwOJQ-zc96YzgcDaAiKxLYytYc_lun-8EIA,93333
|
| 925 |
+
keras/src/metrics/__init__.py,sha256=CydJsY38PR2lRN4irhO_wnlvgruTEAgSHp8eUYE0lwY,7410
|
| 926 |
+
keras/src/metrics/__pycache__/__init__.cpython-310.pyc,,
|
| 927 |
+
keras/src/metrics/__pycache__/accuracy_metrics.cpython-310.pyc,,
|
| 928 |
+
keras/src/metrics/__pycache__/confusion_metrics.cpython-310.pyc,,
|
| 929 |
+
keras/src/metrics/__pycache__/correlation_metrics.cpython-310.pyc,,
|
| 930 |
+
keras/src/metrics/__pycache__/f_score_metrics.cpython-310.pyc,,
|
| 931 |
+
keras/src/metrics/__pycache__/hinge_metrics.cpython-310.pyc,,
|
| 932 |
+
keras/src/metrics/__pycache__/iou_metrics.cpython-310.pyc,,
|
| 933 |
+
keras/src/metrics/__pycache__/metric.cpython-310.pyc,,
|
| 934 |
+
keras/src/metrics/__pycache__/metrics_utils.cpython-310.pyc,,
|
| 935 |
+
keras/src/metrics/__pycache__/probabilistic_metrics.cpython-310.pyc,,
|
| 936 |
+
keras/src/metrics/__pycache__/reduction_metrics.cpython-310.pyc,,
|
| 937 |
+
keras/src/metrics/__pycache__/regression_metrics.cpython-310.pyc,,
|
| 938 |
+
keras/src/metrics/accuracy_metrics.py,sha256=cDHR0jyFjtFz_oY20JMxXSpbKGnJq2lkZJt3N3NDG7g,18283
|
| 939 |
+
keras/src/metrics/confusion_metrics.py,sha256=zVBnnk1n0rFuE7XiXp5mB24aO4pY5JIQCgMqyp6Epfw,61530
|
| 940 |
+
keras/src/metrics/correlation_metrics.py,sha256=AKLlFGiByNSM_Dd4CIsQrjKpxPX53CGl6fbsvz3DY7A,6905
|
| 941 |
+
keras/src/metrics/f_score_metrics.py,sha256=B6SBXpXikgayvre6yQJSEsbIpWlvUveSicEKdeGkaUs,11743
|
| 942 |
+
keras/src/metrics/hinge_metrics.py,sha256=hmlZY6wijxvW3RpOt4RUA1Kn3US5mR7h98o-jIZsbcs,3255
|
| 943 |
+
keras/src/metrics/iou_metrics.py,sha256=pk0Bskqdh3HdQDXMwxpCUj-2Re6a4sH1e6wQlHiTm40,27572
|
| 944 |
+
keras/src/metrics/metric.py,sha256=tBcGhhWUebMD0c78algCXwMYwuaSt3lLZOZ-DtRe_IQ,8720
|
| 945 |
+
keras/src/metrics/metrics_utils.py,sha256=NFrVJxNBBRGS62dOGONpcLI1zJS7w5X8Z7FzrMajKLQ,26616
|
| 946 |
+
keras/src/metrics/probabilistic_metrics.py,sha256=cyDuxohv3eqbVjGhTljwo507wzriuXG20OVsCXd0Fo8,10640
|
| 947 |
+
keras/src/metrics/reduction_metrics.py,sha256=-imgCBWg9Kdfx_k4Shq81h07feoHDquB_J704NgFQ1g,7345
|
| 948 |
+
keras/src/metrics/regression_metrics.py,sha256=eLacV_8CKtzA26BJDJuncUDATuL1x8O6SRHqLA9eSFc,19756
|
| 949 |
+
keras/src/models/__init__.py,sha256=DPbBPSfIGgsufTfJH5U5xJOeN_Ef4FMadT7KKYg3Kjg,143
|
| 950 |
+
keras/src/models/__pycache__/__init__.cpython-310.pyc,,
|
| 951 |
+
keras/src/models/__pycache__/cloning.cpython-310.pyc,,
|
| 952 |
+
keras/src/models/__pycache__/functional.cpython-310.pyc,,
|
| 953 |
+
keras/src/models/__pycache__/model.cpython-310.pyc,,
|
| 954 |
+
keras/src/models/__pycache__/sequential.cpython-310.pyc,,
|
| 955 |
+
keras/src/models/__pycache__/variable_mapping.cpython-310.pyc,,
|
| 956 |
+
keras/src/models/cloning.py,sha256=BFFiu9lYRkNSL7EmzP-o8hx0kBsV-gBnTtT2-7C-ZWM,15413
|
| 957 |
+
keras/src/models/functional.py,sha256=VQyiBKQOWCAOl91PIxZhS5zHYwqc5waKKCd-2At46Ps,33505
|
| 958 |
+
keras/src/models/model.py,sha256=HQIagTsARHPhuea9YBXvJjubJ58hYLRzpr94eye-l_A,32035
|
| 959 |
+
keras/src/models/sequential.py,sha256=S0APRXF1iTvostQG6DD6ofF6b-uf0f1zusXimzdNxVg,13826
|
| 960 |
+
keras/src/models/variable_mapping.py,sha256=FVtcgjBRqOxtvkzOE6kjG9SpcB9keDg2gS5LOTlXvG0,2181
|
| 961 |
+
keras/src/ops/__init__.py,sha256=aORlvnrqY_eQl0EFLWdpHsXHnQ6JLSw1qhwJMr-VXJ0,644
|
| 962 |
+
keras/src/ops/__pycache__/__init__.cpython-310.pyc,,
|
| 963 |
+
keras/src/ops/__pycache__/core.cpython-310.pyc,,
|
| 964 |
+
keras/src/ops/__pycache__/function.cpython-310.pyc,,
|
| 965 |
+
keras/src/ops/__pycache__/image.cpython-310.pyc,,
|
| 966 |
+
keras/src/ops/__pycache__/linalg.cpython-310.pyc,,
|
| 967 |
+
keras/src/ops/__pycache__/math.cpython-310.pyc,,
|
| 968 |
+
keras/src/ops/__pycache__/nn.cpython-310.pyc,,
|
| 969 |
+
keras/src/ops/__pycache__/node.cpython-310.pyc,,
|
| 970 |
+
keras/src/ops/__pycache__/numpy.cpython-310.pyc,,
|
| 971 |
+
keras/src/ops/__pycache__/operation.cpython-310.pyc,,
|
| 972 |
+
keras/src/ops/__pycache__/operation_utils.cpython-310.pyc,,
|
| 973 |
+
keras/src/ops/__pycache__/symbolic_arguments.cpython-310.pyc,,
|
| 974 |
+
keras/src/ops/core.py,sha256=7WMuT86C5eIq_wPushi-Y2XJFGZc1Qf7orVmc35DbbQ,39591
|
| 975 |
+
keras/src/ops/function.py,sha256=H2HDbfS5Y4-zkV8WpBj38xZeFkIBizHYZhbItbb7EJk,16285
|
| 976 |
+
keras/src/ops/image.py,sha256=w0bHwOIxEYl96gVg2L31ftfO1HzgJeLralUjlEQMWQA,43372
|
| 977 |
+
keras/src/ops/linalg.py,sha256=_yLcKA5xvCKsZSoldYcDPQ5MCw8d7YetMKrg1us6NtA,21251
|
| 978 |
+
keras/src/ops/math.py,sha256=QeIgeWM5KZ9R9O1sSIuafHL3qgfs2pQfHvRNod3Pdcw,34519
|
| 979 |
+
keras/src/ops/nn.py,sha256=XiozjlYU3hxynUFvikMoNwB1Vqq_4bvfr0niz2btp7U,82056
|
| 980 |
+
keras/src/ops/node.py,sha256=aJgn9D-GkteE--Bbt2cZ9JjVxb2W2uS1OWEKoeLsl3Y,5583
|
| 981 |
+
keras/src/ops/numpy.py,sha256=SfXR2RvvemVJg7oaNo3nAhnkrDkILvVnRQ3a8IVil2c,216928
|
| 982 |
+
keras/src/ops/operation.py,sha256=2YHXy1bhcTxhvJM0CUWGzY-7EpbHDeNlYLrSbmb6Bck,11903
|
| 983 |
+
keras/src/ops/operation_utils.py,sha256=McVlxvb-iD826m6Rpm_1UvnImhaLZLs3tzlCZE6S8Xo,14402
|
| 984 |
+
keras/src/ops/symbolic_arguments.py,sha256=MKwXxZYkyouD9BPmQ1uUNxILdcwPvTayAqXaUV3P3o4,1628
|
| 985 |
+
keras/src/optimizers/__init__.py,sha256=obSfcJtrRgVj1rCOxrNyeDGPS0_m16tDZzUphEy3iR4,3931
|
| 986 |
+
keras/src/optimizers/__pycache__/__init__.cpython-310.pyc,,
|
| 987 |
+
keras/src/optimizers/__pycache__/adadelta.cpython-310.pyc,,
|
| 988 |
+
keras/src/optimizers/__pycache__/adafactor.cpython-310.pyc,,
|
| 989 |
+
keras/src/optimizers/__pycache__/adagrad.cpython-310.pyc,,
|
| 990 |
+
keras/src/optimizers/__pycache__/adam.cpython-310.pyc,,
|
| 991 |
+
keras/src/optimizers/__pycache__/adamax.cpython-310.pyc,,
|
| 992 |
+
keras/src/optimizers/__pycache__/adamw.cpython-310.pyc,,
|
| 993 |
+
keras/src/optimizers/__pycache__/base_optimizer.cpython-310.pyc,,
|
| 994 |
+
keras/src/optimizers/__pycache__/ftrl.cpython-310.pyc,,
|
| 995 |
+
keras/src/optimizers/__pycache__/lamb.cpython-310.pyc,,
|
| 996 |
+
keras/src/optimizers/__pycache__/lion.cpython-310.pyc,,
|
| 997 |
+
keras/src/optimizers/__pycache__/loss_scale_optimizer.cpython-310.pyc,,
|
| 998 |
+
keras/src/optimizers/__pycache__/nadam.cpython-310.pyc,,
|
| 999 |
+
keras/src/optimizers/__pycache__/optimizer.cpython-310.pyc,,
|
| 1000 |
+
keras/src/optimizers/__pycache__/rmsprop.cpython-310.pyc,,
|
| 1001 |
+
keras/src/optimizers/__pycache__/sgd.cpython-310.pyc,,
|
| 1002 |
+
keras/src/optimizers/adadelta.py,sha256=nRWBuAJGBrofDN2fUb-vNvGz5nudZIjlBx7OBWSRXuM,4759
|
| 1003 |
+
keras/src/optimizers/adafactor.py,sha256=BAKcQ7ptahNHfzd6X_p5XMIV4TYr7FH-28DtpCUEMoU,7637
|
| 1004 |
+
keras/src/optimizers/adagrad.py,sha256=wv7cGmH4I0cB7nabSDmGrC4aqwz-j1CfXlQZKyvDLQc,3918
|
| 1005 |
+
keras/src/optimizers/adam.py,sha256=nzzVTAaalAbYcUDStCfK4BZw2FV3uPedAjRdmkIpBF0,5909
|
| 1006 |
+
keras/src/optimizers/adamax.py,sha256=d31aAVPkJ9GVPq0hTjEPd0I_gB63DsD-IS7yUI1GRmI,5082
|
| 1007 |
+
keras/src/optimizers/adamw.py,sha256=TVnjn1JQMwy_cghTbFi9WGnLco45Oq9YZ8qME8ej3r4,3785
|
| 1008 |
+
keras/src/optimizers/base_optimizer.py,sha256=pUhxkquSRnulTfyM4k7WwRamaDlQ2GggRnjBWboCSro,45232
|
| 1009 |
+
keras/src/optimizers/ftrl.py,sha256=cnfneb2m7nGiIZjGbR0cOOZbqXHBixrzyLnrcU6VchY,9099
|
| 1010 |
+
keras/src/optimizers/lamb.py,sha256=5_PWBd6uWKOVRk89h_j4tOMSowLvsq7Va2QLGTfJP_w,5276
|
| 1011 |
+
keras/src/optimizers/lion.py,sha256=15ML1_C7XGCFMgML90GqjYlXq_wRm2T9xR1WbwGus9A,4969
|
| 1012 |
+
keras/src/optimizers/loss_scale_optimizer.py,sha256=Kj-NSrfWr22uwF8VlYvn8Aao_7TOsEuerzlny6qJqco,11619
|
| 1013 |
+
keras/src/optimizers/nadam.py,sha256=tsRouI2vO5uU2Gy106YSgrSlRg9nSF9sbp7alqcVOhI,5926
|
| 1014 |
+
keras/src/optimizers/optimizer.py,sha256=cZtZwu42plSGjZBqoS6KThwJvWjEcPz9g97nZCSrwOA,870
|
| 1015 |
+
keras/src/optimizers/rmsprop.py,sha256=-uklCRqdptFxUlkK0_J6Ww7PptVhpsw7ywJj_L54jWM,6003
|
| 1016 |
+
keras/src/optimizers/schedules/__init__.py,sha256=vuUuHNTev8sD2-swsuq7zqyYbmaOhDyiIE6F3dGGSZU,546
|
| 1017 |
+
keras/src/optimizers/schedules/__pycache__/__init__.cpython-310.pyc,,
|
| 1018 |
+
keras/src/optimizers/schedules/__pycache__/learning_rate_schedule.cpython-310.pyc,,
|
| 1019 |
+
keras/src/optimizers/schedules/learning_rate_schedule.py,sha256=Oe3zk_IjeIN9TFNz1895RTN2rCk9uZY8iYbqFb9E06c,35507
|
| 1020 |
+
keras/src/optimizers/sgd.py,sha256=T-JFtmCVnLLAvN3S3qtWoKWci53AmxH2xBMKzeC11N4,4556
|
| 1021 |
+
keras/src/quantizers/__init__.py,sha256=Ssm4dFHi_pZh_erToRAiFHt4gyoftPS9CepipyhMStY,1784
|
| 1022 |
+
keras/src/quantizers/__pycache__/__init__.cpython-310.pyc,,
|
| 1023 |
+
keras/src/quantizers/__pycache__/quantizers.cpython-310.pyc,,
|
| 1024 |
+
keras/src/quantizers/quantizers.py,sha256=O-6FO6pKwOJup4quT3_WIZ5Kuwlt1X9PBDC1IfL-KJQ,5689
|
| 1025 |
+
keras/src/random/__init__.py,sha256=BmXVYPzxbhADohoLtAEEzB3cesP7YBFDsp1qc6BWWlg,420
|
| 1026 |
+
keras/src/random/__pycache__/__init__.cpython-310.pyc,,
|
| 1027 |
+
keras/src/random/__pycache__/random.cpython-310.pyc,,
|
| 1028 |
+
keras/src/random/__pycache__/seed_generator.cpython-310.pyc,,
|
| 1029 |
+
keras/src/random/random.py,sha256=bUADZIVDuCghwIWTk0qBxXTxUdiNGWIdsRi8QJ3ePg4,17581
|
| 1030 |
+
keras/src/random/seed_generator.py,sha256=XYukdqfFArfWDwzBzip8hmoC1Ta2AAK-bF1eUy2cM3I,5593
|
| 1031 |
+
keras/src/regularizers/__init__.py,sha256=GzK9FTKL2Xxd5H55GfG9gxDqt4eZoVHFWICgb2VW8qM,1731
|
| 1032 |
+
keras/src/regularizers/__pycache__/__init__.cpython-310.pyc,,
|
| 1033 |
+
keras/src/regularizers/__pycache__/regularizers.cpython-310.pyc,,
|
| 1034 |
+
keras/src/regularizers/regularizers.py,sha256=urXNmMGuqHT7lOmS-yQPl3At3Ny-37Xlo389ErCg84A,11799
|
| 1035 |
+
keras/src/saving/__init__.py,sha256=vnrtfvnzW7Gwtxe5COhaMoEnVYB5iDe2YlqJ-DvqFIk,614
|
| 1036 |
+
keras/src/saving/__pycache__/__init__.cpython-310.pyc,,
|
| 1037 |
+
keras/src/saving/__pycache__/file_editor.cpython-310.pyc,,
|
| 1038 |
+
keras/src/saving/__pycache__/keras_saveable.cpython-310.pyc,,
|
| 1039 |
+
keras/src/saving/__pycache__/object_registration.cpython-310.pyc,,
|
| 1040 |
+
keras/src/saving/__pycache__/saving_api.cpython-310.pyc,,
|
| 1041 |
+
keras/src/saving/__pycache__/saving_lib.cpython-310.pyc,,
|
| 1042 |
+
keras/src/saving/__pycache__/serialization_lib.cpython-310.pyc,,
|
| 1043 |
+
keras/src/saving/file_editor.py,sha256=XAl9O3XK2VO2IuAB-Mm40z-WpdRw9aQDS6sKJawAE1A,28980
|
| 1044 |
+
keras/src/saving/keras_saveable.py,sha256=aGIt1ajtsaamfUq18LM6ql8JEoQzi3HwzJEuwQ9bmKE,1285
|
| 1045 |
+
keras/src/saving/object_registration.py,sha256=aZmmFrJP5GjjNpLNmq4k6D-PqdAH8PMBGk7BXI7eogE,7358
|
| 1046 |
+
keras/src/saving/saving_api.py,sha256=UWdusfIT2tamCZD_LuDyAZxi-9jdfpgWbm5_XzObWaU,10419
|
| 1047 |
+
keras/src/saving/saving_lib.py,sha256=nCAu7h1zphpic0XqN-kZuRa2qT2XRhl2oPrzjDUozJs,40458
|
| 1048 |
+
keras/src/saving/serialization_lib.py,sha256=PjD60iyHE42G6wtv37JzC5ikRD981OWD_4V5Q4bgUFk,28759
|
| 1049 |
+
keras/src/testing/__init__.py,sha256=xOZf-VBOf3wrXu47PgII2TNfXgxUse60HCinBryHiK8,266
|
| 1050 |
+
keras/src/testing/__pycache__/__init__.cpython-310.pyc,,
|
| 1051 |
+
keras/src/testing/__pycache__/test_case.cpython-310.pyc,,
|
| 1052 |
+
keras/src/testing/__pycache__/test_utils.cpython-310.pyc,,
|
| 1053 |
+
keras/src/testing/test_case.py,sha256=-S-acWAfOOc9SQTuBuroQ_-hqvtc8e0V96JTCp27Yw4,31514
|
| 1054 |
+
keras/src/testing/test_utils.py,sha256=6Vb8tJIyjU1ay63w3jvXNNhh7sSNrosQll4ii1NXELQ,6197
|
| 1055 |
+
keras/src/trainers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 1056 |
+
keras/src/trainers/__pycache__/__init__.cpython-310.pyc,,
|
| 1057 |
+
keras/src/trainers/__pycache__/compile_utils.cpython-310.pyc,,
|
| 1058 |
+
keras/src/trainers/__pycache__/epoch_iterator.cpython-310.pyc,,
|
| 1059 |
+
keras/src/trainers/__pycache__/trainer.cpython-310.pyc,,
|
| 1060 |
+
keras/src/trainers/compile_utils.py,sha256=1xfQpgjiHRvueieijoJYVAMPgAs89CC-2AANC_dspE4,30253
|
| 1061 |
+
keras/src/trainers/data_adapters/__init__.py,sha256=GZa9Y4uzIEu_VEZnVPHAS0s_Jd4Z1sIn71-Vk4zj-wY,5934
|
| 1062 |
+
keras/src/trainers/data_adapters/__pycache__/__init__.cpython-310.pyc,,
|
| 1063 |
+
keras/src/trainers/data_adapters/__pycache__/array_data_adapter.cpython-310.pyc,,
|
| 1064 |
+
keras/src/trainers/data_adapters/__pycache__/array_slicing.cpython-310.pyc,,
|
| 1065 |
+
keras/src/trainers/data_adapters/__pycache__/data_adapter.cpython-310.pyc,,
|
| 1066 |
+
keras/src/trainers/data_adapters/__pycache__/data_adapter_utils.cpython-310.pyc,,
|
| 1067 |
+
keras/src/trainers/data_adapters/__pycache__/generator_data_adapter.cpython-310.pyc,,
|
| 1068 |
+
keras/src/trainers/data_adapters/__pycache__/py_dataset_adapter.cpython-310.pyc,,
|
| 1069 |
+
keras/src/trainers/data_adapters/__pycache__/tf_dataset_adapter.cpython-310.pyc,,
|
| 1070 |
+
keras/src/trainers/data_adapters/__pycache__/torch_data_loader_adapter.cpython-310.pyc,,
|
| 1071 |
+
keras/src/trainers/data_adapters/array_data_adapter.py,sha256=T7_AmjlxGPxK0-sWqKzgFMwp8W-d8zzWirBxeO2Frxc,14219
|
| 1072 |
+
keras/src/trainers/data_adapters/array_slicing.py,sha256=y79A04eUEjtyQ3XAzwjMJyG-rk_aiCW4EQNFsQYKjoU,17315
|
| 1073 |
+
keras/src/trainers/data_adapters/data_adapter.py,sha256=NGBEr2cAFfmrtRl7a8f7iCEaaCN2k8bw3l-3QxlEzRM,3261
|
| 1074 |
+
keras/src/trainers/data_adapters/data_adapter_utils.py,sha256=Tdk-5a2Qhi3BDhye8DYOTub4gmV2ymirASj52LuE570,10520
|
| 1075 |
+
keras/src/trainers/data_adapters/generator_data_adapter.py,sha256=-bqQwJT-Gu-ec4aK0ejPb1FQQGVmlkbxHA4_11TFRPc,3118
|
| 1076 |
+
keras/src/trainers/data_adapters/py_dataset_adapter.py,sha256=HngaKe2jU2YOld2LpKx8yeBY5iT1OIxupjMskhDPqQ8,23580
|
| 1077 |
+
keras/src/trainers/data_adapters/tf_dataset_adapter.py,sha256=BUwA_o1nLu7JMfXCXiY0Q8t4z3a9CVzPGGId8Jyn4bQ,4950
|
| 1078 |
+
keras/src/trainers/data_adapters/torch_data_loader_adapter.py,sha256=RvM3n5-l3k5TMDVtemv4cQoSTrEtB8q1glmS7s1dKVM,2544
|
| 1079 |
+
keras/src/trainers/epoch_iterator.py,sha256=kCn6-j5qIv0LTvR1uMhINUU9GPbpiwX812zumtWvUds,4916
|
| 1080 |
+
keras/src/trainers/trainer.py,sha256=3S9ULMmxkVZUuQIoKfGJeLvDzXv59Qf0snUdrqNU_3U,51768
|
| 1081 |
+
keras/src/tree/__init__.py,sha256=GFevGbI_JtGccMAcA-382UO6ATdJap_YkpI50smCrv4,629
|
| 1082 |
+
keras/src/tree/__pycache__/__init__.cpython-310.pyc,,
|
| 1083 |
+
keras/src/tree/__pycache__/dmtree_impl.cpython-310.pyc,,
|
| 1084 |
+
keras/src/tree/__pycache__/optree_impl.cpython-310.pyc,,
|
| 1085 |
+
keras/src/tree/__pycache__/tree_api.cpython-310.pyc,,
|
| 1086 |
+
keras/src/tree/dmtree_impl.py,sha256=f5iwagX0Fejin-w-5_J6x3OyjTvekESCKcpmYQQkXug,13537
|
| 1087 |
+
keras/src/tree/optree_impl.py,sha256=XGWkj7TQ5CcUwRLgH8-vUnWKNZudALU1mfQEMlp87mQ,5760
|
| 1088 |
+
keras/src/tree/tree_api.py,sha256=cSOp6EMOe8p0DUIbbvELrzIjABTIYX0Fw7CBfqi8pcY,14093
|
| 1089 |
+
keras/src/utils/__init__.py,sha256=WSmTldk6M-XV0X84XR5vryg0BTR8KsTfxNIyRaNkqq0,1423
|
| 1090 |
+
keras/src/utils/__pycache__/__init__.cpython-310.pyc,,
|
| 1091 |
+
keras/src/utils/__pycache__/argument_validation.cpython-310.pyc,,
|
| 1092 |
+
keras/src/utils/__pycache__/audio_dataset_utils.cpython-310.pyc,,
|
| 1093 |
+
keras/src/utils/__pycache__/backend_utils.cpython-310.pyc,,
|
| 1094 |
+
keras/src/utils/__pycache__/code_stats.cpython-310.pyc,,
|
| 1095 |
+
keras/src/utils/__pycache__/config.cpython-310.pyc,,
|
| 1096 |
+
keras/src/utils/__pycache__/dataset_utils.cpython-310.pyc,,
|
| 1097 |
+
keras/src/utils/__pycache__/dtype_utils.cpython-310.pyc,,
|
| 1098 |
+
keras/src/utils/__pycache__/file_utils.cpython-310.pyc,,
|
| 1099 |
+
keras/src/utils/__pycache__/image_dataset_utils.cpython-310.pyc,,
|
| 1100 |
+
keras/src/utils/__pycache__/image_utils.cpython-310.pyc,,
|
| 1101 |
+
keras/src/utils/__pycache__/io_utils.cpython-310.pyc,,
|
| 1102 |
+
keras/src/utils/__pycache__/jax_layer.cpython-310.pyc,,
|
| 1103 |
+
keras/src/utils/__pycache__/jax_utils.cpython-310.pyc,,
|
| 1104 |
+
keras/src/utils/__pycache__/model_visualization.cpython-310.pyc,,
|
| 1105 |
+
keras/src/utils/__pycache__/module_utils.cpython-310.pyc,,
|
| 1106 |
+
keras/src/utils/__pycache__/naming.cpython-310.pyc,,
|
| 1107 |
+
keras/src/utils/__pycache__/numerical_utils.cpython-310.pyc,,
|
| 1108 |
+
keras/src/utils/__pycache__/progbar.cpython-310.pyc,,
|
| 1109 |
+
keras/src/utils/__pycache__/python_utils.cpython-310.pyc,,
|
| 1110 |
+
keras/src/utils/__pycache__/rng_utils.cpython-310.pyc,,
|
| 1111 |
+
keras/src/utils/__pycache__/sequence_utils.cpython-310.pyc,,
|
| 1112 |
+
keras/src/utils/__pycache__/summary_utils.cpython-310.pyc,,
|
| 1113 |
+
keras/src/utils/__pycache__/text_dataset_utils.cpython-310.pyc,,
|
| 1114 |
+
keras/src/utils/__pycache__/tf_utils.cpython-310.pyc,,
|
| 1115 |
+
keras/src/utils/__pycache__/timeseries_dataset_utils.cpython-310.pyc,,
|
| 1116 |
+
keras/src/utils/__pycache__/torch_utils.cpython-310.pyc,,
|
| 1117 |
+
keras/src/utils/__pycache__/traceback_utils.cpython-310.pyc,,
|
| 1118 |
+
keras/src/utils/__pycache__/tracking.cpython-310.pyc,,
|
| 1119 |
+
keras/src/utils/argument_validation.py,sha256=uRFoLNJu3L2J8CM8L7uXGqhYi7ji8whh0H8nSHuRUXg,2876
|
| 1120 |
+
keras/src/utils/audio_dataset_utils.py,sha256=pxg3jOHgZMFhEkuJmCjI-dcrFyv7OlHyWW-49eedKN0,15114
|
| 1121 |
+
keras/src/utils/backend_utils.py,sha256=wp9i8Bie9mpkf6qdOAhZZ35-7tjSpgThWrlHcbRT8Xg,4618
|
| 1122 |
+
keras/src/utils/code_stats.py,sha256=1h4ifpAH5Jezm8BVrKM_WyzcG9uxrUiyzP1kcS4uqlo,1442
|
| 1123 |
+
keras/src/utils/config.py,sha256=3VhENVcng0DeazR-5rvjSnW_sovvOw-skEP-t3xWCEY,4643
|
| 1124 |
+
keras/src/utils/dataset_utils.py,sha256=IgVqIdnRf3sYHrLqJa5cigCIZUG1WxwollhNmr_4zDc,28195
|
| 1125 |
+
keras/src/utils/dtype_utils.py,sha256=wL_WaWYoDzDDmQW6EQGdpBb9O5QJ9OaEJsvY0Mir4uc,1483
|
| 1126 |
+
keras/src/utils/file_utils.py,sha256=fLlsHbcqkLr1xMgA7MmRrmoZwY_tLPUGOSdEzk_VE7w,17267
|
| 1127 |
+
keras/src/utils/image_dataset_utils.py,sha256=doL8q0q4DciFnlO-IyKN1v2Emh_gP4sI2rDhgeKL5qs,16964
|
| 1128 |
+
keras/src/utils/image_utils.py,sha256=HUI7Zcgqvsmm8a1xwfMwr7pOhnG4lsChP8Owv-xlCTM,16703
|
| 1129 |
+
keras/src/utils/io_utils.py,sha256=SreGeSMF3TGts4jaTMbf4yoSBhAgeJw-g2cOMyuePYA,4172
|
| 1130 |
+
keras/src/utils/jax_layer.py,sha256=zVHKEh4t4Rr8YseQlY-2EUxAjjVhVrQ2ngOetWIRv8w,26571
|
| 1131 |
+
keras/src/utils/jax_utils.py,sha256=vY3P4S9mfWEjdirLd81ocKqeCm-UVfgQ1yTi6UHdBiM,322
|
| 1132 |
+
keras/src/utils/model_visualization.py,sha256=JmATPQMI7nrIc32o2jCwm20M0XTivQnQZMFLes3-g00,16331
|
| 1133 |
+
keras/src/utils/module_utils.py,sha256=cKwmZaFoy0H0Q5e3WwlmK3X36DHburdBiEqmHkKBoAc,1988
|
| 1134 |
+
keras/src/utils/naming.py,sha256=bPowKBlgiVP_6XtVlNVHxrxheKuJy2c0e-oEM8ocZQY,1776
|
| 1135 |
+
keras/src/utils/numerical_utils.py,sha256=7XmtN-AFIYhbioLsbOTiHHiJsTrEPpiJpNJpG6GvnDg,7228
|
| 1136 |
+
keras/src/utils/progbar.py,sha256=Hud-bqGoixlyilD9NZnmcSOe3fT686Cv9GAUO9gPpvs,10349
|
| 1137 |
+
keras/src/utils/python_utils.py,sha256=sOjnW2s5WOkWBVEGgAQDKqhuV8YeOMESjH4VF6zOIio,4697
|
| 1138 |
+
keras/src/utils/rng_utils.py,sha256=XCokkeBtb0xDjLkvKsvJoTLoalM3c_tJHfTbysqpNvo,1677
|
| 1139 |
+
keras/src/utils/sequence_utils.py,sha256=CveyJ5VM5KJ4pFlo6LWT9omzd_xDeMRjTgczIKekP3Y,4716
|
| 1140 |
+
keras/src/utils/summary_utils.py,sha256=jjbTB6NTqMniSWXPKeNY6dvpn-U37WJdwqdfl8uX5nI,15447
|
| 1141 |
+
keras/src/utils/text_dataset_utils.py,sha256=JUqDauTec6uRZs71SbKeVjxHx_CNqqOWkoXQ1Q7ldRs,10701
|
| 1142 |
+
keras/src/utils/tf_utils.py,sha256=PC6SCcXouR5WjZ_e_MzAgWj1x9-bW4bQBiph6bOKf0c,4931
|
| 1143 |
+
keras/src/utils/timeseries_dataset_utils.py,sha256=rVxSuqlYLpzw_dVo8Ym5HSE2jFmndS8MAv4Uewycojo,9842
|
| 1144 |
+
keras/src/utils/torch_utils.py,sha256=QQNDA4hw_JVYlXXZH089s0Ev6JBqVGfxSeA7JL3ncFU,5226
|
| 1145 |
+
keras/src/utils/traceback_utils.py,sha256=VI8VJ8QjTDc3-cx3xfR9H7g68D2KVH7VknHi_JrVMuU,8997
|
| 1146 |
+
keras/src/utils/tracking.py,sha256=mVig-TS5LZbModoyAOnN3msazudKggW62hxUq4XzT2I,8844
|
| 1147 |
+
keras/src/version.py,sha256=e0Jbyh9_5DxOOWI-5lPqEq75DpaN7-F_BBuP3wJfygQ,189
|
| 1148 |
+
keras/src/visualization/__init__.py,sha256=bDdV3eLKeLKoUwUDBFuZxMO560OyFZND0zBn8vaG6rg,111
|
| 1149 |
+
keras/src/visualization/__pycache__/__init__.cpython-310.pyc,,
|
| 1150 |
+
keras/src/visualization/__pycache__/draw_bounding_boxes.cpython-310.pyc,,
|
| 1151 |
+
keras/src/visualization/__pycache__/draw_segmentation_masks.cpython-310.pyc,,
|
| 1152 |
+
keras/src/visualization/__pycache__/plot_bounding_box_gallery.cpython-310.pyc,,
|
| 1153 |
+
keras/src/visualization/__pycache__/plot_image_gallery.cpython-310.pyc,,
|
| 1154 |
+
keras/src/visualization/__pycache__/plot_segmentation_mask_gallery.cpython-310.pyc,,
|
| 1155 |
+
keras/src/visualization/draw_bounding_boxes.py,sha256=Gs7gNburpgwXr8CahiyQgZWhBD5ffVeoUG7kzIFL92g,6649
|
| 1156 |
+
keras/src/visualization/draw_segmentation_masks.py,sha256=C9zPIcHgQK8DKPhTvyiE13LTVU11zvIKK6q-YR249Tg,4746
|
| 1157 |
+
keras/src/visualization/plot_bounding_box_gallery.py,sha256=RBuNOnXHi0D6HiL7WmBfD1YeUsYunB1cHsusxmPct_s,6355
|
| 1158 |
+
keras/src/visualization/plot_image_gallery.py,sha256=JI75R1CquqtfHxWO-s2eHDT1dJi_w-V3lwqLE_PnsRU,5582
|
| 1159 |
+
keras/src/visualization/plot_segmentation_mask_gallery.py,sha256=gJnp5VowF7gIyPFuOzU3EBamQpDfpbS6ElqmgWDi4Y8,4335
|
| 1160 |
+
keras/src/wrappers/__init__.py,sha256=6QhlmdgtjERTkrI6uxtq9yTyHazeMOCPJVP6XEFskaw,270
|
| 1161 |
+
keras/src/wrappers/__pycache__/__init__.cpython-310.pyc,,
|
| 1162 |
+
keras/src/wrappers/__pycache__/fixes.cpython-310.pyc,,
|
| 1163 |
+
keras/src/wrappers/__pycache__/sklearn_wrapper.cpython-310.pyc,,
|
| 1164 |
+
keras/src/wrappers/__pycache__/utils.cpython-310.pyc,,
|
| 1165 |
+
keras/src/wrappers/fixes.py,sha256=iWAf_DHsvQAvmMXw0fVNECAomZs7wlGL8ckAARh8SsI,2591
|
| 1166 |
+
keras/src/wrappers/sklearn_wrapper.py,sha256=dlJp61cvLrY4UQYYvJs8lsChCZBrdO7JYyluroOBfN8,17479
|
| 1167 |
+
keras/src/wrappers/utils.py,sha256=UuRxqJhIOMtaTNX8J3FFmPZOTjn9rNyBO5IP9qEB5Qc,2383
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: setuptools (75.7.0)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras-3.8.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
keras
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__init__.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# DO NOT EDIT. Generated by api_gen.sh
|
| 2 |
+
from keras.api import DTypePolicy
|
| 3 |
+
from keras.api import FloatDTypePolicy
|
| 4 |
+
from keras.api import Function
|
| 5 |
+
from keras.api import Initializer
|
| 6 |
+
from keras.api import Input
|
| 7 |
+
from keras.api import InputSpec
|
| 8 |
+
from keras.api import KerasTensor
|
| 9 |
+
from keras.api import Layer
|
| 10 |
+
from keras.api import Loss
|
| 11 |
+
from keras.api import Metric
|
| 12 |
+
from keras.api import Model
|
| 13 |
+
from keras.api import Operation
|
| 14 |
+
from keras.api import Optimizer
|
| 15 |
+
from keras.api import Quantizer
|
| 16 |
+
from keras.api import Regularizer
|
| 17 |
+
from keras.api import Sequential
|
| 18 |
+
from keras.api import StatelessScope
|
| 19 |
+
from keras.api import SymbolicScope
|
| 20 |
+
from keras.api import Variable
|
| 21 |
+
from keras.api import __version__
|
| 22 |
+
from keras.api import activations
|
| 23 |
+
from keras.api import applications
|
| 24 |
+
from keras.api import backend
|
| 25 |
+
from keras.api import callbacks
|
| 26 |
+
from keras.api import config
|
| 27 |
+
from keras.api import constraints
|
| 28 |
+
from keras.api import datasets
|
| 29 |
+
from keras.api import device
|
| 30 |
+
from keras.api import distribution
|
| 31 |
+
from keras.api import dtype_policies
|
| 32 |
+
from keras.api import export
|
| 33 |
+
from keras.api import initializers
|
| 34 |
+
from keras.api import layers
|
| 35 |
+
from keras.api import legacy
|
| 36 |
+
from keras.api import losses
|
| 37 |
+
from keras.api import metrics
|
| 38 |
+
from keras.api import mixed_precision
|
| 39 |
+
from keras.api import models
|
| 40 |
+
from keras.api import name_scope
|
| 41 |
+
from keras.api import ops
|
| 42 |
+
from keras.api import optimizers
|
| 43 |
+
from keras.api import preprocessing
|
| 44 |
+
from keras.api import quantizers
|
| 45 |
+
from keras.api import random
|
| 46 |
+
from keras.api import regularizers
|
| 47 |
+
from keras.api import saving
|
| 48 |
+
from keras.api import tree
|
| 49 |
+
from keras.api import utils
|
| 50 |
+
from keras.api import version
|
| 51 |
+
from keras.api import visualization
|
| 52 |
+
from keras.api import wrappers
|
| 53 |
+
|
| 54 |
+
# END DO NOT EDIT.
|
| 55 |
+
|
| 56 |
+
import os # isort: skip
|
| 57 |
+
|
| 58 |
+
# Add everything in /api/ to the module search path.
|
| 59 |
+
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
|
| 60 |
+
|
| 61 |
+
# Don't pollute namespace.
|
| 62 |
+
del os
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
    """Return the module's attribute names, hiding `src` and `api`.

    Used by `dir(keras)` and IDE autocompletion so that the internal
    `keras.src` and generated `keras.api` namespaces are not suggested.
    """
    keys = dict.fromkeys((globals().keys()))
    # Use a default so a missing submodule (e.g. during partial
    # initialization or an interrupted reload) cannot make dir(keras)
    # raise KeyError.
    keys.pop("src", None)
    keys.pop("api", None)
    return list(keys)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
    symbol_name
    for symbol_name in globals()
    if not (symbol_name.startswith("_") or symbol_name in ("src", "api"))
]
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.33 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import activations
|
| 2 |
+
from keras.src import applications
|
| 3 |
+
from keras.src import backend
|
| 4 |
+
from keras.src import constraints
|
| 5 |
+
from keras.src import datasets
|
| 6 |
+
from keras.src import initializers
|
| 7 |
+
from keras.src import layers
|
| 8 |
+
from keras.src import models
|
| 9 |
+
from keras.src import ops
|
| 10 |
+
from keras.src import optimizers
|
| 11 |
+
from keras.src import regularizers
|
| 12 |
+
from keras.src import utils
|
| 13 |
+
from keras.src import visualization
|
| 14 |
+
from keras.src.backend import KerasTensor
|
| 15 |
+
from keras.src.layers import Input
|
| 16 |
+
from keras.src.layers import Layer
|
| 17 |
+
from keras.src.models import Functional
|
| 18 |
+
from keras.src.models import Model
|
| 19 |
+
from keras.src.models import Sequential
|
| 20 |
+
from keras.src.version import __version__
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (903 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/api_export.cpython-310.pyc
ADDED
|
Binary file (1.71 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/__pycache__/version.cpython-310.pyc
ADDED
|
Binary file (396 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/api_export.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
try:
|
| 2 |
+
import namex
|
| 3 |
+
except ImportError:
|
| 4 |
+
namex = None
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# These dicts reference "canonical names" only
|
| 8 |
+
# (i.e. the first name an object was registered with).
|
| 9 |
+
REGISTERED_NAMES_TO_OBJS = {}
|
| 10 |
+
REGISTERED_OBJS_TO_NAMES = {}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def register_internal_serializable(path, symbol):
|
| 14 |
+
global REGISTERED_NAMES_TO_OBJS
|
| 15 |
+
if isinstance(path, (list, tuple)):
|
| 16 |
+
name = path[0]
|
| 17 |
+
else:
|
| 18 |
+
name = path
|
| 19 |
+
REGISTERED_NAMES_TO_OBJS[name] = symbol
|
| 20 |
+
REGISTERED_OBJS_TO_NAMES[symbol] = name
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def get_symbol_from_name(name):
|
| 24 |
+
return REGISTERED_NAMES_TO_OBJS.get(name, None)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def get_name_from_symbol(symbol):
|
| 28 |
+
return REGISTERED_OBJS_TO_NAMES.get(symbol, None)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
if namex:
|
| 32 |
+
|
| 33 |
+
class keras_export(namex.export):
|
| 34 |
+
def __init__(self, path):
|
| 35 |
+
super().__init__(package="keras", path=path)
|
| 36 |
+
|
| 37 |
+
def __call__(self, symbol):
|
| 38 |
+
register_internal_serializable(self.path, symbol)
|
| 39 |
+
return super().__call__(symbol)
|
| 40 |
+
|
| 41 |
+
else:
|
| 42 |
+
|
| 43 |
+
class keras_export:
|
| 44 |
+
def __init__(self, path):
|
| 45 |
+
self.path = path
|
| 46 |
+
|
| 47 |
+
def __call__(self, symbol):
|
| 48 |
+
register_internal_serializable(self.path, symbol)
|
| 49 |
+
return symbol
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/backend_utils.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import importlib
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
|
| 6 |
+
from keras.src import backend as backend_module
|
| 7 |
+
from keras.src.api_export import keras_export
|
| 8 |
+
from keras.src.backend.common import global_state
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def in_tf_graph():
    """Return True when execution happens inside a TensorFlow graph.

    This is the case either when a `TFGraphScope` is active, or when
    TensorFlow has been imported and is not executing eagerly.
    """
    if global_state.get_global_attribute("in_tf_graph_scope", False):
        return True

    # Only consult TensorFlow if it was already imported; importing it
    # here would be a heavyweight side effect.
    if "tensorflow" not in sys.modules:
        return False
    from keras.src.utils.module_utils import tensorflow as tf

    return not tf.executing_eagerly()
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def convert_tf_tensor(outputs, dtype=None):
    """Convert a TF tensor to the current backend's tensor type.

    Left unchanged when the backend is TensorFlow itself or when running
    inside a TF graph (where conversion is not possible).
    """
    should_convert = (
        backend_module.backend() != "tensorflow" and not in_tf_graph()
    )
    if should_convert:
        outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
    return outputs
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class TFGraphScope:
    """Context manager flagging that code runs inside a TF graph trace."""

    def __init__(self):
        # Remember the previous flag value so nested scopes restore it
        # correctly on exit.
        self._original_value = global_state.get_global_attribute(
            "in_tf_graph_scope", False
        )

    def __enter__(self):
        global_state.set_global_attribute("in_tf_graph_scope", True)

    def __exit__(self, *exc_info, **kwargs):
        global_state.set_global_attribute(
            "in_tf_graph_scope", self._original_value
        )
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class DynamicBackend:
    """A class that can be used to switch from one backend to another.

    Attribute access is forwarded to the module implementing the currently
    selected backend.

    Example:

    ```python
    backend = DynamicBackend("tensorflow")
    y = backend.square(tf.constant(...))
    backend.set_backend("jax")
    y = backend.square(jax.numpy.array(...))
    ```

    Args:
        backend: Initial backend to use (string).
    """

    def __init__(self, backend=None):
        # Fall back to the globally configured backend when none is given.
        self._backend = backend or backend_module.backend()

    def set_backend(self, backend):
        """Select the backend to forward calls to. Raises on unknown names."""
        if backend not in ("tensorflow", "jax", "torch", "numpy", "openvino"):
            raise ValueError(
                "Available backends are ('tensorflow', 'jax', 'torch', "
                f"'numpy' and 'openvino'). Received: backend={backend}"
            )
        self._backend = backend

    def reset(self):
        """Revert to the globally configured Keras backend."""
        self._backend = backend_module.backend()

    @property
    def name(self):
        """Name of the currently selected backend (string)."""
        return self._backend

    def __getattr__(self, name):
        backend_name = self._backend
        if backend_name == "numpy":
            if backend_module.backend() == "numpy":
                return getattr(backend_module, name)
            else:
                raise NotImplementedError(
                    "Currently, we cannot dynamically import the numpy backend "
                    "because it would disrupt the namespace of the import."
                )
        module_path = {
            "tensorflow": "keras.src.backend.tensorflow",
            "jax": "keras.src.backend.jax",
            "torch": "keras.src.backend.torch",
            "openvino": "keras.src.backend.openvino",
        }.get(backend_name)
        if module_path is not None:
            return getattr(importlib.import_module(module_path), name)
        # Unknown backend string: fall through and return None, matching the
        # implicit behavior of the original branch chain.
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@keras_export("keras.config.set_backend")
def set_backend(backend):
    """Reload the backend (and the Keras package).

    Example:

    ```python
    keras.config.set_backend("jax")
    ```

    Args:
        backend: Name of the backend to switch to (e.g. `"jax"`,
            `"tensorflow"`, `"torch"`). Written to the `KERAS_BACKEND`
            environment variable; no validation is performed here.

    ⚠️ WARNING ⚠️: Using this function is dangerous and should be done
    carefully. Changing the backend will **NOT** convert
    the type of any already-instantiated objects.
    Thus, any layers / tensors / etc. already created will no
    longer be usable without errors. It is strongly recommended **not**
    to keep around **any** Keras-originated objects instances created
    before calling `set_backend()`.

    This includes any function or class instance that uses any Keras
    functionality. All such code needs to be re-executed after calling
    `set_backend()`.
    """
    os.environ["KERAS_BACKEND"] = backend
    # Clear module cache. Every cached `keras*` module must be dropped so
    # the re-import below re-executes them under the new KERAS_BACKEND.
    # NOTE(review): the `startswith("keras")` test also matches third-party
    # packages named e.g. `keras_cv` — presumably intentional, so they get
    # reloaded against the new backend too.
    loaded_modules = [
        key for key in sys.modules.keys() if key.startswith("keras")
    ]
    for key in loaded_modules:
        del sys.modules[key]
    # Reimport Keras with the new backend (set via KERAS_BACKEND).
    import keras

    # Finally: refresh all imported Keras submodules.
    # Any global in *this* module that is itself a `keras.*` module object
    # still points at the stale pre-reload module; recover its dotted path
    # from its repr (e.g. "<module 'keras.ops' from ...>") and re-import it.
    globs = copy.copy(globals())
    for key, value in globs.items():
        if value.__class__ == keras.__class__:
            if str(value).startswith("<module 'keras."):
                module_name = str(value)
                module_name = module_name[module_name.find("'") + 1 :]
                module_name = module_name[: module_name.find("'")]
                globals()[key] = importlib.import_module(module_name)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/code_stats.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def count_loc(directory, exclude=("_test",), extensions=(".py",), verbose=0):
    """Count lines of code under `directory`, recursively.

    Blank lines, `#` comment lines, and docstrings delimited by triple
    double-quotes (`\"\"\"`) are not counted. Single-quoted (`'''`)
    docstrings are NOT recognized and will be counted as code.

    Args:
        directory: Root directory to walk.
        exclude: Suffixes used to skip entries: a directory is skipped when
            its path ends with one of them, and a file is skipped when its
            name ends with `<suffix><extension>` (e.g. `foo_test.py`).
        extensions: File extensions to include. A file is counted when it
            matches *any* of these extensions.
        verbose: If non-zero, print the name of each file processed.

    Returns:
        Total number of counted lines (int).
    """
    loc = 0
    for root, _, fnames in os.walk(directory):
        # Skip excluded directories (e.g. `pkg/foo_test`).
        if any(root.endswith(ex) for ex in exclude):
            continue

        for fname in fnames:
            # Bug fix: the file must match *any* extension. The previous
            # logic broke out of the loop as soon as the file failed to
            # match the first extension, so with multiple extensions most
            # files were silently skipped.
            matched_exts = [ext for ext in extensions if fname.endswith(ext)]
            if not matched_exts:
                continue
            # Skip excluded files such as `foo_test.py`.
            if any(
                fname.endswith(ex + ext)
                for ext in matched_exts
                for ex in exclude
            ):
                continue

            fname = os.path.join(root, fname)
            if verbose:
                print(f"Count LoCs in {fname}")

            with open(fname) as f:
                lines = f.read().split("\n")

            string_open = False
            for line in lines:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                if not string_open:
                    if not line.startswith('"""'):
                        loc += 1
                    else:
                        # A one-line docstring (`"""..."""`) opens and
                        # closes on the same line.
                        if not line.endswith('"""'):
                            string_open = True
                else:
                    if line.startswith('"""'):
                        string_open = False
    return loc
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/config.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import json
|
| 3 |
+
|
| 4 |
+
try:
|
| 5 |
+
import difflib
|
| 6 |
+
except ImportError:
|
| 7 |
+
difflib = None
|
| 8 |
+
|
| 9 |
+
from keras.src.api_export import keras_export
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@keras_export("keras.utils.Config")
class Config:
    """A Config is a dict-like container for named values.

    It offers a few advantages over a plain dict:

    - Setting and retrieving values via attribute setting / getting.
    - Ability to freeze the config to ensure no accidental config modifications
        occur past a certain point in your program.
    - Easy serialization of the whole config as JSON.

    Examples:

    ```python
    # Create a config via constructor arguments
    config = Config(learning_rate=0.1, momentum=0.9)

    # Then keep adding to it via attribute-style setting
    config.use_ema = True
    config.ema_overwrite_frequency = 100

    # You can also add attributes via dict-like access
    config["seed"] = 123

    # You can retrieve entries both via attribute-style
    # access and dict-style access
    assert config.seed == 123
    assert config["learning_rate"] == 0.1
    ```

    A config behaves like a dict:

    ```python
    config = Config(learning_rate=0.1, momentum=0.9)
    for k, v in config.items():
        print(f"{k}={v}")

    print(f"keys: {list(config.keys())}")
    print(f"values: {list(config.values())}")
    ```

    In fact, it can be turned into one:

    ```python
    config = Config(learning_rate=0.1, momentum=0.9)
    dict_config = config.as_dict()
    ```

    You can easily serialize a config to JSON:

    ```python
    config = Config(learning_rate=0.1, momentum=0.9)

    json_str = config.to_json()
    ```

    You can also freeze a config to prevent further changes:

    ```python
    config = Config()
    config.optimizer = "adam"
    config.seed = 123

    # Freeze the config to prevent changes.
    config.freeze()
    assert config.frozen

    config.foo = "bar"  # This will raise an error.
    ```
    """

    # Sentinel: while __attrs__ is None (i.e. during __init__, before the
    # set of "real" attributes is captured), attribute access behaves
    # normally instead of being routed to self._config.
    __attrs__ = None

    def __init__(self, **kwargs):
        self._config = kwargs
        self._frozen = False
        # Snapshot the names that exist on the instance/class right now;
        # anything NOT in this set is treated as a config entry from here on.
        self.__attrs__ = set(dir(self))

    @property
    def frozen(self):
        """Returns True if the config is frozen."""
        return self._frozen

    def freeze(self):
        """Marks the config as frozen, preventing any ulterior modification."""
        self._frozen = True

    def unfreeze(self):
        """Re-enables modification of a previously frozen config."""
        self._frozen = False

    def _raise_if_frozen(self):
        # Shared guard for every mutating operation.
        if self._frozen:
            raise ValueError(
                "Cannot mutate attribute(s) because the config is frozen."
            )

    def as_dict(self):
        """Returns a shallow copy of the config as a plain dict."""
        return copy.copy(self._config)

    def to_json(self):
        """Returns the config serialized as a JSON string."""
        return json.dumps(self._config)

    def keys(self):
        """Returns a view of the config's entry names."""
        return self._config.keys()

    def values(self):
        """Returns a view of the config's entry values."""
        return self._config.values()

    def items(self):
        """Returns a view of the config's (name, value) pairs."""
        return self._config.items()

    def pop(self, *args):
        """Removes and returns an entry; raises if the config is frozen."""
        self._raise_if_frozen()
        return self._config.pop(*args)

    def update(self, *args, **kwargs):
        """Updates entries dict-style; raises if the config is frozen."""
        self._raise_if_frozen()
        return self._config.update(*args, **kwargs)

    def get(self, keyname, value=None):
        """Returns the entry for `keyname`, or `value` if absent."""
        return self._config.get(keyname, value)

    def __setattr__(self, name, value):
        # Bypass self.__attrs__ lookup (which would recurse via __getattr__).
        attrs = object.__getattribute__(self, "__attrs__")
        if attrs is None or name in attrs:
            # Real attribute (e.g. _config, _frozen) -> normal assignment.
            return object.__setattr__(self, name, value)

        self._raise_if_frozen()
        self._config[name] = value

    def __getattr__(self, name):
        # Only called when normal lookup fails; route to the config dict.
        attrs = object.__getattribute__(self, "__attrs__")
        if attrs is None or name in attrs:
            return object.__getattribute__(self, name)

        if name in self._config:
            return self._config[name]

        msg = f"Unknown attribute: '{name}'."
        if difflib is not None:
            # Suggest the closest existing entry name for typos.
            closest_matches = difflib.get_close_matches(
                name, self._config.keys(), n=1, cutoff=0.7
            )
            if closest_matches:
                msg += f" Did you mean '{closest_matches[0]}'?"
        raise AttributeError(msg)

    def __setitem__(self, key, item):
        self._raise_if_frozen()
        self._config[key] = item

    def __getitem__(self, key):
        return self._config[key]

    def __repr__(self):
        return f"<Config {self._config}>"

    def __iter__(self):
        # Iterate entry names in sorted order (unlike a plain dict, which
        # iterates in insertion order).
        keys = sorted(self._config.keys())
        for k in keys:
            yield k

    def __len__(self):
        return len(self._config)

    def __delitem__(self, key):
        self._raise_if_frozen()
        del self._config[key]

    def __contains__(self, item):
        return item in self._config
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dataset_utils.py
ADDED
|
@@ -0,0 +1,763 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import random
|
| 3 |
+
import time
|
| 4 |
+
import warnings
|
| 5 |
+
from multiprocessing.pool import ThreadPool
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from keras.src import tree
|
| 10 |
+
from keras.src.api_export import keras_export
|
| 11 |
+
from keras.src.utils import io_utils
|
| 12 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@keras_export("keras.utils.split_dataset")
def split_dataset(
    dataset, left_size=None, right_size=None, shuffle=False, seed=None
):
    """Splits a dataset into a left half and a right half (e.g. train / test).

    Args:
        dataset:
            A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,
            or a list/tuple of arrays with the same length.
        left_size: If float (in the range `[0, 1]`), it signifies
            the fraction of the data to pack in the left dataset. If integer, it
            signifies the number of samples to pack in the left dataset. If
            `None`, defaults to the complement to `right_size`.
            Defaults to `None`.
        right_size: If float (in the range `[0, 1]`), it signifies
            the fraction of the data to pack in the right dataset.
            If integer, it signifies the number of samples to pack
            in the right dataset.
            If `None`, defaults to the complement to `left_size`.
            Defaults to `None`.
        shuffle: Boolean, whether to shuffle the data before splitting it.
        seed: A random seed for shuffling.

    Returns:
        A tuple of two `tf.data.Dataset` objects:
        the left and right splits.

    Raises:
        TypeError: If `dataset` is not one of the supported types.
        ValueError: If both `left_size` and `right_size` are `None`.

    Example:

    >>> data = np.random.random(size=(1000, 4))
    >>> left_ds, right_ds = keras.utils.split_dataset(data, left_size=0.8)
    >>> int(left_ds.cardinality())
    800
    >>> int(right_ds.cardinality())
    200
    """
    dataset_type_spec = _get_type_spec(dataset)

    if dataset_type_spec is None:
        raise TypeError(
            "The `dataset` argument must be either"
            "a `tf.data.Dataset`, a `torch.utils.data.Dataset`"
            "object, or a list/tuple of arrays. "
            f"Received: dataset={dataset} of type {type(dataset)}"
        )

    if right_size is None and left_size is None:
        raise ValueError(
            "At least one of the `left_size` or `right_size` "
            "must be specified. Received: left_size=None and "
            "right_size=None"
        )

    # Materialize the whole dataset as a Python list so it can be shuffled
    # and sliced (this loads everything into memory).
    dataset_as_list = _convert_dataset_to_list(dataset, dataset_type_spec)

    if shuffle:
        # Draw a seed when none is given so the shuffle is still seeded
        # deterministically within this call.
        if seed is None:
            seed = random.randint(0, int(1e6))
        random.seed(seed)
        random.shuffle(dataset_as_list)

    total_length = len(dataset_as_list)

    # Normalize float fractions / None complements into absolute counts.
    left_size, right_size = _rescale_dataset_split_sizes(
        left_size, right_size, total_length
    )
    # NOTE(review): `[-right_size:]` assumes right_size >= 1 — a value of 0
    # would select the entire list. Presumably `_rescale_dataset_split_sizes`
    # guarantees this; confirm against its implementation.
    left_split = list(dataset_as_list[:left_size])
    right_split = list(dataset_as_list[-right_size:])

    # Restore the per-sample structure (tuples/dicts) of the source dataset.
    left_split = _restore_dataset_from_list(
        left_split, dataset_type_spec, dataset
    )
    right_split = _restore_dataset_from_list(
        right_split, dataset_type_spec, dataset
    )

    left_split = tf.data.Dataset.from_tensor_slices(left_split)
    right_split = tf.data.Dataset.from_tensor_slices(right_split)

    # apply batching to the splits if the dataset is batched
    if dataset_type_spec is tf.data.Dataset and is_batched(dataset):
        batch_size = get_batch_size(dataset)
        if batch_size is not None:
            left_split = left_split.batch(batch_size)
            right_split = right_split.batch(batch_size)

    left_split = left_split.prefetch(tf.data.AUTOTUNE)
    right_split = right_split.prefetch(tf.data.AUTOTUNE)
    return left_split, right_split
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def _convert_dataset_to_list(
    dataset,
    dataset_type_spec,
    data_size_warning_flag=True,
    ensure_shape_similarity=True,
):
    """Materialize `dataset` into a plain Python list of samples.

    Args:
        dataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,
            or a list/tuple of arrays.
        dataset_type_spec: the type of the dataset.
        data_size_warning_flag: If set to `True`, a warning will
            be issued if the dataset takes longer than 10 seconds to iterate.
            Defaults to `True`.
        ensure_shape_similarity: If set to `True`, the shape of
            the first sample will be used to validate the shape of rest of the
            samples. Defaults to `True`.

    Returns:
        List: A list of samples.
    """
    iterator = _get_data_iterator_from_dataset(dataset, dataset_type_spec)
    # `_get_next_sample` is a generator; draining it with `list()` collects
    # every sample while it performs shape checks and slow-iteration warnings.
    return list(
        _get_next_sample(
            iterator,
            ensure_shape_similarity,
            data_size_warning_flag,
            time.time(),
        )
    )
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def _get_data_iterator_from_dataset(dataset, dataset_type_spec):
|
| 147 |
+
"""Get the iterator from a dataset.
|
| 148 |
+
|
| 149 |
+
Args:
|
| 150 |
+
dataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,
|
| 151 |
+
or a list/tuple of arrays.
|
| 152 |
+
dataset_type_spec: The type of the dataset.
|
| 153 |
+
|
| 154 |
+
Returns:
|
| 155 |
+
iterator: An `iterator` object.
|
| 156 |
+
"""
|
| 157 |
+
if dataset_type_spec is list:
|
| 158 |
+
if len(dataset) == 0:
|
| 159 |
+
raise ValueError(
|
| 160 |
+
"Received an empty list dataset. "
|
| 161 |
+
"Please provide a non-empty list of arrays."
|
| 162 |
+
)
|
| 163 |
+
|
| 164 |
+
expected_shape = None
|
| 165 |
+
for i, element in enumerate(dataset):
|
| 166 |
+
if not isinstance(element, np.ndarray):
|
| 167 |
+
raise ValueError(
|
| 168 |
+
"Expected a list of `numpy.ndarray` objects,"
|
| 169 |
+
f"Received: {type(element)} at index {i}."
|
| 170 |
+
)
|
| 171 |
+
if expected_shape is None:
|
| 172 |
+
expected_shape = element.shape
|
| 173 |
+
elif element.shape[0] != expected_shape[0]:
|
| 174 |
+
raise ValueError(
|
| 175 |
+
"Received a list of NumPy arrays with different lengths."
|
| 176 |
+
f"Mismatch found at index {i}, "
|
| 177 |
+
f"Expected shape={expected_shape} "
|
| 178 |
+
f"Received shape={np.array(element).shape}."
|
| 179 |
+
"Please provide a list of NumPy arrays of the same length."
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
return iter(zip(*dataset))
|
| 183 |
+
elif dataset_type_spec is tuple:
|
| 184 |
+
if len(dataset) == 0:
|
| 185 |
+
raise ValueError(
|
| 186 |
+
"Received an empty list dataset."
|
| 187 |
+
"Please provide a non-empty tuple of arrays."
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
expected_shape = None
|
| 191 |
+
for i, element in enumerate(dataset):
|
| 192 |
+
if not isinstance(element, np.ndarray):
|
| 193 |
+
raise ValueError(
|
| 194 |
+
"Expected a tuple of `numpy.ndarray` objects,"
|
| 195 |
+
f"Received: {type(element)} at index {i}."
|
| 196 |
+
)
|
| 197 |
+
if expected_shape is None:
|
| 198 |
+
expected_shape = element.shape
|
| 199 |
+
elif element.shape[0] != expected_shape[0]:
|
| 200 |
+
raise ValueError(
|
| 201 |
+
"Received a tuple of NumPy arrays with different lengths."
|
| 202 |
+
f"Mismatch found at index {i}, "
|
| 203 |
+
f"Expected shape={expected_shape} "
|
| 204 |
+
f"Received shape={np.array(element).shape}."
|
| 205 |
+
"Please provide a tuple of NumPy arrays of the same length."
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
return iter(zip(*dataset))
|
| 209 |
+
elif dataset_type_spec is tf.data.Dataset:
|
| 210 |
+
if is_batched(dataset):
|
| 211 |
+
dataset = dataset.unbatch()
|
| 212 |
+
return iter(dataset)
|
| 213 |
+
|
| 214 |
+
elif is_torch_dataset(dataset):
|
| 215 |
+
return iter(dataset)
|
| 216 |
+
elif dataset_type_spec is np.ndarray:
|
| 217 |
+
return iter(dataset)
|
| 218 |
+
raise ValueError(f"Invalid dataset_type_spec: {dataset_type_spec}")
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def _get_next_sample(
    dataset_iterator,
    ensure_shape_similarity,
    data_size_warning_flag,
    start_time,
):
    """Yield data samples from the `dataset_iterator`.

    Args:
        dataset_iterator: An `iterator` object.
        ensure_shape_similarity: If set to `True`, the shape of
            the first sample will be used to validate the shape of rest of the
            samples. Defaults to `True`.
        data_size_warning_flag: If set to `True`, a warning will
            be issued if the dataset takes longer than 10 seconds to iterate.
            Defaults to `True`.
        start_time (float): the start time of the dataset iteration. this is
            used only if `data_size_warning_flag` is set to true.

    Yields:
        data_sample: The next sample.

    Raises:
        ValueError: If the iterator is empty, or (when
            `ensure_shape_similarity` is on) a later sample's shape does not
            match the first sample's shape.
    """
    # Local import to avoid a circular dependency at module load time.
    from keras.src.trainers.data_adapters.data_adapter_utils import (
        is_torch_tensor,
    )

    try:
        dataset_iterator = iter(dataset_iterator)
        first_sample = next(dataset_iterator)
        # Shape checking is only meaningful for tensor-like samples; for any
        # other sample type it is silently disabled.
        if isinstance(first_sample, (tf.Tensor, np.ndarray)) or is_torch_tensor(
            first_sample
        ):
            first_sample_shape = np.array(first_sample).shape
        else:
            first_sample_shape = None
            ensure_shape_similarity = False
        yield first_sample
    except StopIteration:
        # An empty iterator means the caller passed an empty dataset.
        raise ValueError(
            "Received an empty dataset. Argument `dataset` must "
            "be a non-empty list/tuple of `numpy.ndarray` objects "
            "or `tf.data.Dataset` objects."
        )

    # `i` restarts at 0 here, so it counts samples *after* the first one.
    for i, sample in enumerate(dataset_iterator):
        if ensure_shape_similarity:
            if first_sample_shape != np.array(sample).shape:
                raise ValueError(
                    "All `dataset` samples must have same shape, "
                    f"Expected shape: {np.array(first_sample).shape} "
                    f"Received shape: {np.array(sample).shape} at index "
                    f"{i}."
                )
        if data_size_warning_flag:
            # Only check the clock every 10 samples to keep iteration cheap.
            if i % 10 == 0:
                cur_time = time.time()
                # warns user if the dataset is too large to iterate within 10s
                if int(cur_time - start_time) > 10 and data_size_warning_flag:
                    warnings.warn(
                        "The dataset is taking longer than 10 seconds to "
                        "iterate over. This may be due to the size of the "
                        "dataset. Keep in mind that the `split_dataset` "
                        "utility is only for small in-memory dataset "
                        "(e.g. < 10,000 samples).",
                        category=ResourceWarning,
                        source="split_dataset",
                    )
                    # Flip the flag so the warning is emitted at most once.
                    data_size_warning_flag = False
        yield sample
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def is_torch_dataset(dataset):
    """Whether `dataset` is an instance of `torch.utils.data.Dataset`.

    Checked structurally via the MRO so that torch never has to be
    imported: any base class named `Dataset` whose module lives under
    `torch.utils.data` counts.
    """
    if not hasattr(dataset, "__class__"):
        return False
    return any(
        klass.__name__ == "Dataset"
        and str(klass.__module__).startswith("torch.utils.data")
        for klass in dataset.__class__.__mro__
    )
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def _rescale_dataset_split_sizes(left_size, right_size, total_length):
|
| 303 |
+
"""Rescale the dataset split sizes.
|
| 304 |
+
|
| 305 |
+
We want to ensure that the sum of
|
| 306 |
+
the split sizes is equal to the total length of the dataset.
|
| 307 |
+
|
| 308 |
+
Args:
|
| 309 |
+
left_size: The size of the left dataset split.
|
| 310 |
+
right_size: The size of the right dataset split.
|
| 311 |
+
total_length: The total length of the dataset.
|
| 312 |
+
|
| 313 |
+
Returns:
|
| 314 |
+
tuple: A tuple of rescaled `left_size` and `right_size` integers.
|
| 315 |
+
"""
|
| 316 |
+
left_size_type = type(left_size)
|
| 317 |
+
right_size_type = type(right_size)
|
| 318 |
+
|
| 319 |
+
# check both left_size and right_size are integers or floats
|
| 320 |
+
if (left_size is not None and left_size_type not in [int, float]) and (
|
| 321 |
+
right_size is not None and right_size_type not in [int, float]
|
| 322 |
+
):
|
| 323 |
+
raise TypeError(
|
| 324 |
+
"Invalid `left_size` and `right_size` Types. Expected: "
|
| 325 |
+
"integer or float or None, Received: type(left_size)="
|
| 326 |
+
f"{left_size_type} and type(right_size)={right_size_type}"
|
| 327 |
+
)
|
| 328 |
+
|
| 329 |
+
# check left_size is a integer or float
|
| 330 |
+
if left_size is not None and left_size_type not in [int, float]:
|
| 331 |
+
raise TypeError(
|
| 332 |
+
"Invalid `left_size` Type. Expected: int or float or None, "
|
| 333 |
+
f"Received: type(left_size)={left_size_type}. "
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
# check right_size is a integer or float
|
| 337 |
+
if right_size is not None and right_size_type not in [int, float]:
|
| 338 |
+
raise TypeError(
|
| 339 |
+
"Invalid `right_size` Type. "
|
| 340 |
+
"Expected: int or float or None,"
|
| 341 |
+
f"Received: type(right_size)={right_size_type}."
|
| 342 |
+
)
|
| 343 |
+
|
| 344 |
+
# check left_size and right_size are non-zero
|
| 345 |
+
if left_size == 0 and right_size == 0:
|
| 346 |
+
raise ValueError(
|
| 347 |
+
"Both `left_size` and `right_size` are zero. "
|
| 348 |
+
"At least one of the split sizes must be non-zero."
|
| 349 |
+
)
|
| 350 |
+
|
| 351 |
+
# check left_size is non-negative and less than 1 and less than total_length
|
| 352 |
+
if (
|
| 353 |
+
left_size_type is int
|
| 354 |
+
and (left_size <= 0 or left_size >= total_length)
|
| 355 |
+
or left_size_type is float
|
| 356 |
+
and (left_size <= 0 or left_size >= 1)
|
| 357 |
+
):
|
| 358 |
+
raise ValueError(
|
| 359 |
+
"`left_size` should be either a positive integer "
|
| 360 |
+
f"smaller than {total_length}, or a float "
|
| 361 |
+
"within the range `[0, 1]`. Received: left_size="
|
| 362 |
+
f"{left_size}"
|
| 363 |
+
)
|
| 364 |
+
|
| 365 |
+
# check right_size is non-negative and less than 1 and less than
|
| 366 |
+
# total_length
|
| 367 |
+
if (
|
| 368 |
+
right_size_type is int
|
| 369 |
+
and (right_size <= 0 or right_size >= total_length)
|
| 370 |
+
or right_size_type is float
|
| 371 |
+
and (right_size <= 0 or right_size >= 1)
|
| 372 |
+
):
|
| 373 |
+
raise ValueError(
|
| 374 |
+
"`right_size` should be either a positive integer "
|
| 375 |
+
f"and smaller than {total_length} or a float "
|
| 376 |
+
"within the range `[0, 1]`. Received: right_size="
|
| 377 |
+
f"{right_size}"
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
+
# check sum of left_size and right_size is less than or equal to
|
| 381 |
+
# total_length
|
| 382 |
+
if (
|
| 383 |
+
right_size_type is left_size_type is float
|
| 384 |
+
and right_size + left_size > 1
|
| 385 |
+
):
|
| 386 |
+
raise ValueError(
|
| 387 |
+
"The sum of `left_size` and `right_size` is greater "
|
| 388 |
+
"than 1. It must be less than or equal to 1."
|
| 389 |
+
)
|
| 390 |
+
|
| 391 |
+
if left_size_type is float:
|
| 392 |
+
left_size = round(left_size * total_length)
|
| 393 |
+
elif left_size_type is int:
|
| 394 |
+
left_size = float(left_size)
|
| 395 |
+
|
| 396 |
+
if right_size_type is float:
|
| 397 |
+
right_size = round(right_size * total_length)
|
| 398 |
+
elif right_size_type is int:
|
| 399 |
+
right_size = float(right_size)
|
| 400 |
+
|
| 401 |
+
if left_size is None:
|
| 402 |
+
left_size = total_length - right_size
|
| 403 |
+
elif right_size is None:
|
| 404 |
+
right_size = total_length - left_size
|
| 405 |
+
|
| 406 |
+
if left_size + right_size > total_length:
|
| 407 |
+
raise ValueError(
|
| 408 |
+
"The sum of `left_size` and `right_size` should "
|
| 409 |
+
"be smaller than the {total_length}. "
|
| 410 |
+
f"Received: left_size + right_size = {left_size+right_size}"
|
| 411 |
+
f"and total_length = {total_length}"
|
| 412 |
+
)
|
| 413 |
+
|
| 414 |
+
for split, side in [(left_size, "left"), (right_size, "right")]:
|
| 415 |
+
if split == 0:
|
| 416 |
+
raise ValueError(
|
| 417 |
+
f"With `dataset` of length={total_length}, `left_size`="
|
| 418 |
+
f"{left_size} and `right_size`={right_size}."
|
| 419 |
+
f"Resulting {side} side dataset split will be empty. "
|
| 420 |
+
"Adjust any of the aforementioned parameters"
|
| 421 |
+
)
|
| 422 |
+
|
| 423 |
+
left_size, right_size = int(left_size), int(right_size)
|
| 424 |
+
return left_size, right_size
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
def _restore_dataset_from_list(
    dataset_as_list, dataset_type_spec, original_dataset
):
    """Restore the dataset from the list of arrays.

    Args:
        dataset_as_list: list of per-sample structures produced by
            `_convert_dataset_to_list`. Assumed non-empty and homogeneous in
            structure (the first element is used as the template).
        dataset_type_spec: the type of the original dataset.
        original_dataset: the original dataset object (used only to detect
            torch datasets).

    Returns:
        A structure of stacked `np.ndarray`s mirroring one sample's layout
        (lists converted to tuples), or `dataset_as_list` unchanged for
        other dataset types.
    """
    if dataset_type_spec in [tuple, list, tf.data.Dataset] or is_torch_dataset(
        original_dataset
    ):
        # Save structure by taking the first element.
        element_spec = dataset_as_list[0]
        # Flatten each element.
        dataset_as_list = [tree.flatten(sample) for sample in dataset_as_list]
        # Combine respective elements at all indices: transpose the
        # samples-of-components list into components-of-samples arrays.
        dataset_as_list = [np.array(sample) for sample in zip(*dataset_as_list)]
        # Recreate the original structure of elements.
        dataset_as_list = tree.pack_sequence_as(element_spec, dataset_as_list)
        # Turn lists to tuples as tf.data will fail on lists.
        return tree.traverse(
            lambda x: tuple(x) if isinstance(x, list) else x,
            dataset_as_list,
            top_down=False,
        )

    return dataset_as_list
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def is_batched(dataset):
    """Check if the `tf.data.Dataset` is batched."""
    # Batched `tf.data` datasets expose a private `_batch_size` attribute;
    # probe for it EAFP-style.
    try:
        dataset._batch_size
    except AttributeError:
        return False
    return True
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def get_batch_size(dataset):
    """Get the batch size of the dataset, or `None` if it is unbatched."""
    # `getattr` with a `None` default collapses the original
    # hasattr/else branching into a single expression.
    return getattr(dataset, "_batch_size", None)
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
def _get_type_spec(dataset):
|
| 466 |
+
"""Get the type spec of the dataset."""
|
| 467 |
+
if isinstance(dataset, tuple):
|
| 468 |
+
return tuple
|
| 469 |
+
elif isinstance(dataset, list):
|
| 470 |
+
return list
|
| 471 |
+
elif isinstance(dataset, np.ndarray):
|
| 472 |
+
return np.ndarray
|
| 473 |
+
elif isinstance(dataset, tf.data.Dataset):
|
| 474 |
+
return tf.data.Dataset
|
| 475 |
+
elif is_torch_dataset(dataset):
|
| 476 |
+
from torch.utils.data import Dataset as TorchDataset
|
| 477 |
+
|
| 478 |
+
return TorchDataset
|
| 479 |
+
else:
|
| 480 |
+
return None
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
def index_directory(
    directory,
    labels,
    formats,
    class_names=None,
    shuffle=True,
    seed=None,
    follow_links=False,
    verbose=True,
):
    """List all files in `directory`, with their labels.

    Args:
        directory: Directory where the data is located.
            If `labels` is `"inferred"`, it should contain
            subdirectories, each containing files for a class.
            Otherwise, the directory structure is ignored.
        labels: Either `"inferred"`
            (labels are generated from the directory structure),
            `None` (no labels),
            or a list/tuple of integer labels of the same size as the number
            of valid files found in the directory.
            Labels should be sorted according
            to the alphanumeric order of the image file paths
            (obtained via `os.walk(directory)` in Python).
        formats: Allowlist of file extensions to index
            (e.g. `".jpg"`, `".txt"`).
        class_names: Only valid if `labels="inferred"`. This is the explicit
            list of class names (must match names of subdirectories). Used
            to control the order of the classes
            (otherwise alphanumerical order is used).
        shuffle: Whether to shuffle the data. Defaults to `True`.
            If set to `False`, sorts the data in alphanumeric order.
        seed: Optional random seed for shuffling.
        follow_links: Whether to visits subdirectories pointed to by symlinks.
        verbose: Whether the function prints number of files found and classes.
            Defaults to `True`.

    Returns:
        tuple (file_paths, labels, class_names).
        - file_paths: list of file paths (strings).
        - labels: list of matching integer labels (same length as file_paths)
        - class_names: names of the classes corresponding to these labels, in
          order.

    Raises:
        ValueError: If `class_names` is inconsistent with `labels`, or if a
            manual `labels` list does not match the number of files found.
    """
    if labels == "inferred":
        # Each non-hidden subdirectory becomes a class.
        subdirs = []
        for subdir in sorted(tf.io.gfile.listdir(directory)):
            if tf.io.gfile.isdir(tf.io.gfile.join(directory, subdir)):
                if not subdir.startswith("."):
                    # Normalize away a trailing slash from gfile listings.
                    if subdir.endswith("/"):
                        subdir = subdir[:-1]
                    subdirs.append(subdir)
        if class_names is not None:
            if not set(class_names).issubset(set(subdirs)):
                raise ValueError(
                    "The `class_names` passed did not match the "
                    "names of the subdirectories of the target directory. "
                    f"Expected: {subdirs} (or a subset of it), "
                    f"but received: class_names={class_names}"
                )
            subdirs = class_names  # Keep provided order.
    else:
        # In the explicit/no-label cases, index from the parent directory down.
        subdirs = [""]
        if class_names is not None:
            if labels is None:
                raise ValueError(
                    "When `labels=None` (no labels), argument `class_names` "
                    "cannot be specified."
                )
            else:
                raise ValueError(
                    "When argument `labels` is specified, argument "
                    "`class_names` cannot be specified (the `class_names` "
                    "will be the sorted list of labels)."
                )
    class_names = subdirs
    class_indices = dict(zip(class_names, range(len(class_names))))

    # Build an index of the files
    # in the different class subfolders.
    pool = ThreadPool()
    results = []
    filenames = []

    # One async indexing task per class subdirectory.
    for dirpath in (tf.io.gfile.join(directory, subdir) for subdir in subdirs):
        results.append(
            pool.apply_async(
                index_subdirectory,
                (dirpath, class_indices, follow_links, formats),
            )
        )
    labels_list = []
    # `res.get()` blocks, so results arrive in submission (class) order.
    for res in results:
        partial_filenames, partial_labels = res.get()
        labels_list.append(partial_labels)
        filenames += partial_filenames

    if labels == "inferred":
        # Inferred labels.
        i = 0
        labels = np.zeros((len(filenames),), dtype="int32")
        for partial_labels in labels_list:
            labels[i : i + len(partial_labels)] = partial_labels
            i += len(partial_labels)
    elif labels is None:
        class_names = None
    else:
        # Manual labels.
        if len(labels) != len(filenames):
            raise ValueError(
                "Expected the lengths of `labels` to match the number "
                "of files in the target directory. len(labels) is "
                f"{len(labels)} while we found {len(filenames)} files "
                f"in directory {directory}."
            )
        class_names = [str(label) for label in sorted(set(labels))]
    if verbose:
        if labels is None:
            io_utils.print_msg(f"Found {len(filenames)} files.")
        else:
            io_utils.print_msg(
                f"Found {len(filenames)} files belonging "
                f"to {len(class_names)} classes."
            )
    pool.close()
    pool.join()
    file_paths = [tf.io.gfile.join(directory, fname) for fname in filenames]

    if shuffle:
        # Shuffle globally to erase macro-structure
        if seed is None:
            seed = np.random.randint(1e6)
        rng = np.random.RandomState(seed)
        rng.shuffle(file_paths)
        if labels is not None:
            # Re-seed with the same seed so labels get the identical
            # permutation and stay aligned with file_paths.
            rng = np.random.RandomState(seed)
            rng.shuffle(labels)
    return file_paths, labels, class_names
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
def iter_valid_files(directory, follow_links, formats):
    """Yield `(root, filename)` pairs under `directory` matching `formats`.

    Uses `os.walk` when symlinks must be followed (gfile's walk is used
    otherwise), visits directories in sorted path order, and filenames in
    sorted order, matching extensions case-insensitively.
    """
    if follow_links:
        walker = os.walk(directory, followlinks=follow_links)
    else:
        walker = tf.io.gfile.walk(directory)
    for root, _, filenames in sorted(walker, key=lambda entry: entry[0]):
        for filename in sorted(filenames):
            if filename.lower().endswith(formats):
                yield root, filename
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
def index_subdirectory(directory, class_indices, follow_links, formats):
    """Recursively walks directory and list image paths and their class index.

    Args:
        directory: string, target directory.
        class_indices: dict mapping class names to their index.
        follow_links: boolean, whether to recursively follow subdirectories
            (if False, we only list top-level images in `directory`).
        formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").

    Returns:
        tuple `(filenames, labels)`. `filenames` is a list of relative file
        paths, and `labels` is a list of integer labels corresponding
        to these files.
    """
    dirname = os.path.basename(directory)
    filenames = []
    labels = []
    for root, fname in iter_valid_files(directory, follow_links, formats):
        labels.append(class_indices[dirname])
        absolute = tf.io.gfile.join(root, fname)
        # Paths are stored relative to the parent of `directory`, prefixed
        # with the class subdirectory name.
        filenames.append(
            tf.io.gfile.join(dirname, os.path.relpath(absolute, directory))
        )
    return filenames, labels
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def get_training_or_validation_split(samples, labels, validation_split, subset):
    """Potentially restrict samples & labels to a training or validation split.

    Args:
        samples: List of elements.
        labels: List of corresponding labels.
        validation_split: Float, fraction of data to reserve for validation.
        subset: Subset of the data to return.
            Either `"training"` or `"validation"`. If `validation_split`
            is falsy, `subset` is ignored and all of the data is returned.

    Returns:
        tuple (samples, labels), potentially restricted to the specified
        subset.

    Raises:
        ValueError: If `validation_split` is set and `subset` is neither
            `"training"` nor `"validation"`.
    """
    if not validation_split:
        return samples, labels

    num_val_samples = int(validation_split * len(samples))
    # Slice with an explicit index instead of negative slices: when
    # `num_val_samples` rounds down to 0, `samples[:-0]` would yield an
    # empty training set and `samples[-0:]` the full validation set.
    split_index = len(samples) - num_val_samples
    if subset == "training":
        io_utils.print_msg(f"Using {split_index} files for training.")
        samples = samples[:split_index]
        if labels is not None:
            labels = labels[:split_index]
    elif subset == "validation":
        io_utils.print_msg(f"Using {num_val_samples} files for validation.")
        samples = samples[split_index:]
        if labels is not None:
            labels = labels[split_index:]
    else:
        raise ValueError(
            '`subset` must be either "training" '
            f'or "validation", received: {subset}'
        )
    return samples, labels
|
| 701 |
+
|
| 702 |
+
|
| 703 |
+
def labels_to_dataset(labels, label_mode, num_classes):
    """Create a `tf.data.Dataset` from the list/tuple of labels.

    Args:
        labels: list/tuple of labels to be converted into a `tf.data.Dataset`.
        label_mode: String describing the encoding of `labels`. Options are:
        - `"binary"` indicates that the labels (there can be only 2) are encoded
            as `float32` scalars with values 0 or 1
            (e.g. for `binary_crossentropy`).
        - `"categorical"` means that the labels are mapped into a categorical
            vector. (e.g. for `categorical_crossentropy` loss).
        num_classes: number of classes of labels.

    Returns:
        A `tf.data.Dataset` instance.
    """
    dataset = tf.data.Dataset.from_tensor_slices(labels)
    # Pick the per-label encoding once, then apply it with a single map.
    if label_mode == "binary":
        # Scalar 0/1 labels become shape-(1,) float32 vectors.
        encode = lambda y: tf.expand_dims(tf.cast(y, "float32"), axis=-1)
    elif label_mode == "categorical":
        # Integer labels become one-hot vectors of length `num_classes`.
        encode = lambda y: tf.one_hot(y, num_classes)
    else:
        encode = None
    if encode is not None:
        dataset = dataset.map(encode, num_parallel_calls=tf.data.AUTOTUNE)
    return dataset
|
| 731 |
+
|
| 732 |
+
|
| 733 |
+
def check_validation_split_arg(validation_split, subset, shuffle, seed):
    """Raise errors in case of invalid argument values.

    Args:
        validation_split: float between 0 and 1, fraction of data to reserve
            for validation.
        subset: One of `"training"`, `"validation"`, or `"both"`. Only used
            if `validation_split` is set.
        shuffle: Whether to shuffle the data. Either `True` or `False`.
        seed: random seed for shuffling and transformations.
    """
    if validation_split and not 0 < validation_split < 1:
        raise ValueError(
            f"`validation_split` must be between 0 and 1, "
            f"received: {validation_split}"
        )
    # The two arguments must be provided together or not at all.
    if bool(validation_split) != bool(subset):
        raise ValueError(
            "If `subset` is set, `validation_split` must be set, and inversely."
        )
    if subset not in ("training", "validation", "both", None):
        raise ValueError(
            f'`subset` must be either "training", '
            f'"validation" or "both", received: {subset}'
        )
    # Without a fixed seed, re-shuffling on each call could leak samples
    # between the training and validation subsets.
    if validation_split and shuffle and seed is None:
        raise ValueError(
            "If using `validation_split` and shuffling the data, you must "
            "provide a `seed` argument, to make sure that there is no "
            "overlap between the training and validation subset."
        )
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/dtype_utils.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src import ops
|
| 3 |
+
|
| 4 |
+
# Bit width of each supported dtype name, as consumed by `dtype_size`.
# `bool` is counted as a single bit.
DTYPE_TO_SIZE = {
    **{f"float{i}": i for i in (16, 32, 64)},
    **{f"int{i}": i for i in (8, 16, 32, 64)},
    **{f"uint{i}": i for i in (8, 16, 32, 64)},
    "bfloat16": 16,
    "bool": 1,
}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def dtype_size(dtype):
    """Return the size in bits of the dtype named `dtype`.

    Raises:
        ValueError: If `dtype` is not a key of `DTYPE_TO_SIZE`.
    """
    if dtype not in DTYPE_TO_SIZE:
        raise ValueError(f"Invalid dtype: {dtype}")
    return DTYPE_TO_SIZE[dtype]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def is_float(dtype):
    """Whether `dtype` names a floating-point type (includes `bfloat16`)."""
    return dtype.find("float") >= 0
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def cast_to_common_dtype(tensors):
    """Cast a list of tensors to a common dtype.

    If any tensor is floating-point, they will all be casted to the most-precise
    floating-point dtype. Otherwise the tensors are not casted.

    Args:
        tensors: A list of tensors.

    Returns:
        Same list, casted to a common dtype.
    """
    highest_float = None
    highest_float_size = -1  # Impossible size, so any real float wins.
    for x in tensors:
        dtype = backend.standardize_dtype(x.dtype)
        if is_float(dtype):
            if highest_float is None or dtype_size(dtype) > highest_float_size:
                highest_float = dtype
                highest_float_size = dtype_size(dtype)
            elif {dtype, highest_float} == {"float16", "bfloat16"}:
                # `float16` and `bfloat16` are both 16 bits wide but have
                # incompatible exponent/mantissa splits, so their common
                # dtype is `float32`. Bug fix: the check is now symmetric;
                # previously it only fired when `float16` was encountered
                # *after* `bfloat16`, making the result order-dependent.
                highest_float = "float32"
                highest_float_size = dtype_size(highest_float)
    if highest_float:
        tensors = [ops.cast(x, highest_float) for x in tensors]
    return tensors
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/file_utils.py
ADDED
|
@@ -0,0 +1,518 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
import shutil
|
| 5 |
+
import tarfile
|
| 6 |
+
import urllib
|
| 7 |
+
import warnings
|
| 8 |
+
import zipfile
|
| 9 |
+
from urllib.request import urlretrieve
|
| 10 |
+
|
| 11 |
+
from keras.src.api_export import keras_export
|
| 12 |
+
from keras.src.backend import config
|
| 13 |
+
from keras.src.utils import io_utils
|
| 14 |
+
from keras.src.utils.module_utils import gfile
|
| 15 |
+
from keras.src.utils.progbar import Progbar
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def path_to_string(path):
    """Convert `os.PathLike` objects to `str`; pass anything else through.

    Non-path objects (e.g. already-open file objects) are returned
    unchanged, which allows callers to accept either paths or file-like
    objects transparently.

    Args:
        path: `PathLike` object that represents a path.

    Returns:
        The string form of `path`, or `path` itself if it is not path-like.
    """
    return os.fspath(path) if isinstance(path, os.PathLike) else path
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def resolve_path(path):
    """Return `path` made absolute, with symlinks fully resolved."""
    absolute = os.path.abspath(path)
    return os.path.realpath(absolute)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def is_path_in_dir(path, base_dir):
    """Return whether `path`, joined onto `base_dir`, stays inside `base_dir`.

    Used to reject archive members that would escape the extraction root
    via `..` components, absolute paths, or symlink tricks. `base_dir` is
    expected to already be resolved (see `resolve_path`).

    Args:
        path: Candidate (possibly relative) path of an archive member.
        base_dir: Resolved directory the member must not escape.

    Returns:
        `True` if the fully resolved destination is `base_dir` itself or a
        path strictly under it, `False` otherwise.
    """
    candidate = os.path.realpath(os.path.abspath(os.path.join(base_dir, path)))
    # Compare against `base_dir + os.sep` rather than a bare `startswith`:
    # a plain prefix test would let a sibling directory that merely shares
    # the prefix (e.g. `/base_evil` vs. `/base`) pass the containment check.
    return candidate == base_dir or candidate.startswith(base_dir + os.sep)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def is_link_in_dir(info, base):
    """Return whether a tar link member's target resolves inside `base`.

    Args:
        info: `tarfile.TarInfo`-like object for a symlink/hardlink member
            (must expose `name` and `linkname`).
        base: Extraction root directory.
    """
    # The link target is relative to the directory containing the link.
    link_parent = resolve_path(os.path.join(base, os.path.dirname(info.name)))
    return is_path_in_dir(info.linkname, base_dir=link_parent)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def filter_safe_paths(members):
    """Yield only archive members that extract safely under the cwd.

    Members (or link targets) resolving outside the current working
    directory are skipped with a warning instead of being extracted.

    Args:
        members: Iterable of `tarfile.TarInfo` objects.

    Yields:
        The members deemed safe to extract.
    """
    base_dir = resolve_path(".")
    for member in members:
        if is_path_in_dir(member.name, base_dir):
            yield member
        elif (member.issym() or member.islnk()) and is_link_in_dir(
            member, base_dir
        ):
            yield member
        else:
            warnings.warn(
                "Skipping invalid path during archive extraction: "
                f"'{member.name}'.",
                stacklevel=2,
            )
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def extract_archive(file_path, path=".", archive_format="auto"):
    """Extracts an archive if it matches a supported format.

    Supports `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats.

    Args:
        file_path: Path to the archive file.
        path: Where to extract the archive file.
        archive_format: Archive format to try for extracting the file.
            Options are `"auto"`, `"tar"`, `"zip"`, and `None`.
            `"tar"` includes `.tar`, `.tar.gz`, and `.tar.bz` files.
            The default `"auto"` uses `["tar", "zip"]`.
            `None` or an empty list will return no matches found.

    Returns:
        `True` if a match was found and an archive extraction was completed,
        `False` otherwise.
    """
    if archive_format is None:
        return False
    # Normalize `archive_format` to a list of format names to try in order.
    if archive_format == "auto":
        archive_format = ["tar", "zip"]
    if isinstance(archive_format, str):
        archive_format = [archive_format]

    file_path = path_to_string(file_path)
    path = path_to_string(path)

    for archive_type in archive_format:
        if archive_type == "tar":
            open_fn = tarfile.open
            is_match_fn = tarfile.is_tarfile
        elif archive_type == "zip":
            open_fn = zipfile.ZipFile
            is_match_fn = zipfile.is_zipfile
        else:
            raise NotImplementedError(archive_type)

        # Only the first format whose sniffer matches the file is extracted.
        if is_match_fn(file_path):
            with open_fn(file_path) as archive:
                try:
                    if zipfile.is_zipfile(file_path):
                        # Zip archive.
                        archive.extractall(path)
                    else:
                        # Tar archive, perhaps unsafe. Filter paths.
                        archive.extractall(
                            path, members=filter_safe_paths(archive)
                        )
                except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
                    # Remove whatever was partially extracted so a failed
                    # run doesn't leave a corrupt destination behind.
                    if os.path.exists(path):
                        if os.path.isfile(path):
                            os.remove(path)
                        else:
                            shutil.rmtree(path)
                    raise
                return True
    return False
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
@keras_export("keras.utils.get_file")
def get_file(
    fname=None,
    origin=None,
    untar=False,
    md5_hash=None,
    file_hash=None,
    cache_subdir="datasets",
    hash_algorithm="auto",
    extract=False,
    archive_format="auto",
    cache_dir=None,
    force_download=False,
):
    """Downloads a file from a URL if it is not already in the cache.

    By default the file at the url `origin` is downloaded to the
    cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
    and given the filename `fname`. The final location of a file
    `example.txt` would therefore be `~/.keras/datasets/example.txt`.
    Files in `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats can
    also be extracted.

    Passing a hash will verify the file after download. The command line
    programs `shasum` and `sha256sum` can compute the hash.

    Example:

    ```python
    path_to_downloaded_file = get_file(
        origin="https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
        extract=True,
    )
    ```

    Args:
        fname: If the target is a single file, this is your desired
            local name for the file.
            If `None`, the name of the file at `origin` will be used.
            If downloading and extracting a directory archive,
            the provided `fname` will be used as extraction directory
            name (only if it doesn't have an extension).
        origin: Original URL of the file.
        untar: Deprecated in favor of `extract` argument.
            Boolean, whether the file is a tar archive that should
            be extracted.
        md5_hash: Deprecated in favor of `file_hash` argument.
            md5 hash of the file for file integrity verification.
        file_hash: The expected hash string of the file after download.
            The sha256 and md5 hash algorithms are both supported.
        cache_subdir: Subdirectory under the Keras cache dir where the file is
            saved. If an absolute path, e.g. `"/path/to/folder"` is
            specified, the file will be saved at that location.
        hash_algorithm: Select the hash algorithm to verify the file.
            options are `"md5"`, `"sha256"`, and `"auto"`.
            The default `"auto"` detects the hash algorithm in use.
        extract: If `True`, extracts the archive. Only applicable to compressed
            archive files like tar or zip.
        archive_format: Archive format to try for extracting the file.
            Options are `"auto"`, `"tar"`, `"zip"`, and `None`.
            `"tar"` includes tar, tar.gz, and tar.bz files.
            The default `"auto"` corresponds to `["tar", "zip"]`.
            None or an empty list will return no matches found.
        cache_dir: Location to store cached files, when None it
            defaults to either `$KERAS_HOME` if the `KERAS_HOME` environment
            variable is set or `~/.keras/`.
        force_download: If `True`, the file will always be re-downloaded
            regardless of the cache state.

    Returns:
        Path to the downloaded file.

    **⚠️ Warning on malicious downloads ⚠️**

    Downloading something from the Internet carries a risk.
    NEVER download a file/archive if you do not trust the source.
    We recommend that you specify the `file_hash` argument
    (if the hash of the source file is known) to make sure that the file you
    are getting is the one you expect.
    """
    if origin is None:
        raise ValueError(
            'Please specify the "origin" argument (URL of the file '
            "to download)."
        )

    if cache_dir is None:
        cache_dir = config.keras_home()
    # The deprecated `md5_hash` maps onto the `file_hash`/`hash_algorithm`
    # pair; an explicit `file_hash` takes precedence.
    if md5_hash is not None and file_hash is None:
        file_hash = md5_hash
        hash_algorithm = "md5"
    datadir_base = os.path.expanduser(cache_dir)
    # Fall back to a world-accessible location if the cache dir is not
    # writable by the current user.
    if not os.access(datadir_base, os.W_OK):
        datadir_base = os.path.join("/tmp", ".keras")
    datadir = os.path.join(datadir_base, cache_subdir)
    os.makedirs(datadir, exist_ok=True)

    # Remember whether the caller supplied `fname` before normalization;
    # this drives the extraction-directory naming below.
    provided_fname = fname
    fname = path_to_string(fname)

    if not fname:
        # Derive a file name from the URL path component.
        fname = os.path.basename(urllib.parse.urlsplit(origin).path)
        if not fname:
            raise ValueError(
                "Can't parse the file name from the origin provided: "
                f"'{origin}'."
                "Please specify the `fname` argument."
            )
    else:
        if os.sep in fname:
            raise ValueError(
                "Paths are no longer accepted as the `fname` argument. "
                "To specify the file's parent directory, use "
                f"the `cache_dir` argument. Received: fname={fname}"
            )

    if extract or untar:
        # When extracting, the archive is downloaded next to (never into)
        # the directory it will be extracted to.
        if provided_fname:
            if "." in fname:
                # `fname` has an extension: treat it as the archive name
                # and derive the extraction dir from the stem.
                download_target = os.path.join(datadir, fname)
                fname = fname[: fname.find(".")]
                extraction_dir = os.path.join(datadir, fname + "_extracted")
            else:
                # Extension-less `fname` names the extraction dir itself.
                extraction_dir = os.path.join(datadir, fname)
                download_target = os.path.join(datadir, fname + "_archive")
        else:
            extraction_dir = os.path.join(datadir, fname)
            download_target = os.path.join(datadir, fname + "_archive")
    else:
        download_target = os.path.join(datadir, fname)

    if force_download:
        download = True
    elif os.path.exists(download_target):
        # File found in cache.
        download = False
        # Verify integrity if a hash was provided.
        if file_hash is not None:
            if not validate_file(
                download_target, file_hash, algorithm=hash_algorithm
            ):
                io_utils.print_msg(
                    "A local file was found, but it seems to be "
                    f"incomplete or outdated because the {hash_algorithm} "
                    "file hash does not match the original value of "
                    f"{file_hash} so we will re-download the data."
                )
                download = True
    else:
        download = True

    if download:
        io_utils.print_msg(f"Downloading data from {origin}")

        class DLProgbar:
            """Manage progress bar state for use in urlretrieve."""

            def __init__(self):
                self.progbar = None
                self.finished = False

            def __call__(self, block_num, block_size, total_size):
                # urlretrieve reports -1 when the server sends no
                # Content-Length; show an indeterminate bar in that case.
                if total_size == -1:
                    total_size = None
                if not self.progbar:
                    self.progbar = Progbar(total_size)
                current = block_num * block_size

                if total_size is None:
                    self.progbar.update(current)
                else:
                    if current < total_size:
                        self.progbar.update(current)
                    elif not self.finished:
                        self.progbar.update(self.progbar.target)
                        self.finished = True

        error_msg = "URL fetch failure on {}: {} -- {}"
        try:
            try:
                urlretrieve(origin, download_target, DLProgbar())
            except urllib.error.HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except urllib.error.URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt):
            # Never leave a partial download in the cache.
            if os.path.exists(download_target):
                os.remove(download_target)
            raise

        # Validate download if succeeded and user provided an expected hash
        # Security conscious users would get the hash of the file from a
        # separate channel and pass it to this API to prevent MITM / corruption:
        if os.path.exists(download_target) and file_hash is not None:
            if not validate_file(
                download_target, file_hash, algorithm=hash_algorithm
            ):
                raise ValueError(
                    "Incomplete or corrupted file detected. "
                    f"The {hash_algorithm} "
                    "file hash does not match the provided value "
                    f"of {file_hash}."
                )

    if extract or untar:
        if untar:
            archive_format = "tar"

        status = extract_archive(
            download_target, extraction_dir, archive_format
        )
        if not status:
            warnings.warn("Could not extract archive.", stacklevel=2)
        return extraction_dir

    return download_target
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def resolve_hasher(algorithm, file_hash=None):
    """Return a new `hashlib` hasher matching `algorithm`.

    With `algorithm="auto"`, a 64-character `file_hash` is taken to be a
    sha256 digest; anything else falls back to md5 (legacy behavior).
    """
    wants_sha256 = algorithm == "sha256" or (
        algorithm == "auto" and file_hash is not None and len(file_hash) == 64
    )
    if wants_sha256:
        return hashlib.sha256()
    # This is used only for legacy purposes.
    return hashlib.md5()
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def hash_file(fpath, algorithm="sha256", chunk_size=65535):
    """Calculates a file sha256 or md5 hash.

    Example:

    >>> hash_file('/path/to/file.zip')
    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'

    Args:
        fpath: Path to the file being hashed.
        algorithm: Hash algorithm name (`"auto"`, `"sha256"`, or `"md5"`),
            or an already-constructed hasher object.
        chunk_size: Bytes to read at a time, important for large files.

    Returns:
        The hex digest of the file contents.
    """
    hasher = resolve_hasher(algorithm) if isinstance(algorithm, str) else algorithm

    # Stream the file in chunks so arbitrarily large files fit in memory.
    with open(fpath, "rb") as stream:
        while True:
            chunk = stream.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)

    return hasher.hexdigest()
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def validate_file(fpath, file_hash, algorithm="auto", chunk_size=65535):
    """Validates a file against a sha256 or md5 hash.

    Args:
        fpath: path to the file being validated
        file_hash: The expected hash string of the file.
            The sha256 and md5 hash algorithms are both supported.
        algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`.
            The default `"auto"` detects the hash algorithm in use.
        chunk_size: Bytes to read at a time, important for large files.

    Returns:
        Boolean, whether the file is valid.
    """
    hasher = resolve_hasher(algorithm, file_hash)
    actual = str(hash_file(fpath, hasher, chunk_size))
    return actual == str(file_hash)
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def is_remote_path(filepath):
    """
    Determines if a given filepath indicates a remote location.

    This function checks if the filepath represents a known remote pattern
    such as GCS (`/gcs`), CNS (`/cns`), CFS (`/cfs`), HDFS (`/hdfs`),
    readahead paths, or any URL-style path containing `://`.

    Args:
        filepath (str): The path to be checked.

    Returns:
        bool: True if the filepath is a recognized remote path, otherwise False
    """
    return bool(
        re.match(r"^(/cns|/cfs|/gcs|/hdfs|/readahead|.*://).*$", str(filepath))
    )
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
# Below are gfile-replacement utils.
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def _raise_if_no_gfile(path):
    """Raise a uniform error for remote paths when `gfile` is unavailable.

    Called by the gfile-replacement helpers below whenever a remote path
    is seen but TensorFlow (which provides `gfile`) is not installed.

    Args:
        path: The offending path, included in the error message.

    Raises:
        ValueError: Always.
    """
    raise ValueError(
        "Handling remote paths requires installing TensorFlow "
        f"(in order to use gfile). Received path: {path}"
    )
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def exists(path):
    """Like `os.path.exists`, dispatching remote paths to `gfile`."""
    if not is_remote_path(path):
        return os.path.exists(path)
    if gfile.available:
        return gfile.exists(path)
    _raise_if_no_gfile(path)
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def File(path, mode="r"):
    """Like builtin `open`, dispatching remote paths to `gfile.GFile`."""
    if not is_remote_path(path):
        return open(path, mode=mode)
    if gfile.available:
        return gfile.GFile(path, mode=mode)
    _raise_if_no_gfile(path)
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
def join(path, *paths):
    """Like `os.path.join`, dispatching remote paths to `gfile.join`."""
    if not is_remote_path(path):
        return os.path.join(path, *paths)
    if gfile.available:
        return gfile.join(path, *paths)
    _raise_if_no_gfile(path)
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
def isdir(path):
    """Like `os.path.isdir`, dispatching remote paths to `gfile`."""
    if not is_remote_path(path):
        return os.path.isdir(path)
    if gfile.available:
        return gfile.isdir(path)
    _raise_if_no_gfile(path)
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
def remove(path):
    """Like `os.remove`, dispatching remote paths to `gfile`."""
    if not is_remote_path(path):
        return os.remove(path)
    if gfile.available:
        return gfile.remove(path)
    _raise_if_no_gfile(path)
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def rmtree(path):
    """Like `shutil.rmtree`, dispatching remote paths to `gfile`."""
    if not is_remote_path(path):
        return shutil.rmtree(path)
    if gfile.available:
        return gfile.rmtree(path)
    _raise_if_no_gfile(path)
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def listdir(path):
    """Like `os.listdir`, dispatching remote paths to `gfile`."""
    if not is_remote_path(path):
        return os.listdir(path)
    if gfile.available:
        return gfile.listdir(path)
    _raise_if_no_gfile(path)
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def copy(src, dst):
    """Like `shutil.copy`, using `gfile` when either end is remote."""
    if not (is_remote_path(src) or is_remote_path(dst)):
        return shutil.copy(src, dst)
    if gfile.available:
        return gfile.copy(src, dst, overwrite=True)
    _raise_if_no_gfile(f"src={src} dst={dst}")
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def makedirs(path):
    """Like `os.makedirs`, dispatching remote paths to `gfile`."""
    if not is_remote_path(path):
        return os.makedirs(path)
    if gfile.available:
        return gfile.makedirs(path)
    _raise_if_no_gfile(path)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/image_dataset_utils.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.backend.config import standardize_data_format
|
| 5 |
+
from keras.src.utils import dataset_utils
|
| 6 |
+
from keras.src.utils import image_utils
|
| 7 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 8 |
+
|
| 9 |
+
ALLOWLIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png")
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@keras_export(
|
| 13 |
+
[
|
| 14 |
+
"keras.utils.image_dataset_from_directory",
|
| 15 |
+
"keras.preprocessing.image_dataset_from_directory",
|
| 16 |
+
]
|
| 17 |
+
)
|
| 18 |
+
def image_dataset_from_directory(
|
| 19 |
+
directory,
|
| 20 |
+
labels="inferred",
|
| 21 |
+
label_mode="int",
|
| 22 |
+
class_names=None,
|
| 23 |
+
color_mode="rgb",
|
| 24 |
+
batch_size=32,
|
| 25 |
+
image_size=(256, 256),
|
| 26 |
+
shuffle=True,
|
| 27 |
+
seed=None,
|
| 28 |
+
validation_split=None,
|
| 29 |
+
subset=None,
|
| 30 |
+
interpolation="bilinear",
|
| 31 |
+
follow_links=False,
|
| 32 |
+
crop_to_aspect_ratio=False,
|
| 33 |
+
pad_to_aspect_ratio=False,
|
| 34 |
+
data_format=None,
|
| 35 |
+
verbose=True,
|
| 36 |
+
):
|
| 37 |
+
"""Generates a `tf.data.Dataset` from image files in a directory.
|
| 38 |
+
|
| 39 |
+
If your directory structure is:
|
| 40 |
+
|
| 41 |
+
```
|
| 42 |
+
main_directory/
|
| 43 |
+
...class_a/
|
| 44 |
+
......a_image_1.jpg
|
| 45 |
+
......a_image_2.jpg
|
| 46 |
+
...class_b/
|
| 47 |
+
......b_image_1.jpg
|
| 48 |
+
......b_image_2.jpg
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
Then calling `image_dataset_from_directory(main_directory,
|
| 52 |
+
labels='inferred')` will return a `tf.data.Dataset` that yields batches of
|
| 53 |
+
images from the subdirectories `class_a` and `class_b`, together with labels
|
| 54 |
+
0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).
|
| 55 |
+
|
| 56 |
+
Supported image formats: `.jpeg`, `.jpg`, `.png`, `.bmp`, `.gif`.
|
| 57 |
+
Animated gifs are truncated to the first frame.
|
| 58 |
+
|
| 59 |
+
Args:
|
| 60 |
+
directory: Directory where the data is located.
|
| 61 |
+
If `labels` is `"inferred"`, it should contain
|
| 62 |
+
subdirectories, each containing images for a class.
|
| 63 |
+
Otherwise, the directory structure is ignored.
|
| 64 |
+
labels: Either `"inferred"`
|
| 65 |
+
(labels are generated from the directory structure),
|
| 66 |
+
`None` (no labels),
|
| 67 |
+
or a list/tuple of integer labels of the same size as the number of
|
| 68 |
+
image files found in the directory. Labels should be sorted
|
| 69 |
+
according to the alphanumeric order of the image file paths
|
| 70 |
+
(obtained via `os.walk(directory)` in Python).
|
| 71 |
+
label_mode: String describing the encoding of `labels`. Options are:
|
| 72 |
+
- `"int"`: means that the labels are encoded as integers
|
| 73 |
+
(e.g. for `sparse_categorical_crossentropy` loss).
|
| 74 |
+
- `"categorical"` means that the labels are
|
| 75 |
+
encoded as a categorical vector
|
| 76 |
+
(e.g. for `categorical_crossentropy` loss).
|
| 77 |
+
- `"binary"` means that the labels (there can be only 2)
|
| 78 |
+
are encoded as `float32` scalars with values 0 or 1
|
| 79 |
+
(e.g. for `binary_crossentropy`).
|
| 80 |
+
- `None` (no labels).
|
| 81 |
+
class_names: Only valid if `labels` is `"inferred"`.
|
| 82 |
+
This is the explicit list of class names
|
| 83 |
+
(must match names of subdirectories). Used to control the order
|
| 84 |
+
of the classes (otherwise alphanumerical order is used).
|
| 85 |
+
color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`.
|
| 86 |
+
Whether the images will be converted to
|
| 87 |
+
have 1, 3, or 4 channels. Defaults to `"rgb"`.
|
| 88 |
+
batch_size: Size of the batches of data. Defaults to 32.
|
| 89 |
+
If `None`, the data will not be batched
|
| 90 |
+
(the dataset will yield individual samples).
|
| 91 |
+
image_size: Size to resize images to after they are read from disk,
|
| 92 |
+
specified as `(height, width)`.
|
| 93 |
+
Since the pipeline processes batches of images that must all have
|
| 94 |
+
the same size, this must be provided. Defaults to `(256, 256)`.
|
| 95 |
+
shuffle: Whether to shuffle the data. Defaults to `True`.
|
| 96 |
+
If set to `False`, sorts the data in alphanumeric order.
|
| 97 |
+
seed: Optional random seed for shuffling and transformations.
|
| 98 |
+
validation_split: Optional float between 0 and 1,
|
| 99 |
+
fraction of data to reserve for validation.
|
| 100 |
+
subset: Subset of the data to return.
|
| 101 |
+
One of `"training"`, `"validation"`, or `"both"`.
|
| 102 |
+
Only used if `validation_split` is set.
|
| 103 |
+
When `subset="both"`, the utility returns a tuple of two datasets
|
| 104 |
+
(the training and validation datasets respectively).
|
| 105 |
+
interpolation: String, the interpolation method used when
|
| 106 |
+
resizing images.
|
| 107 |
+
Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`,
|
| 108 |
+
`"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
|
| 109 |
+
Defaults to `"bilinear"`.
|
| 110 |
+
follow_links: Whether to visit subdirectories pointed to by symlinks.
|
| 111 |
+
Defaults to `False`.
|
| 112 |
+
crop_to_aspect_ratio: If `True`, resize the images without aspect
|
| 113 |
+
ratio distortion. When the original aspect ratio differs from the
|
| 114 |
+
target aspect ratio, the output image will be cropped so as to
|
| 115 |
+
return the largest possible window in the image
|
| 116 |
+
(of size `image_size`) that matches the target aspect ratio. By
|
| 117 |
+
default (`crop_to_aspect_ratio=False`), aspect ratio may not be
|
| 118 |
+
preserved.
|
| 119 |
+
pad_to_aspect_ratio: If `True`, resize the images without aspect
|
| 120 |
+
ratio distortion. When the original aspect ratio differs from the
|
| 121 |
+
target aspect ratio, the output image will be padded so as to
|
| 122 |
+
return the largest possible window in the image
|
| 123 |
+
(of size `image_size`) that matches the target aspect ratio. By
|
| 124 |
+
default (`pad_to_aspect_ratio=False`), aspect ratio may not be
|
| 125 |
+
preserved.
|
| 126 |
+
data_format: If None uses keras.config.image_data_format()
|
| 127 |
+
otherwise either 'channel_last' or 'channel_first'.
|
| 128 |
+
verbose: Whether to display number information on classes and
|
| 129 |
+
number of files found. Defaults to `True`.
|
| 130 |
+
|
| 131 |
+
Returns:
|
| 132 |
+
|
| 133 |
+
A `tf.data.Dataset` object.
|
| 134 |
+
|
| 135 |
+
- If `label_mode` is `None`, it yields `float32` tensors of shape
|
| 136 |
+
`(batch_size, image_size[0], image_size[1], num_channels)`,
|
| 137 |
+
encoding images (see below for rules regarding `num_channels`).
|
| 138 |
+
- Otherwise, it yields a tuple `(images, labels)`, where `images` has
|
| 139 |
+
shape `(batch_size, image_size[0], image_size[1], num_channels)`,
|
| 140 |
+
and `labels` follows the format described below.
|
| 141 |
+
|
| 142 |
+
Rules regarding labels format:
|
| 143 |
+
|
| 144 |
+
- if `label_mode` is `"int"`, the labels are an `int32` tensor of shape
|
| 145 |
+
`(batch_size,)`.
|
| 146 |
+
- if `label_mode` is `"binary"`, the labels are a `float32` tensor of
|
| 147 |
+
1s and 0s of shape `(batch_size, 1)`.
|
| 148 |
+
- if `label_mode` is `"categorical"`, the labels are a `float32` tensor
|
| 149 |
+
of shape `(batch_size, num_classes)`, representing a one-hot
|
| 150 |
+
encoding of the class index.
|
| 151 |
+
|
| 152 |
+
Rules regarding number of channels in the yielded images:
|
| 153 |
+
|
| 154 |
+
- if `color_mode` is `"grayscale"`,
|
| 155 |
+
there's 1 channel in the image tensors.
|
| 156 |
+
- if `color_mode` is `"rgb"`,
|
| 157 |
+
there are 3 channels in the image tensors.
|
| 158 |
+
- if `color_mode` is `"rgba"`,
|
| 159 |
+
there are 4 channels in the image tensors.
|
| 160 |
+
"""
|
| 161 |
+
|
| 162 |
+
if labels not in ("inferred", None):
|
| 163 |
+
if not isinstance(labels, (list, tuple)):
|
| 164 |
+
raise ValueError(
|
| 165 |
+
"`labels` argument should be a list/tuple of integer labels, "
|
| 166 |
+
"of the same size as the number of image files in the target "
|
| 167 |
+
"directory. If you wish to infer the labels from the "
|
| 168 |
+
"subdirectory "
|
| 169 |
+
'names in the target directory, pass `labels="inferred"`. '
|
| 170 |
+
"If you wish to get a dataset that only contains images "
|
| 171 |
+
f"(no labels), pass `labels=None`. Received: labels={labels}"
|
| 172 |
+
)
|
| 173 |
+
if class_names:
|
| 174 |
+
raise ValueError(
|
| 175 |
+
"You can only pass `class_names` if "
|
| 176 |
+
f'`labels="inferred"`. Received: labels={labels}, and '
|
| 177 |
+
f"class_names={class_names}"
|
| 178 |
+
)
|
| 179 |
+
if label_mode not in {"int", "categorical", "binary", None}:
|
| 180 |
+
raise ValueError(
|
| 181 |
+
'`label_mode` argument must be one of "int", '
|
| 182 |
+
'"categorical", "binary", '
|
| 183 |
+
f"or None. Received: label_mode={label_mode}"
|
| 184 |
+
)
|
| 185 |
+
if labels is None or label_mode is None:
|
| 186 |
+
labels = None
|
| 187 |
+
label_mode = None
|
| 188 |
+
if color_mode == "rgb":
|
| 189 |
+
num_channels = 3
|
| 190 |
+
elif color_mode == "rgba":
|
| 191 |
+
num_channels = 4
|
| 192 |
+
elif color_mode == "grayscale":
|
| 193 |
+
num_channels = 1
|
| 194 |
+
else:
|
| 195 |
+
raise ValueError(
|
| 196 |
+
'`color_mode` must be one of {"rgb", "rgba", "grayscale"}. '
|
| 197 |
+
f"Received: color_mode={color_mode}"
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
if isinstance(image_size, int):
|
| 201 |
+
image_size = (image_size, image_size)
|
| 202 |
+
elif not isinstance(image_size, (list, tuple)) or not len(image_size) == 2:
|
| 203 |
+
raise ValueError(
|
| 204 |
+
"Invalid `image_size` value. Expected a tuple of 2 integers. "
|
| 205 |
+
f"Received: image_size={image_size}"
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
interpolation = interpolation.lower()
|
| 209 |
+
supported_interpolations = (
|
| 210 |
+
"bilinear",
|
| 211 |
+
"nearest",
|
| 212 |
+
"bicubic",
|
| 213 |
+
"area",
|
| 214 |
+
"lanczos3",
|
| 215 |
+
"lanczos5",
|
| 216 |
+
"gaussian",
|
| 217 |
+
"mitchellcubic",
|
| 218 |
+
)
|
| 219 |
+
if interpolation not in supported_interpolations:
|
| 220 |
+
raise ValueError(
|
| 221 |
+
"Argument `interpolation` should be one of "
|
| 222 |
+
f"{supported_interpolations}. "
|
| 223 |
+
f"Received: interpolation={interpolation}"
|
| 224 |
+
)
|
| 225 |
+
|
| 226 |
+
dataset_utils.check_validation_split_arg(
|
| 227 |
+
validation_split, subset, shuffle, seed
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
if seed is None:
|
| 231 |
+
seed = np.random.randint(1e6)
|
| 232 |
+
image_paths, labels, class_names = dataset_utils.index_directory(
|
| 233 |
+
directory,
|
| 234 |
+
labels,
|
| 235 |
+
formats=ALLOWLIST_FORMATS,
|
| 236 |
+
class_names=class_names,
|
| 237 |
+
shuffle=shuffle,
|
| 238 |
+
seed=seed,
|
| 239 |
+
follow_links=follow_links,
|
| 240 |
+
verbose=verbose,
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
if label_mode == "binary" and len(class_names) != 2:
|
| 244 |
+
raise ValueError(
|
| 245 |
+
'When passing `label_mode="binary"`, there must be exactly 2 '
|
| 246 |
+
f"class_names. Received: class_names={class_names}"
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
data_format = standardize_data_format(data_format=data_format)
|
| 250 |
+
if batch_size is not None:
|
| 251 |
+
shuffle_buffer_size = batch_size * 8
|
| 252 |
+
else:
|
| 253 |
+
shuffle_buffer_size = 1024
|
| 254 |
+
|
| 255 |
+
if subset == "both":
|
| 256 |
+
(
|
| 257 |
+
image_paths_train,
|
| 258 |
+
labels_train,
|
| 259 |
+
) = dataset_utils.get_training_or_validation_split(
|
| 260 |
+
image_paths, labels, validation_split, "training"
|
| 261 |
+
)
|
| 262 |
+
(
|
| 263 |
+
image_paths_val,
|
| 264 |
+
labels_val,
|
| 265 |
+
) = dataset_utils.get_training_or_validation_split(
|
| 266 |
+
image_paths, labels, validation_split, "validation"
|
| 267 |
+
)
|
| 268 |
+
if not image_paths_train:
|
| 269 |
+
raise ValueError(
|
| 270 |
+
f"No training images found in directory {directory}. "
|
| 271 |
+
f"Allowed formats: {ALLOWLIST_FORMATS}"
|
| 272 |
+
)
|
| 273 |
+
if not image_paths_val:
|
| 274 |
+
raise ValueError(
|
| 275 |
+
f"No validation images found in directory {directory}. "
|
| 276 |
+
f"Allowed formats: {ALLOWLIST_FORMATS}"
|
| 277 |
+
)
|
| 278 |
+
train_dataset = paths_and_labels_to_dataset(
|
| 279 |
+
image_paths=image_paths_train,
|
| 280 |
+
image_size=image_size,
|
| 281 |
+
num_channels=num_channels,
|
| 282 |
+
labels=labels_train,
|
| 283 |
+
label_mode=label_mode,
|
| 284 |
+
num_classes=len(class_names) if class_names else 0,
|
| 285 |
+
interpolation=interpolation,
|
| 286 |
+
crop_to_aspect_ratio=crop_to_aspect_ratio,
|
| 287 |
+
pad_to_aspect_ratio=pad_to_aspect_ratio,
|
| 288 |
+
data_format=data_format,
|
| 289 |
+
shuffle=shuffle,
|
| 290 |
+
shuffle_buffer_size=shuffle_buffer_size,
|
| 291 |
+
seed=seed,
|
| 292 |
+
)
|
| 293 |
+
|
| 294 |
+
val_dataset = paths_and_labels_to_dataset(
|
| 295 |
+
image_paths=image_paths_val,
|
| 296 |
+
image_size=image_size,
|
| 297 |
+
num_channels=num_channels,
|
| 298 |
+
labels=labels_val,
|
| 299 |
+
label_mode=label_mode,
|
| 300 |
+
num_classes=len(class_names) if class_names else 0,
|
| 301 |
+
interpolation=interpolation,
|
| 302 |
+
crop_to_aspect_ratio=crop_to_aspect_ratio,
|
| 303 |
+
pad_to_aspect_ratio=pad_to_aspect_ratio,
|
| 304 |
+
data_format=data_format,
|
| 305 |
+
shuffle=False,
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
if batch_size is not None:
|
| 309 |
+
train_dataset = train_dataset.batch(batch_size)
|
| 310 |
+
val_dataset = val_dataset.batch(batch_size)
|
| 311 |
+
|
| 312 |
+
train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
|
| 313 |
+
val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
|
| 314 |
+
|
| 315 |
+
# Users may need to reference `class_names`.
|
| 316 |
+
train_dataset.class_names = class_names
|
| 317 |
+
val_dataset.class_names = class_names
|
| 318 |
+
|
| 319 |
+
# Include file paths for images as attribute.
|
| 320 |
+
train_dataset.file_paths = image_paths_train
|
| 321 |
+
val_dataset.file_paths = image_paths_val
|
| 322 |
+
|
| 323 |
+
dataset = [train_dataset, val_dataset]
|
| 324 |
+
else:
|
| 325 |
+
image_paths, labels = dataset_utils.get_training_or_validation_split(
|
| 326 |
+
image_paths, labels, validation_split, subset
|
| 327 |
+
)
|
| 328 |
+
if not image_paths:
|
| 329 |
+
raise ValueError(
|
| 330 |
+
f"No images found in directory {directory}. "
|
| 331 |
+
f"Allowed formats: {ALLOWLIST_FORMATS}"
|
| 332 |
+
)
|
| 333 |
+
|
| 334 |
+
dataset = paths_and_labels_to_dataset(
|
| 335 |
+
image_paths=image_paths,
|
| 336 |
+
image_size=image_size,
|
| 337 |
+
num_channels=num_channels,
|
| 338 |
+
labels=labels,
|
| 339 |
+
label_mode=label_mode,
|
| 340 |
+
num_classes=len(class_names) if class_names else 0,
|
| 341 |
+
interpolation=interpolation,
|
| 342 |
+
crop_to_aspect_ratio=crop_to_aspect_ratio,
|
| 343 |
+
pad_to_aspect_ratio=pad_to_aspect_ratio,
|
| 344 |
+
data_format=data_format,
|
| 345 |
+
shuffle=shuffle,
|
| 346 |
+
shuffle_buffer_size=shuffle_buffer_size,
|
| 347 |
+
seed=seed,
|
| 348 |
+
)
|
| 349 |
+
|
| 350 |
+
if batch_size is not None:
|
| 351 |
+
dataset = dataset.batch(batch_size)
|
| 352 |
+
|
| 353 |
+
dataset = dataset.prefetch(tf.data.AUTOTUNE)
|
| 354 |
+
# Users may need to reference `class_names`.
|
| 355 |
+
dataset.class_names = class_names
|
| 356 |
+
|
| 357 |
+
# Include file paths for images as attribute.
|
| 358 |
+
dataset.file_paths = image_paths
|
| 359 |
+
|
| 360 |
+
return dataset
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def paths_and_labels_to_dataset(
    image_paths,
    image_size,
    num_channels,
    labels,
    label_mode,
    num_classes,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Construct a `tf.data.Dataset` of decoded images and optional labels.

    Args:
        image_paths: List of file paths to the images.
        image_size: `(height, width)` every image is resized to.
        num_channels: Number of channels to decode images with.
        labels: Label values aligned with `image_paths`; only used when
            `label_mode` is truthy.
        label_mode: One of `"int"`, `"categorical"`, `"binary"`, or `None`.
        num_classes: Number of classes, for categorical label encoding.
        interpolation: Interpolation method name used when resizing.
        data_format: `"channels_last"` or `"channels_first"`.
        crop_to_aspect_ratio: Whether to center-crop to the target aspect
            ratio before resizing.
        pad_to_aspect_ratio: Whether to pad to the target aspect ratio
            before resizing.
        shuffle: Whether to shuffle dataset elements.
        shuffle_buffer_size: Shuffle buffer size; falls back to 1024 when
            falsy.
        seed: Shuffle seed.

    Returns:
        A `tf.data.Dataset` yielding images, or `(image, label)` pairs
        when `label_mode` is set.
    """
    ds = tf.data.Dataset.from_tensor_slices(image_paths)
    if label_mode:
        labels_ds = dataset_utils.labels_to_dataset(
            labels, label_mode, num_classes
        )
        ds = tf.data.Dataset.zip((ds, labels_ds))

    if shuffle:
        # Shuffle before decoding, so the (cheap) path tensors are what
        # sit in the shuffle buffer.
        ds = ds.shuffle(buffer_size=shuffle_buffer_size or 1024, seed=seed)

    # Positional arguments forwarded to `load_image` for every element.
    load_args = (
        image_size,
        num_channels,
        interpolation,
        data_format,
        crop_to_aspect_ratio,
        pad_to_aspect_ratio,
    )
    if label_mode:
        mapper = lambda path, label: (load_image(path, *load_args), label)
    else:
        mapper = lambda path: load_image(path, *load_args)
    return ds.map(mapper, num_parallel_calls=tf.data.AUTOTUNE)
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
def load_image(
    path,
    image_size,
    num_channels,
    interpolation,
    data_format,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
):
    """Load an image from a path and resize it.

    Args:
        path: Path to the image file (string or string tensor).
        image_size: `(height, width)` target size.
        num_channels: Number of channels to decode the image with.
        interpolation: Interpolation method used for resizing.
        data_format: `"channels_last"` or `"channels_first"`.
        crop_to_aspect_ratio: Center-crop to the target aspect ratio
            before resizing. Mutually exclusive with `pad_to_aspect_ratio`.
        pad_to_aspect_ratio: Pad to the target aspect ratio before
            resizing. Mutually exclusive with `crop_to_aspect_ratio`.

    Returns:
        The decoded, resized image tensor with a fully-defined static
        shape.

    Raises:
        ValueError: If both `pad_to_aspect_ratio` and
            `crop_to_aspect_ratio` are `True`.
    """
    # Validate mutually exclusive options up front, before any file I/O
    # is performed (previously this was checked only after the file had
    # already been read and decoded).
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio`, `crop_to_aspect_ratio`"
            " can be set to `True`."
        )

    img = tf.io.read_file(path)
    # `expand_animations=False` makes GIFs decode to a single rank-3
    # frame instead of a rank-4 animation.
    img = tf.image.decode_image(
        img, channels=num_channels, expand_animations=False
    )

    if crop_to_aspect_ratio:
        from keras.src.backend import tensorflow as tf_backend

        # `smart_resize` expects the tensor already in the requested
        # data format, so transpose before calling it.
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))
        img = image_utils.smart_resize(
            img,
            image_size,
            interpolation=interpolation,
            data_format=data_format,
            backend_module=tf_backend,
        )
    elif pad_to_aspect_ratio:
        img = tf.image.resize_with_pad(
            img, image_size[0], image_size[1], method=interpolation
        )
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))
    else:
        img = tf.image.resize(img, image_size, method=interpolation)
        if data_format == "channels_first":
            img = tf.transpose(img, (2, 0, 1))

    # Resizing inside `tf.data` loses the static shape; restore it so
    # downstream layers see a fully-defined shape.
    if data_format == "channels_last":
        img.set_shape((image_size[0], image_size[1], num_channels))
    else:
        img.set_shape((num_channels, image_size[0], image_size[1]))
    return img
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/image_utils.py
ADDED
|
@@ -0,0 +1,457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utilities related to image handling."""

import io
import pathlib
import warnings

import numpy as np

from keras.src import backend
from keras.src.api_export import keras_export

# PIL (Pillow) is an optional dependency: fall back to `None` sentinels
# so the functions below can raise a helpful ImportError at call time
# instead of failing when this module is imported.
try:
    from PIL import Image as pil_image

    try:
        # Pillow >= 9.1 exposes resampling filters via the
        # `Image.Resampling` enum.
        pil_image_resampling = pil_image.Resampling
    except AttributeError:
        # Older Pillow exposes the same filters as module-level
        # constants on `Image` itself.
        pil_image_resampling = pil_image
except ImportError:
    pil_image = None
    pil_image_resampling = None


if pil_image_resampling is not None:
    # Maps user-facing interpolation names to PIL resampling filters.
    # Only defined when PIL is available.
    PIL_INTERPOLATION_METHODS = {
        "nearest": pil_image_resampling.NEAREST,
        "bilinear": pil_image_resampling.BILINEAR,
        "bicubic": pil_image_resampling.BICUBIC,
        "hamming": pil_image_resampling.HAMMING,
        "box": pil_image_resampling.BOX,
        "lanczos": pil_image_resampling.LANCZOS,
    }
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@keras_export(
    [
        "keras.utils.array_to_img",
        "keras.preprocessing.image.array_to_img",
    ]
)
def array_to_img(x, data_format=None, scale=True, dtype=None):
    """Converts a 3D NumPy array to a PIL Image instance.

    Example:

    ```python
    from PIL import Image
    img = np.random.random(size=(100, 100, 3))
    pil_img = keras.utils.array_to_img(img)
    ```

    Args:
        x: Input data, in any form that can be converted to a NumPy array.
        data_format: Image data format, can be either `"channels_first"` or
            `"channels_last"`. Defaults to `None`, in which case the global
            setting `keras.backend.image_data_format()` is used (unless you
            changed it, it defaults to `"channels_last"`).
        scale: Whether to rescale the image such that minimum and maximum
            values are 0 and 255 respectively. Defaults to `True`.
        dtype: Dtype to use. `None` means the global setting
            `keras.backend.floatx()` is used (unless you changed it, it
            defaults to `"float32"`). Defaults to `None`.

    Returns:
        A PIL Image instance.
    """

    data_format = backend.standardize_data_format(data_format)
    if dtype is None:
        dtype = backend.floatx()
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. "
            "The use of `array_to_img` requires PIL."
        )
    x = np.asarray(x, dtype=dtype)
    if x.ndim != 3:
        raise ValueError(
            "Expected image array to have rank 3 (single image). "
            f"Got array with shape: {x.shape}"
        )

    # PIL wants channels in the last axis; move them there if the input
    # array is channels-first.
    if data_format == "channels_first":
        x = x.transpose(1, 2, 0)
    if scale:
        # Shift to a zero minimum, then normalize into [0, 255].
        x = x - np.min(x)
        peak = np.max(x)
        if peak != 0:
            x /= peak
        x *= 255

    channels = x.shape[2]
    if channels == 4:
        # RGBA
        return pil_image.fromarray(x.astype("uint8"), "RGBA")
    if channels == 3:
        # RGB
        return pil_image.fromarray(x.astype("uint8"), "RGB")
    if channels == 1:
        # Grayscale: values above 255 indicate a 32-bit signed integer
        # grayscale image, which maps to PIL mode "I".
        if np.max(x) > 255:
            return pil_image.fromarray(x[:, :, 0].astype("int32"), "I")
        return pil_image.fromarray(x[:, :, 0].astype("uint8"), "L")
    raise ValueError(f"Unsupported channel number: {channels}")
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@keras_export(
    [
        "keras.utils.img_to_array",
        "keras.preprocessing.image.img_to_array",
    ]
)
def img_to_array(img, data_format=None, dtype=None):
    """Converts a PIL Image instance to a NumPy array.

    Example:

    ```python
    from PIL import Image
    img_data = np.random.random(size=(100, 100, 3))
    img = keras.utils.array_to_img(img_data)
    array = keras.utils.image.img_to_array(img)
    ```

    Args:
        img: Input PIL Image instance.
        data_format: Image data format, can be either `"channels_first"` or
            `"channels_last"`. Defaults to `None`, in which case the global
            setting `keras.backend.image_data_format()` is used (unless you
            changed it, it defaults to `"channels_last"`).
        dtype: Dtype to use. `None` means the global setting
            `keras.backend.floatx()` is used (unless you changed it, it
            defaults to `"float32"`).

    Returns:
        A 3D NumPy array.
    """

    data_format = backend.standardize_data_format(data_format)
    if dtype is None:
        dtype = backend.floatx()
    # PIL images are (width, height); conversion to NumPy yields
    # (height, width[, channel]).
    x = np.asarray(img, dtype=dtype)
    if x.ndim == 3:
        if data_format == "channels_first":
            return x.transpose(2, 0, 1)
        return x
    if x.ndim == 2:
        # Grayscale image: insert an explicit channel axis.
        if data_format == "channels_first":
            return x.reshape((1, x.shape[0], x.shape[1]))
        return x.reshape((x.shape[0], x.shape[1], 1))
    raise ValueError(f"Unsupported image shape: {x.shape}")
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
@keras_export(["keras.utils.save_img", "keras.preprocessing.image.save_img"])
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
    """Saves an image stored as a NumPy array to a path or file object.

    Args:
        path: Path or file object.
        x: NumPy array.
        data_format: Image data format, either `"channels_first"` or
            `"channels_last"`.
        file_format: Optional file format override. If omitted, the format to
            use is determined from the filename extension. If a file object was
            used instead of a filename, this parameter should always be used.
        scale: Whether to rescale image values to be within `[0, 255]`.
        **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
    """
    data_format = backend.standardize_data_format(data_format)
    img = array_to_img(x, data_format=data_format, scale=scale)
    # Determine the effective output format for the RGBA/JPEG check: an
    # explicit `file_format` wins; otherwise fall back to the filename
    # extension (PIL itself infers the format the same way on save).
    # Previously the check only looked at `file_format`, so saving an
    # RGBA array to a `.jpg` path without `file_format` crashed in PIL.
    effective_format = file_format
    if effective_format is None and isinstance(path, (str, pathlib.Path)):
        suffix = pathlib.Path(path).suffix[1:].lower()
        effective_format = suffix or None
    if img.mode == "RGBA" and effective_format in ("jpg", "jpeg"):
        warnings.warn(
            "The JPG format does not support RGBA images, converting to RGB."
        )
        img = img.convert("RGB")
    img.save(path, format=file_format, **kwargs)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
@keras_export(["keras.utils.load_img", "keras.preprocessing.image.load_img"])
def load_img(
    path,
    color_mode="rgb",
    target_size=None,
    interpolation="nearest",
    keep_aspect_ratio=False,
):
    """Loads an image into PIL format.

    Example:

    ```python
    image = keras.utils.load_img(image_path)
    input_arr = keras.utils.img_to_array(image)
    input_arr = np.array([input_arr])  # Convert single image to a batch.
    predictions = model.predict(input_arr)
    ```

    Args:
        path: Path to image file.
        color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`. Default: `"rgb"`.
            The desired image format.
        target_size: Either `None` (default to original size) or tuple of ints
            `(img_height, img_width)`.
        interpolation: Interpolation method used to resample the image if the
            target size is different from that of the loaded image. Supported
            methods are `"nearest"`, `"bilinear"`, and `"bicubic"`.
            If PIL version 1.1.3 or newer is installed, `"lanczos"`
            is also supported. If PIL version 3.4.0 or newer is installed,
            `"box"` and `"hamming"` are also
            supported. By default, `"nearest"` is used.
        keep_aspect_ratio: Boolean, whether to resize images to a target
            size without aspect ratio distortion. The image is cropped in
            the center with target aspect ratio before resizing.

    Returns:
        A PIL Image instance.

    Raises:
        ImportError: If PIL is not installed.
        TypeError: If `path` is neither path-like nor an `io.BytesIO`.
        ValueError: If `color_mode` or `interpolation` is invalid.
    """
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. The use of `load_img` requires PIL."
        )
    if isinstance(path, io.BytesIO):
        img = pil_image.open(path)
    elif isinstance(path, (pathlib.Path, bytes, str)):
        if isinstance(path, pathlib.Path):
            path = str(path.resolve())
        # Read the whole file into memory first, so PIL's lazy loading
        # never touches a closed file handle.
        with open(path, "rb") as f:
            img = pil_image.open(io.BytesIO(f.read()))
    else:
        raise TypeError(
            f"path should be path-like or io.BytesIO, not {type(path)}"
        )

    if color_mode == "grayscale":
        # If image is not already an 8-bit, 16-bit or 32-bit grayscale image
        # ("L", "I;16", "I"), convert it to an 8-bit grayscale image.
        if img.mode not in ("L", "I;16", "I"):
            img = img.convert("L")
    elif color_mode == "rgba":
        if img.mode != "RGBA":
            img = img.convert("RGBA")
    elif color_mode == "rgb":
        if img.mode != "RGB":
            img = img.convert("RGB")
    else:
        raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
    if target_size is not None:
        # PIL sizes are (width, height); `target_size` is (height, width).
        width_height_tuple = (target_size[1], target_size[0])
        if img.size != width_height_tuple:
            if interpolation not in PIL_INTERPOLATION_METHODS:
                raise ValueError(
                    "Invalid interpolation method {} specified. Supported "
                    "methods are {}".format(
                        interpolation,
                        ", ".join(PIL_INTERPOLATION_METHODS.keys()),
                    )
                )
            resample = PIL_INTERPOLATION_METHODS[interpolation]

            if keep_aspect_ratio:
                width, height = img.size
                target_width, target_height = width_height_tuple

                # Largest crop with the target aspect ratio, centered.
                crop_height = (width * target_height) // target_width
                crop_width = (height * target_width) // target_height

                # Set back to input height / width
                # if crop_height / crop_width is not smaller.
                crop_height = min(height, crop_height)
                crop_width = min(width, crop_width)

                crop_box_hstart = (height - crop_height) // 2
                crop_box_wstart = (width - crop_width) // 2
                crop_box_wend = crop_box_wstart + crop_width
                crop_box_hend = crop_box_hstart + crop_height
                # PIL crop box order: (left, upper, right, lower).
                crop_box = [
                    crop_box_wstart,
                    crop_box_hstart,
                    crop_box_wend,
                    crop_box_hend,
                ]
                img = img.resize(width_height_tuple, resample, box=crop_box)
            else:
                img = img.resize(width_height_tuple, resample)
    return img
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
@keras_export("keras.preprocessing.image.smart_resize")
def smart_resize(
    x,
    size,
    interpolation="bilinear",
    data_format="channels_last",
    backend_module=None,
):
    """Resize images to a target size without aspect ratio distortion.

    Image datasets typically yield images that have each a different
    size. However, these images need to be batched before they can be
    processed by Keras layers. To be batched, images need to share the same
    height and width.

    You could simply do, in TF (or JAX equivalent):

    ```python
    size = (200, 200)
    ds = ds.map(lambda img: resize(img, size))
    ```

    However, if you do this, you distort the aspect ratio of your images,
    since in general they do not all have the same aspect ratio as `size`.
    This is fine in many cases, but not always (e.g. for image generation
    models this can be a problem).

    Note that passing the argument `preserve_aspect_ratio=True` to `resize`
    will preserve the aspect ratio, but at the cost of no longer respecting
    the provided target size.

    This calls for:

    ```python
    size = (200, 200)
    ds = ds.map(lambda img: smart_resize(img, size))
    ```

    Your output images will actually be `(200, 200)`, and will not be
    distorted. Instead, the parts of the image that do not fit within the
    target size get cropped out.

    The resizing process is:

    1. Take the largest centered crop of the image that has the same aspect
    ratio as the target size. For instance, if `size=(200, 200)` and the
    input image has size `(340, 500)`, we take a crop of `(340, 340)`
    centered along the width.
    2. Resize the cropped image to the target size. In the example above,
    we resize the `(340, 340)` crop to `(200, 200)`.

    Args:
        x: Input image or batch of images (as a tensor or NumPy array).
            Must be in format `(height, width, channels)`
            or `(batch_size, height, width, channels)`.
        size: Tuple of `(height, width)` integer. Target size.
        interpolation: String, interpolation to use for resizing.
            Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
            `"lanczos3"`, `"lanczos5"`.
            Defaults to `"bilinear"`.
        data_format: `"channels_last"` or `"channels_first"`.
        backend_module: Backend module to use (if different from the default
            backend).

    Returns:
        Array with shape `(size[0], size[1], channels)`.
        If the input image was a NumPy array, the output is a NumPy array,
        and if it was a backend-native tensor,
        the output is a backend-native tensor.
    """
    backend_module = backend_module or backend
    if len(size) != 2:
        raise ValueError(
            f"Expected `size` to be a tuple of 2 integers, but got: {size}."
        )
    img = backend_module.convert_to_tensor(x)
    # NOTE: this used to be guarded by `if len(img.shape) is not None:`,
    # which is vacuously true (`len()` always returns an int, never
    # `None`), so the rank check is simply performed unconditionally.
    if len(img.shape) < 3 or len(img.shape) > 4:
        raise ValueError(
            "Expected an image array with shape `(height, width, "
            "channels)`, or `(batch_size, height, width, channels)`, but "
            f"got input with incorrect rank, of shape {img.shape}."
        )
    shape = backend_module.shape(img)
    if data_format == "channels_last":
        height, width = shape[-3], shape[-2]
    else:
        height, width = shape[-2], shape[-1]
    target_height, target_width = size

    # Compute the largest centered crop with the target aspect ratio.
    # Set back to input height / width if crop_height / crop_width is not
    # smaller.
    if isinstance(height, int) and isinstance(width, int):
        # For JAX, we need to keep the slice indices as static integers
        crop_height = int(float(width * target_height) / target_width)
        crop_height = max(min(height, crop_height), 1)
        crop_width = int(float(height * target_width) / target_height)
        crop_width = max(min(width, crop_width), 1)
        crop_box_hstart = int(float(height - crop_height) / 2)
        crop_box_wstart = int(float(width - crop_width) / 2)
    else:
        # Dynamic shapes: compute the crop geometry with backend ops.
        crop_height = backend_module.cast(
            backend_module.cast(width * target_height, "float32")
            / target_width,
            "int32",
        )
        crop_height = backend_module.numpy.minimum(height, crop_height)
        crop_height = backend_module.numpy.maximum(crop_height, 1)
        crop_height = backend_module.cast(crop_height, "int32")

        crop_width = backend_module.cast(
            backend_module.cast(height * target_width, "float32")
            / target_height,
            "int32",
        )
        crop_width = backend_module.numpy.minimum(width, crop_width)
        crop_width = backend_module.numpy.maximum(crop_width, 1)
        crop_width = backend_module.cast(crop_width, "int32")

        crop_box_hstart = backend_module.cast(
            backend_module.cast(height - crop_height, "float32") / 2, "int32"
        )
        crop_box_wstart = backend_module.cast(
            backend_module.cast(width - crop_width, "float32") / 2, "int32"
        )

    # Slice out the centered crop; the slicing axes depend on data format
    # and on whether the input carries a batch dimension.
    if data_format == "channels_last":
        if len(img.shape) == 4:
            img = img[
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
        else:
            img = img[
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
                :,
            ]
    else:
        if len(img.shape) == 4:
            img = img[
                :,
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
            ]
        else:
            img = img[
                :,
                crop_box_hstart : crop_box_hstart + crop_height,
                crop_box_wstart : crop_box_wstart + crop_width,
            ]

    img = backend_module.image.resize(
        img, size=size, interpolation=interpolation, data_format=data_format
    )

    # Preserve the input container type: NumPy in, NumPy out.
    if isinstance(x, np.ndarray):
        return np.array(img)
    return img
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/io_utils.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
from absl import logging
|
| 4 |
+
|
| 5 |
+
from keras.src.api_export import keras_export
|
| 6 |
+
from keras.src.backend.common import global_state
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@keras_export(
    [
        "keras.config.enable_interactive_logging",
        "keras.utils.enable_interactive_logging",
    ]
)
def enable_interactive_logging():
    """Turn on interactive logging.

    When interactive logging is enabled, Keras displays logs via stdout.
    This provides the best experience when using Keras in an interactive
    environment such as a shell or a notebook.
    """
    # Stored in Keras' global state so that every reader of the flag
    # (e.g. `print_msg`) sees the setting process-wide.
    global_state.set_global_attribute("interactive_logging", True)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@keras_export(
    [
        "keras.config.disable_interactive_logging",
        "keras.utils.disable_interactive_logging",
    ]
)
def disable_interactive_logging():
    """Turn off interactive logging.

    When interactive logging is disabled, Keras sends logs to `absl.logging`.
    This is the best option when using Keras in a non-interactive
    way, such as running a training or inference job on a server.
    """
    # Mirror of `enable_interactive_logging`: flip the process-wide flag.
    global_state.set_global_attribute("interactive_logging", False)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@keras_export(
    [
        "keras.config.is_interactive_logging_enabled",
        "keras.utils.is_interactive_logging_enabled",
    ]
)
def is_interactive_logging_enabled():
    """Check if interactive logging is enabled.

    To switch between writing logs to stdout and `absl.logging`, you may use
    `keras.config.enable_interactive_logging()` and
    `keras.config.disable_interactive_logging()`.

    Returns:
        Boolean, `True` if interactive logging is enabled,
        and `False` otherwise.
    """
    # Interactive logging defaults to enabled when the flag was never set.
    return global_state.get_global_attribute("interactive_logging", True)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def set_logging_verbosity(level):
    """Sets the verbosity level for logging.

    Supported log levels are as follows:

    - `"FATAL"` (least verbose)
    - `"ERROR"`
    - `"WARNING"`
    - `"INFO"`
    - `"DEBUG"` (most verbose)

    Args:
        level: A string corresponding to the level of verbosity for logging.

    Raises:
        ValueError: If `level` is not one of the supported level names.
    """
    # Map the public string names onto the absl logging constants.
    valid_levels = {
        "FATAL": logging.FATAL,
        "ERROR": logging.ERROR,
        "WARNING": logging.WARNING,
        "INFO": logging.INFO,
        "DEBUG": logging.DEBUG,
    }
    if level not in valid_levels:
        raise ValueError(
            "Please pass a valid level for logging verbosity. "
            f"Expected one of: {set(valid_levels.keys())}. "
            f"Received: {level}"
        )
    logging.set_verbosity(valid_levels[level])
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def print_msg(message, line_break=True):
    """Print the message to absl logging or stdout.

    Args:
        message: Object to print; converted with `str()` first.
        line_break: Whether to append a trailing newline when writing to
            stdout (interactive mode only).
    """
    text = str(message)
    if not is_interactive_logging_enabled():
        # Non-interactive mode: route through absl logging.
        logging.info(text)
        return

    if line_break:
        text = text + "\n"
    try:
        sys.stdout.write(text)
    except UnicodeEncodeError:
        # If the encoding differs from UTF-8, `sys.stdout.write` may fail.
        # Replace special unicode characters in the message, then round-trip
        # through the target encoding, dropping what it cannot represent.
        text = _replace_special_unicode_character(text)
        encoded = text.encode(sys.stdout.encoding, errors="ignore")
        sys.stdout.write(encoded.decode(sys.stdout.encoding))
    sys.stdout.flush()
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def ask_to_proceed_with_overwrite(filepath):
    """Produces a prompt asking about overwriting a file.

    Args:
        filepath: the path to the file to be overwritten.

    Returns:
        True if we can proceed with overwrite, False otherwise.
    """
    answer = (
        input(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
        .strip()
        .lower()
    )
    # Keep prompting until the user gives an unambiguous answer.
    while answer != "y" and answer != "n":
        answer = (
            input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
        )
    if answer == "y":
        print_msg("[TIP] Next time specify overwrite=True!")
        return True
    return False
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def _replace_special_unicode_character(message):
|
| 137 |
+
message = str(message).replace("━", "=") # Fall back to Keras2 behavior.
|
| 138 |
+
return message
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_layer.py
ADDED
|
@@ -0,0 +1,677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from keras.src import backend
|
| 6 |
+
from keras.src import tree
|
| 7 |
+
from keras.src.api_export import keras_export
|
| 8 |
+
from keras.src.layers.layer import Layer
|
| 9 |
+
from keras.src.saving import serialization_lib
|
| 10 |
+
from keras.src.utils import jax_utils
|
| 11 |
+
from keras.src.utils import tracking
|
| 12 |
+
from keras.src.utils.module_utils import jax
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@keras_export("keras.layers.JaxLayer")
|
| 16 |
+
class JaxLayer(Layer):
|
| 17 |
+
"""Keras Layer that wraps a JAX model.
|
| 18 |
+
|
| 19 |
+
This layer enables the use of JAX components within Keras when using JAX as
|
| 20 |
+
the backend for Keras.
|
| 21 |
+
|
| 22 |
+
## Model function
|
| 23 |
+
|
| 24 |
+
This layer accepts JAX models in the form of a function, `call_fn`, which
|
| 25 |
+
must take the following arguments with these exact names:
|
| 26 |
+
|
| 27 |
+
- `params`: trainable parameters of the model.
|
| 28 |
+
- `state` (*optional*): non-trainable state of the model. Can be omitted if
|
| 29 |
+
the model has no non-trainable state.
|
| 30 |
+
- `rng` (*optional*): a `jax.random.PRNGKey` instance. Can be omitted if the
|
| 31 |
+
model does not need RNGs, neither during training nor during inference.
|
| 32 |
+
- `inputs`: inputs to the model, a JAX array or a `PyTree` of arrays.
|
| 33 |
+
- `training` (*optional*): an argument specifying if we're in training mode
|
| 34 |
+
or inference mode, `True` is passed in training mode. Can be omitted if
|
| 35 |
+
the model behaves the same in training mode and inference mode.
|
| 36 |
+
|
| 37 |
+
The `inputs` argument is mandatory. Inputs to the model must be provided via
|
| 38 |
+
a single argument. If the JAX model takes multiple inputs as separate
|
| 39 |
+
arguments, they must be combined into a single structure, for instance in a
|
| 40 |
+
`tuple` or a `dict`.
|
| 41 |
+
|
| 42 |
+
## Model weights initialization
|
| 43 |
+
|
| 44 |
+
The initialization of the `params` and `state` of the model can be handled
|
| 45 |
+
by this layer, in which case the `init_fn` argument must be provided. This
|
| 46 |
+
allows the model to be initialized dynamically with the right shape.
|
| 47 |
+
Alternatively, and if the shape is known, the `params` argument and
|
| 48 |
+
optionally the `state` argument can be used to create an already initialized
|
| 49 |
+
model.
|
| 50 |
+
|
| 51 |
+
The `init_fn` function, if provided, must take the following arguments with
|
| 52 |
+
these exact names:
|
| 53 |
+
|
| 54 |
+
- `rng`: a `jax.random.PRNGKey` instance.
|
| 55 |
+
- `inputs`: a JAX array or a `PyTree` of arrays with placeholder values to
|
| 56 |
+
provide the shape of the inputs.
|
| 57 |
+
- `training` (*optional*): an argument specifying if we're in training mode
|
| 58 |
+
or inference mode. `True` is always passed to `init_fn`. Can be omitted
|
| 59 |
+
regardless of whether `call_fn` has a `training` argument.
|
| 60 |
+
|
| 61 |
+
## Models with non-trainable state
|
| 62 |
+
|
| 63 |
+
For JAX models that have non-trainable state:
|
| 64 |
+
|
| 65 |
+
- `call_fn` must have a `state` argument
|
| 66 |
+
- `call_fn` must return a `tuple` containing the outputs of the model and
|
| 67 |
+
the new non-trainable state of the model
|
| 68 |
+
- `init_fn` must return a `tuple` containing the initial trainable params of
|
| 69 |
+
the model and the initial non-trainable state of the model.
|
| 70 |
+
|
| 71 |
+
This code shows a possible combination of `call_fn` and `init_fn` signatures
|
| 72 |
+
for a model with non-trainable state. In this example, the model has a
|
| 73 |
+
`training` argument and an `rng` argument in `call_fn`.
|
| 74 |
+
|
| 75 |
+
```python
|
| 76 |
+
def stateful_call(params, state, rng, inputs, training):
|
| 77 |
+
outputs = ...
|
| 78 |
+
new_state = ...
|
| 79 |
+
return outputs, new_state
|
| 80 |
+
|
| 81 |
+
def stateful_init(rng, inputs):
|
| 82 |
+
initial_params = ...
|
| 83 |
+
initial_state = ...
|
| 84 |
+
return initial_params, initial_state
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
## Models without non-trainable state
|
| 88 |
+
|
| 89 |
+
For JAX models with no non-trainable state:
|
| 90 |
+
|
| 91 |
+
- `call_fn` must not have a `state` argument
|
| 92 |
+
- `call_fn` must return only the outputs of the model
|
| 93 |
+
- `init_fn` must return only the initial trainable params of the model.
|
| 94 |
+
|
| 95 |
+
This code shows a possible combination of `call_fn` and `init_fn` signatures
|
| 96 |
+
for a model without non-trainable state. In this example, the model does not
|
| 97 |
+
have a `training` argument and does not have an `rng` argument in `call_fn`.
|
| 98 |
+
|
| 99 |
+
```python
|
| 100 |
+
def stateless_call(params, inputs):
|
| 101 |
+
outputs = ...
|
| 102 |
+
return outputs
|
| 103 |
+
|
| 104 |
+
def stateless_init(rng, inputs):
|
| 105 |
+
initial_params = ...
|
| 106 |
+
return initial_params
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
## Conforming to the required signature
|
| 110 |
+
|
| 111 |
+
If a model has a different signature than the one required by `JaxLayer`,
|
| 112 |
+
one can easily write a wrapper method to adapt the arguments. This example
|
| 113 |
+
shows a model that has multiple inputs as separate arguments, expects
|
| 114 |
+
multiple RNGs in a `dict`, and has a `deterministic` argument with the
|
| 115 |
+
opposite meaning of `training`. To conform, the inputs are combined in a
|
| 116 |
+
    single structure using a `tuple`, the RNG is split and used to populate the
|
| 117 |
+
expected `dict`, and the Boolean flag is negated:
|
| 118 |
+
|
| 119 |
+
```python
|
| 120 |
+
def my_model_fn(params, rngs, input1, input2, deterministic):
|
| 121 |
+
...
|
| 122 |
+
if not deterministic:
|
| 123 |
+
dropout_rng = rngs["dropout"]
|
| 124 |
+
keep = jax.random.bernoulli(dropout_rng, dropout_rate, x.shape)
|
| 125 |
+
x = jax.numpy.where(keep, x / dropout_rate, 0)
|
| 126 |
+
...
|
| 127 |
+
...
|
| 128 |
+
return outputs
|
| 129 |
+
|
| 130 |
+
def my_model_wrapper_fn(params, rng, inputs, training):
|
| 131 |
+
input1, input2 = inputs
|
| 132 |
+
rng1, rng2 = jax.random.split(rng)
|
| 133 |
+
rngs = {"dropout": rng1, "preprocessing": rng2}
|
| 134 |
+
deterministic = not training
|
| 135 |
+
return my_model_fn(params, rngs, input1, input2, deterministic)
|
| 136 |
+
|
| 137 |
+
keras_layer = JaxLayer(my_model_wrapper_fn, params=initial_params)
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
## Usage with Haiku modules
|
| 141 |
+
|
| 142 |
+
`JaxLayer` enables the use of [Haiku](https://dm-haiku.readthedocs.io)
|
| 143 |
+
components in the form of
|
| 144 |
+
[`haiku.Module`](https://dm-haiku.readthedocs.io/en/latest/api.html#module).
|
| 145 |
+
This is achieved by transforming the module per the Haiku pattern and then
|
| 146 |
+
passing `module.apply` in the `call_fn` parameter and `module.init` in the
|
| 147 |
+
`init_fn` parameter if needed.
|
| 148 |
+
|
| 149 |
+
If the model has non-trainable state, it should be transformed with
|
| 150 |
+
[`haiku.transform_with_state`](
|
| 151 |
+
https://dm-haiku.readthedocs.io/en/latest/api.html#haiku.transform_with_state).
|
| 152 |
+
If the model has no non-trainable state, it should be transformed with
|
| 153 |
+
[`haiku.transform`](
|
| 154 |
+
https://dm-haiku.readthedocs.io/en/latest/api.html#haiku.transform).
|
| 155 |
+
Additionally, and optionally, if the module does not use RNGs in "apply", it
|
| 156 |
+
can be transformed with
|
| 157 |
+
[`haiku.without_apply_rng`](
|
| 158 |
+
https://dm-haiku.readthedocs.io/en/latest/api.html#without-apply-rng).
|
| 159 |
+
|
| 160 |
+
The following example shows how to create a `JaxLayer` from a Haiku module
|
| 161 |
+
that uses random number generators via `hk.next_rng_key()` and takes a
|
| 162 |
+
training positional argument:
|
| 163 |
+
|
| 164 |
+
```python
|
| 165 |
+
class MyHaikuModule(hk.Module):
|
| 166 |
+
def __call__(self, x, training):
|
| 167 |
+
x = hk.Conv2D(32, (3, 3))(x)
|
| 168 |
+
x = jax.nn.relu(x)
|
| 169 |
+
x = hk.AvgPool((1, 2, 2, 1), (1, 2, 2, 1), "VALID")(x)
|
| 170 |
+
x = hk.Flatten()(x)
|
| 171 |
+
x = hk.Linear(200)(x)
|
| 172 |
+
if training:
|
| 173 |
+
x = hk.dropout(rng=hk.next_rng_key(), rate=0.3, x=x)
|
| 174 |
+
x = jax.nn.relu(x)
|
| 175 |
+
x = hk.Linear(10)(x)
|
| 176 |
+
x = jax.nn.softmax(x)
|
| 177 |
+
return x
|
| 178 |
+
|
| 179 |
+
def my_haiku_module_fn(inputs, training):
|
| 180 |
+
module = MyHaikuModule()
|
| 181 |
+
return module(inputs, training)
|
| 182 |
+
|
| 183 |
+
transformed_module = hk.transform(my_haiku_module_fn)
|
| 184 |
+
|
| 185 |
+
keras_layer = JaxLayer(
|
| 186 |
+
call_fn=transformed_module.apply,
|
| 187 |
+
init_fn=transformed_module.init,
|
| 188 |
+
)
|
| 189 |
+
```
|
| 190 |
+
|
| 191 |
+
Args:
|
| 192 |
+
call_fn: The function to call the model. See description above for the
|
| 193 |
+
list of arguments it takes and the outputs it returns.
|
| 194 |
+
init_fn: the function to call to initialize the model. See description
|
| 195 |
+
above for the list of arguments it takes and the outputs it returns.
|
| 196 |
+
If `None`, then `params` and/or `state` must be provided.
|
| 197 |
+
params: A `PyTree` containing all the model trainable parameters. This
|
| 198 |
+
allows passing trained parameters or controlling the initialization.
|
| 199 |
+
If both `params` and `state` are `None`, `init_fn` is called at
|
| 200 |
+
build time to initialize the trainable parameters of the model.
|
| 201 |
+
state: A `PyTree` containing all the model non-trainable state. This
|
| 202 |
+
allows passing learned state or controlling the initialization. If
|
| 203 |
+
both `params` and `state` are `None`, and `call_fn` takes a `state`
|
| 204 |
+
argument, then `init_fn` is called at build time to initialize the
|
| 205 |
+
non-trainable state of the model.
|
| 206 |
+
seed: Seed for random number generator. Optional.
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
    def __init__(
        self,
        call_fn,
        init_fn=None,
        params=None,
        state=None,
        seed=None,
        **kwargs,
    ):
        # This layer wraps raw JAX functions, so it only works when Keras
        # itself runs on the JAX backend.
        if backend.backend() != "jax":
            raise ValueError(
                "JaxLayer is only supported with the JAX backend. Current "
                f"backend: {backend.backend()}"
            )

        # With no init function and no provided values there is no way to
        # ever obtain weights for the model.
        if init_fn is None and params is None and state is None:
            raise ValueError(
                "`init_fn`, `params` and `state` cannot all be `None`."
            )

        super().__init__(**kwargs)
        self.call_fn = call_fn
        self.init_fn = init_fn
        self.seed_generator = backend.random.SeedGenerator(seed)
        # NOTE: `_create_variables` also assigns `self.params` /
        # `self.state` as a side effect; the returned flat lists exist only
        # so Keras tracks the variables.
        self.tracked_params = self._create_variables(params, trainable=True)
        self.tracked_state = self._create_variables(state, trainable=False)
        # If the caller supplied weights, no deferred `build` is needed.
        if self.params is not None or self.state is not None:
            self.built = True

        # Record which of the supported argument names `call_fn` declares,
        # in order, so `call` can assemble its argument list positionally.
        self.call_fn_arguments = self._validate_signature(
            call_fn,
            "call_fn",
            {"params", "state", "rng", "inputs", "training"},
            {"inputs"},
        )
        self.has_state = "state" in self.call_fn_arguments

        if init_fn:
            self.init_fn_arguments = self._validate_signature(
                init_fn, "init_fn", {"rng", "inputs", "training"}, {"inputs"}
            )
|
| 250 |
+
|
| 251 |
+
def _validate_signature(self, fn, fn_name, allowed, required):
|
| 252 |
+
fn_parameters = inspect.signature(fn).parameters
|
| 253 |
+
for parameter_name in required:
|
| 254 |
+
if parameter_name not in fn_parameters:
|
| 255 |
+
raise ValueError(
|
| 256 |
+
f"Missing required argument in `{fn_name}`: "
|
| 257 |
+
f"`{parameter_name}`"
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
parameter_names = []
|
| 261 |
+
for parameter in fn_parameters.values():
|
| 262 |
+
if parameter.name not in allowed:
|
| 263 |
+
raise ValueError(
|
| 264 |
+
f"Unsupported argument in `{fn_name}`: `{parameter.name}`, "
|
| 265 |
+
f"supported arguments are `{'`, `'.join(allowed)}`"
|
| 266 |
+
)
|
| 267 |
+
parameter_names.append(parameter.name)
|
| 268 |
+
|
| 269 |
+
return parameter_names
|
| 270 |
+
|
| 271 |
+
    @tracking.no_automatic_dependency_tracking
    def _create_variables(self, values, trainable):
        """Create a structure of variables from a structure of JAX arrays.

        `values` is traversed via JAX's `tree_map`. When a leaf is a JAX array
        or a tensor-like object, a corresponding variable is created with it as
        the initial value. The resulting structure of variables is assigned to
        `self.params` or `self.state` depending on `trainable`. Then, a
        flattened version of the variables is returned for tracking.
        `self.params` or `self.state` are intentionally not tracked because
        structures like `TrackedList` interfere with `jax.tree_utils`.
        Note that leaf objects that are not JAX arrays and not tensor-like are
        left intact as they are assumed to be configuration used by the model.

        Args:
            values: the structure of values to traverse.
            trainable: whether to create trainable variables.

        Returns:
            flat list of variables initialized with `values` for tracking.
        """

        def create_variable(value):
            if backend.is_tensor(value) or isinstance(value, np.ndarray):
                # Tensor-like leaf: create a variable of the same shape and
                # copy the value in ("zeros" is only a placeholder init).
                variable = self.add_weight(
                    value.shape, initializer="zeros", trainable=trainable
                )
                variable.assign(value)
                return variable
            elif isinstance(value, (np.generic, int, float)):
                # Scalar leaf: wrap it in a rank-0 variable.
                variable = self.add_weight(
                    (), initializer="zeros", trainable=trainable
                )
                variable.assign(value)
                return variable
            else:
                # Non-tensor leaf: assumed to be model configuration, kept
                # as-is inside the structure.
                return value

        # Use JAX's tree_map as it understands registered classes.
        variables = jax.tree_util.tree_map(create_variable, values)

        if trainable:
            self.params = variables
        else:
            self.state = variables

        flat_variables, _ = jax.tree_util.tree_flatten(variables)
        return flat_variables
|
| 319 |
+
|
| 320 |
+
    def _get_init_rng(self):
        """
        Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `init_fn`.

        By default, this returns a single `PRNGKey` retrieved by calling
        `self.seed_generator.next()`. Override this to return a different
        structure.

        Returns:
            a JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as
            the `rng` argument of `init_fn`.
        """
        # A fresh key each call; subclasses may split it or return a dict.
        return self.seed_generator.next()
|
| 333 |
+
|
| 334 |
+
def _get_call_rng(self, training):
|
| 335 |
+
"""
|
| 336 |
+
Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `call_fn`.
|
| 337 |
+
|
| 338 |
+
By default, this returns a single `PRNGKey` retrieved by calling
|
| 339 |
+
`self.seed_generator.next()` when `training` is `True`, and `None` when
|
| 340 |
+
`training` is `False`. Override this to return a different structure or
|
| 341 |
+
to pass RNGs in inference mode too.
|
| 342 |
+
|
| 343 |
+
Returns:
|
| 344 |
+
a JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as
|
| 345 |
+
the `rng` argument of `call_fn`.
|
| 346 |
+
"""
|
| 347 |
+
if training:
|
| 348 |
+
return self.seed_generator.next()
|
| 349 |
+
else:
|
| 350 |
+
return None
|
| 351 |
+
|
| 352 |
+
    def build(self, input_shape):
        """Initialize `params`/`state` by calling `init_fn`, if needed."""
        # Weights were provided at construction time; nothing to initialize.
        if self.params is not None or self.state is not None:
            return

        if jax_utils.is_in_jax_tracing_scope():
            # This exception is not actually shown, it is caught and a detailed
            # warning about calling 'build' is printed.
            raise ValueError("'JaxLayer' cannot be built in tracing scope")

        # Initialize `params` and `state` if needed by calling `init_fn`.
        def create_input(shape):
            # Unknown (None) dimensions are materialized as size 1 so that
            # a concrete placeholder array can be created.
            shape = [d if d is not None else 1 for d in shape]
            return jax.numpy.ones(shape)

        init_inputs = tree.map_shape_structure(create_input, input_shape)
        # Assemble the positional arguments in the exact order `init_fn`
        # declares them (recorded by `_validate_signature`).
        init_args = []
        for argument_name in self.init_fn_arguments:
            if argument_name == "rng":
                init_args.append(self._get_init_rng())
            elif argument_name == "inputs":
                init_args.append(init_inputs)
            elif argument_name == "training":
                # Initialization is always performed in training mode.
                init_args.append(True)

        init_result = self.init_fn(*init_args)
        # Stateful models return `(params, state)`; stateless ones return
        # only `params`.
        if self.has_state:
            init_params, init_state = init_result
        else:
            init_params, init_state = init_result, None

        self.tracked_params = self._create_variables(
            init_params, trainable=True
        )
        self.tracked_state = self._create_variables(init_state, trainable=False)
        self.built = True
|
| 387 |
+
|
| 388 |
+
    def call(self, inputs, training=False):
        """Forward pass: invoke `call_fn` and sync any returned state back."""

        def unwrap_variable(variable):
            # Pass raw values (not Keras variables) to the JAX function.
            return None if variable is None else variable.value

        # Assemble the positional arguments in the exact order `call_fn`
        # declares them (recorded by `_validate_signature`).
        call_args = []
        for argument_name in self.call_fn_arguments:
            if argument_name == "params":
                call_args.append(
                    jax.tree_util.tree_map(unwrap_variable, self.params)
                )
            elif argument_name == "state":
                call_args.append(
                    jax.tree_util.tree_map(unwrap_variable, self.state)
                )
            elif argument_name == "rng":
                call_args.append(self._get_call_rng(training))
            elif argument_name == "inputs":
                call_args.append(inputs)
            elif argument_name == "training":
                call_args.append(training)

        def assign_state_to_variable(value, variable):
            # This exists only to make debugging this error case easier.
            if not hasattr(variable, "assign"):
                raise ValueError(
                    "Structure mismatch: the structure of the state returned "
                    "by `call` does not match the structure of the state at "
                    "initialization time."
                )
            variable.assign(value)

        if self.has_state:
            # Stateful: `call_fn` returns `(outputs, new_state)`; write the
            # new state back into the layer's non-trainable variables.
            predictions, new_state = self.call_fn(*call_args)
            jax.tree_util.tree_map(
                assign_state_to_variable, new_state, self.state
            )
            return predictions
        else:
            return self.call_fn(*call_args)
|
| 427 |
+
|
| 428 |
+
def get_config(self):
|
| 429 |
+
config = {
|
| 430 |
+
"call_fn": serialization_lib.serialize_keras_object(self.call_fn),
|
| 431 |
+
"init_fn": serialization_lib.serialize_keras_object(self.init_fn),
|
| 432 |
+
}
|
| 433 |
+
base_config = super().get_config()
|
| 434 |
+
return dict(list(base_config.items()) + list(config.items()))
|
| 435 |
+
|
| 436 |
+
@classmethod
|
| 437 |
+
def from_config(cls, config):
|
| 438 |
+
call_fn = serialization_lib.deserialize_keras_object(config["call_fn"])
|
| 439 |
+
init_fn = serialization_lib.deserialize_keras_object(config["init_fn"])
|
| 440 |
+
config["call_fn"] = call_fn
|
| 441 |
+
config["init_fn"] = init_fn
|
| 442 |
+
return super().from_config(config)
|
| 443 |
+
|
| 444 |
+
|
| 445 |
+
@keras_export("keras.layers.FlaxLayer")
|
| 446 |
+
class FlaxLayer(JaxLayer):
|
| 447 |
+
"""Keras Layer that wraps a [Flax](https://flax.readthedocs.io) module.
|
| 448 |
+
|
| 449 |
+
This layer enables the use of Flax components in the form of
|
| 450 |
+
[`flax.linen.Module`](
|
| 451 |
+
https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html)
|
| 452 |
+
instances within Keras when using JAX as the backend for Keras.
|
| 453 |
+
|
| 454 |
+
The module method to use for the forward pass can be specified via the
|
| 455 |
+
`method` argument and is `__call__` by default. This method must take the
|
| 456 |
+
following arguments with these exact names:
|
| 457 |
+
|
| 458 |
+
- `self` if the method is bound to the module, which is the case for the
|
| 459 |
+
default of `__call__`, and `module` otherwise to pass the module.
|
| 460 |
+
- `inputs`: the inputs to the model, a JAX array or a `PyTree` of arrays.
|
| 461 |
+
- `training` *(optional)*: an argument specifying if we're in training mode
|
| 462 |
+
or inference mode, `True` is passed in training mode.
|
| 463 |
+
|
| 464 |
+
`FlaxLayer` handles the non-trainable state of your model and required RNGs
|
| 465 |
+
automatically. Note that the `mutable` parameter of
|
| 466 |
+
[`flax.linen.Module.apply()`](
|
| 467 |
+
https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.apply)
|
| 468 |
+
is set to `DenyList(["params"])`, therefore making the assumption that all
|
| 469 |
+
the variables outside of the "params" collection are non-trainable weights.
|
| 470 |
+
|
| 471 |
+
This example shows how to create a `FlaxLayer` from a Flax `Module` with
|
| 472 |
+
the default `__call__` method and no training argument:
|
| 473 |
+
|
| 474 |
+
```python
|
| 475 |
+
class MyFlaxModule(flax.linen.Module):
|
| 476 |
+
@flax.linen.compact
|
| 477 |
+
def __call__(self, inputs):
|
| 478 |
+
x = inputs
|
| 479 |
+
x = flax.linen.Conv(features=32, kernel_size=(3, 3))(x)
|
| 480 |
+
x = flax.linen.relu(x)
|
| 481 |
+
x = flax.linen.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
|
| 482 |
+
x = x.reshape((x.shape[0], -1)) # flatten
|
| 483 |
+
x = flax.linen.Dense(features=200)(x)
|
| 484 |
+
x = flax.linen.relu(x)
|
| 485 |
+
x = flax.linen.Dense(features=10)(x)
|
| 486 |
+
x = flax.linen.softmax(x)
|
| 487 |
+
return x
|
| 488 |
+
|
| 489 |
+
flax_module = MyFlaxModule()
|
| 490 |
+
keras_layer = FlaxLayer(flax_module)
|
| 491 |
+
```
|
| 492 |
+
|
| 493 |
+
This example shows how to wrap the module method to conform to the required
|
| 494 |
+
signature. This allows having multiple input arguments and a training
|
| 495 |
+
argument that has a different name and values. This additionally shows how
|
| 496 |
+
to use a function that is not bound to the module.
|
| 497 |
+
|
| 498 |
+
```python
|
| 499 |
+
class MyFlaxModule(flax.linen.Module):
|
| 500 |
+
@flax.linen.compact
|
| 501 |
+
def forward(self, input1, input2, deterministic):
|
| 502 |
+
...
|
| 503 |
+
return outputs
|
| 504 |
+
|
| 505 |
+
def my_flax_module_wrapper(module, inputs, training):
|
| 506 |
+
input1, input2 = inputs
|
| 507 |
+
return module.forward(input1, input2, not training)
|
| 508 |
+
|
| 509 |
+
flax_module = MyFlaxModule()
|
| 510 |
+
keras_layer = FlaxLayer(
|
| 511 |
+
module=flax_module,
|
| 512 |
+
method=my_flax_module_wrapper,
|
| 513 |
+
)
|
| 514 |
+
```
|
| 515 |
+
|
| 516 |
+
Args:
|
| 517 |
+
module: An instance of `flax.linen.Module` or subclass.
|
| 518 |
+
method: The method to call the model. This is generally a method in the
|
| 519 |
+
`Module`. If not provided, the `__call__` method is used. `method`
|
| 520 |
+
can also be a function not defined in the `Module`, in which case it
|
| 521 |
+
must take the `Module` as the first argument. It is used for both
|
| 522 |
+
`Module.init` and `Module.apply`. Details are documented in the
|
| 523 |
+
`method` argument of [`flax.linen.Module.apply()`](
|
| 524 |
+
https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.apply).
|
| 525 |
+
variables: A `dict` containing all the variables of the module in the
|
| 526 |
+
same format as what is returned by [`flax.linen.Module.init()`](
|
| 527 |
+
https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html#flax.linen.init).
|
| 528 |
+
It should contain a "params" key and, if applicable, other keys for
|
| 529 |
+
collections of variables for non-trainable state. This allows
|
| 530 |
+
passing trained parameters and learned non-trainable state or
|
| 531 |
+
controlling the initialization. If `None` is passed, the module's
|
| 532 |
+
`init` function is called at build time to initialize the variables
|
| 533 |
+
of the model.
|
| 534 |
+
"""
|
| 535 |
+
|
| 536 |
+
def __init__(
    self,
    module,
    method=None,
    variables=None,
    **kwargs,
):
    """Wrap a Flax `Module` as a Keras layer.

    Builds `call_fn`/`init_fn` closures around `module.apply` and
    `module.init` and hands them to the parent class, splitting the Flax
    variables dict into trainable params and non-trainable state.
    """
    # Late import to only require Flax when this is used.
    from flax.core import scope as flax_scope

    if backend.backend() != "jax":
        raise ValueError(
            "FlaxLayer is only supported with the JAX backend. Current "
            f"backend: {backend.backend()}"
        )

    self.module = module
    self.method = method

    # Everything outside the "params" collection is treated as mutable,
    # non-trainable state (see the class docstring).
    apply_mutable = flax_scope.DenyList(["params"])

    def apply_with_training(params, state, rng, inputs, training):
        # Forward pass used when the wrapped method accepts `training`.
        return self.module.apply(
            self._params_and_state_to_variables(params, state),
            inputs,
            rngs=rng,
            method=self.method,
            mutable=apply_mutable,
            training=training,
        )

    def apply_without_training(params, state, rng, inputs):
        # Forward pass used when the wrapped method has no `training` arg.
        return self.module.apply(
            self._params_and_state_to_variables(params, state),
            inputs,
            rngs=rng,
            method=self.method,
            mutable=apply_mutable,
        )

    def init_with_training(rng, inputs, training):
        # Variable initialization when the method accepts `training`.
        return self._variables_to_params_and_state(
            self.module.init(
                rng,
                inputs,
                method=self.method,
                training=training,
            )
        )

    def init_without_training(rng, inputs):
        # Variable initialization when the method has no `training` arg.
        return self._variables_to_params_and_state(
            self.module.init(
                rng,
                inputs,
                method=self.method,
            )
        )

    # Pick the variants based on whether the called method declares a
    # `training` parameter in its signature.
    if (
        "training"
        in inspect.signature(method or module.__call__).parameters
    ):
        call_fn, init_fn = apply_with_training, init_with_training
    else:
        call_fn, init_fn = apply_without_training, init_without_training

    # `variables` may be None, in which case params/state are both None
    # and initialization happens at build time via `init_fn`.
    params, state = self._variables_to_params_and_state(variables)

    super().__init__(
        call_fn=call_fn,
        init_fn=init_fn,
        params=params,
        state=state,
        **kwargs,
    )
|
| 612 |
+
|
| 613 |
+
def _params_and_state_to_variables(self, params, state):
|
| 614 |
+
if params:
|
| 615 |
+
if state:
|
| 616 |
+
return {**params, **state}
|
| 617 |
+
else:
|
| 618 |
+
return params
|
| 619 |
+
elif state:
|
| 620 |
+
return state
|
| 621 |
+
return {}
|
| 622 |
+
|
| 623 |
+
def _variables_to_params_and_state(self, variables):
|
| 624 |
+
# neither params nor state
|
| 625 |
+
if variables is None:
|
| 626 |
+
return None, None
|
| 627 |
+
# state only
|
| 628 |
+
if "params" not in variables:
|
| 629 |
+
return {}, variables
|
| 630 |
+
# params only
|
| 631 |
+
if len(variables) == 1:
|
| 632 |
+
return variables, {}
|
| 633 |
+
# both, we need to split
|
| 634 |
+
params = {"params": variables["params"]}
|
| 635 |
+
state = {k: v for k, v in variables.items() if k != "params"}
|
| 636 |
+
return params, state
|
| 637 |
+
|
| 638 |
+
def _get_init_rng(self):
|
| 639 |
+
return {
|
| 640 |
+
"params": self.seed_generator.next(),
|
| 641 |
+
"dropout": self.seed_generator.next(),
|
| 642 |
+
}
|
| 643 |
+
|
| 644 |
+
def _get_call_rng(self, training):
|
| 645 |
+
if training:
|
| 646 |
+
return {"dropout": self.seed_generator.next()}
|
| 647 |
+
else:
|
| 648 |
+
return {}
|
| 649 |
+
|
| 650 |
+
def get_config(self):
    """Return the serializable config of this layer."""
    config_method = self.method
    if (
        hasattr(self.method, "__self__")
        and self.method.__self__ == self.module
    ):
        # A method bound to the module is serialized by name.
        config_method = self.method.__name__
    config = {
        "module": serialization_lib.serialize_keras_object(self.module),
        "method": serialization_lib.serialize_keras_object(config_method),
    }
    base_config = super().get_config()
    # call_fn and init_fn come from module, do not save them.
    base_config.pop("call_fn")
    base_config.pop("init_fn")
    return dict(list(base_config.items()) + list(config.items()))
|
| 667 |
+
|
| 668 |
+
@classmethod
def from_config(cls, config):
    """Recreate a `FlaxLayer` from its `get_config()` output."""
    module = serialization_lib.deserialize_keras_object(config["module"])
    method = serialization_lib.deserialize_keras_object(config["method"])
    if isinstance(config["method"], str):
        # Deserialize bound method from the module: `get_config` stores a
        # module-bound method as its name, so resolve it by attribute.
        method = getattr(module, method)
    config["module"] = module
    config["method"] = method
    return cls(**config)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/jax_utils.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def is_in_jax_tracing_scope(x=None):
    """Return True if `x` is a JAX `Tracer` instance.

    When `x` is None, a fresh scalar array is created via the backend; it
    will be a Tracer only if this code runs inside a JAX trace. Always
    returns False when the active Keras backend is not "jax".
    """
    if backend.backend() == "jax":
        if x is None:
            x = backend.numpy.ones(())
        # Walk the MRO because the concrete class may be a Tracer subclass;
        # match by class name plus a "jax" module prefix.
        for c in x.__class__.__mro__:
            if c.__name__ == "Tracer" and c.__module__.startswith("jax"):
                return True
    return False
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/model_visualization.py
ADDED
|
@@ -0,0 +1,487 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utilities related to model visualization."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
|
| 6 |
+
from keras.src import tree
|
| 7 |
+
from keras.src.api_export import keras_export
|
| 8 |
+
from keras.src.utils import io_utils
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
import pydot
|
| 12 |
+
except ImportError:
|
| 13 |
+
# pydot_ng and pydotplus are older forks of pydot
|
| 14 |
+
# which may still be used by some users
|
| 15 |
+
try:
|
| 16 |
+
import pydot_ng as pydot
|
| 17 |
+
except ImportError:
|
| 18 |
+
try:
|
| 19 |
+
import pydotplus as pydot
|
| 20 |
+
except ImportError:
|
| 21 |
+
pydot = None
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def check_pydot():
    """Return whether pydot (or one of its forks) was importable."""
    available = pydot is not None
    return available
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def check_graphviz():
    """Return True when both pydot and a working Graphviz install exist."""
    if not check_pydot():
        return False
    try:
        # Rendering an empty graph exercises the Graphviz executables,
        # which is the real thing we want to verify.
        pydot.Dot.create(pydot.Dot())
    except (OSError, pydot.PydotException):
        return False
    return True
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def add_edge(dot, src, dst):
    """Add a thick edge `src -> dst` to `dot`, skipping duplicates."""
    if dot.get_edge(src, dst):
        return
    new_edge = pydot.Edge(src, dst)
    new_edge.set("penwidth", "2")
    dot.add_edge(new_edge)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def get_layer_activation_name(layer):
    """Best-effort human-readable name for `layer.activation`.

    Prefers a `name` attribute, then `__name__` (plain functions),
    falling back to `str()`.
    """
    activation = layer.activation
    if hasattr(activation, "name"):
        return activation.name
    if hasattr(activation, "__name__"):
        return activation.__name__
    return str(activation)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def make_layer_label(layer, **kwargs):
    """Build the HTML-like pydot table label for `layer`.

    Expects exactly the keys `show_layer_names`, `show_layer_activations`,
    `show_dtype`, `show_shapes` and `show_trainable` in `kwargs`; any
    extra key raises `ValueError`.
    """
    class_name = layer.__class__.__name__

    show_layer_names = kwargs.pop("show_layer_names")
    show_layer_activations = kwargs.pop("show_layer_activations")
    show_dtype = kwargs.pop("show_dtype")
    show_shapes = kwargs.pop("show_shapes")
    show_trainable = kwargs.pop("show_trainable")
    if kwargs:
        raise ValueError(f"Invalid kwargs: {kwargs}")

    table = (
        '<<table border="0" cellborder="1" bgcolor="black" cellpadding="10">'
    )

    # Header rows must span as many columns as the widest body row:
    # one for dtype, one for trainable, two for shapes (input + output).
    colspan_max = sum(int(x) for x in (show_dtype, show_trainable))
    if show_shapes:
        colspan_max += 2
    colspan = max(1, colspan_max)

    if show_layer_names:
        table += (
            f'<tr><td colspan="{colspan}" bgcolor="black">'
            '<font point-size="16" color="white">'
            f"<b>{layer.name}</b> ({class_name})"
            "</font></td></tr>"
        )
    else:
        table += (
            f'<tr><td colspan="{colspan}" bgcolor="black">'
            '<font point-size="16" color="white">'
            f"<b>{class_name}</b>"
            "</font></td></tr>"
        )
    if (
        show_layer_activations
        and hasattr(layer, "activation")
        and layer.activation is not None
    ):
        table += (
            f'<tr><td bgcolor="white" colspan="{colspan}">'
            '<font point-size="14">'
            f"Activation: <b>{get_layer_activation_name(layer)}</b>"
            "</font></td></tr>"
        )

    cols = []
    if show_shapes:
        input_shape = None
        output_shape = None
        try:
            input_shape = tree.map_structure(lambda x: x.shape, layer.input)
            output_shape = tree.map_structure(lambda x: x.shape, layer.output)
        except (ValueError, AttributeError):
            # Shapes can be unavailable; they are rendered as "?" below.
            pass

        def format_shape(shape):
            # Render a shape structure as text, stripping dict braces.
            if shape is not None:
                if isinstance(shape, dict):
                    shape_str = ", ".join(
                        [f"{k}: {v}" for k, v in shape.items()]
                    )
                else:
                    shape_str = f"{shape}"
                shape_str = shape_str.replace("}", "").replace("{", "")
            else:
                shape_str = "?"
            return shape_str

        if class_name != "InputLayer":
            cols.append(
                (
                    '<td bgcolor="white"><font point-size="14">'
                    f"Input shape: <b>{format_shape(input_shape)}</b>"
                    "</font></td>"
                )
            )
        cols.append(
            (
                '<td bgcolor="white"><font point-size="14">'
                f"Output shape: <b>{format_shape(output_shape)}</b>"
                "</font></td>"
            )
        )
    if show_dtype:
        dtype = None
        try:
            dtype = tree.map_structure(lambda x: x.dtype, layer.output)
        except (ValueError, AttributeError):
            pass
        cols.append(
            (
                '<td bgcolor="white"><font point-size="14">'
                f'Output dtype: <b>{dtype or "?"}</b>'
                "</font></td>"
            )
        )
    # Only layers that actually own weights get a trainability cell.
    if show_trainable and hasattr(layer, "trainable") and layer.weights:
        if layer.trainable:
            cols.append(
                (
                    '<td bgcolor="forestgreen">'
                    '<font point-size="14" color="white">'
                    "<b>Trainable</b></font></td>"
                )
            )
        else:
            cols.append(
                (
                    '<td bgcolor="firebrick">'
                    '<font point-size="14" color="white">'
                    "<b>Non-trainable</b></font></td>"
                )
            )
    if cols:
        colspan = len(cols)
    else:
        colspan = 1

    if cols:
        table += "<tr>" + "".join(cols) + "</tr>"
    table += "</table>>"
    return table
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def make_node(layer, **kwargs):
    """Build the pydot node for `layer`, keyed by the layer's `id()`."""
    node = pydot.Node(str(id(layer)), label=make_layer_label(layer, **kwargs))
    for attribute, value in (
        ("fontname", "Helvetica"),
        ("border", "0"),
        ("margin", "0"),
    ):
        node.set(attribute, value)
    return node
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def remove_unused_edges(dot):
    """Delete edges of `dot` whose destination is not one of its nodes."""
    node_names = {node.get_name() for node in dot.get_nodes()}
    for edge in dot.get_edges():
        destination = edge.get_destination()
        if destination not in node_names:
            dot.del_edge(edge.get_source(), destination)
    return dot
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@keras_export("keras.utils.model_to_dot")
def model_to_dot(
    model,
    show_shapes=False,
    show_dtype=False,
    show_layer_names=True,
    rankdir="TB",
    expand_nested=False,
    dpi=200,
    subgraph=False,
    show_layer_activations=False,
    show_trainable=False,
    **kwargs,
):
    """Convert a Keras model to dot format.

    Args:
        model: A Keras model instance.
        show_shapes: whether to display shape information.
        show_dtype: whether to display layer dtypes.
        show_layer_names: whether to display layer names.
        rankdir: `rankdir` argument passed to PyDot,
            a string specifying the format of the plot: `"TB"`
            creates a vertical plot; `"LR"` creates a horizontal plot.
        expand_nested: whether to expand nested Functional models
            into clusters.
        dpi: Image resolution in dots per inch.
        subgraph: whether to return a `pydot.Cluster` instance.
        show_layer_activations: Display layer activations (only for layers that
            have an `activation` property).
        show_trainable: whether to display if a layer is trainable.

    Returns:
        A `pydot.Dot` instance representing the Keras model or
        a `pydot.Cluster` instance representing nested model if
        `subgraph=True`.
    """
    from keras.src.ops.function import make_node_key

    if not model.built:
        raise ValueError(
            "This model has not yet been built. "
            "Build the model first by calling `build()` or by calling "
            "the model on a batch of data."
        )

    from keras.src.models import functional
    from keras.src.models import sequential

    # from keras.src.layers import Wrapper

    if not check_pydot():
        raise ImportError(
            "You must install pydot (`pip install pydot`) for "
            "model_to_dot to work."
        )

    if subgraph:
        # Nested invocation (expand_nested): render as a dashed cluster.
        dot = pydot.Cluster(style="dashed", graph_name=model.name)
        dot.set("label", model.name)
        dot.set("labeljust", "l")
    else:
        dot = pydot.Dot()
        dot.set("rankdir", rankdir)
        dot.set("concentrate", True)
        dot.set("dpi", dpi)
        dot.set("splines", "ortho")
        dot.set_node_defaults(shape="record")

    if kwargs.pop("layer_range", None) is not None:
        raise ValueError("Argument `layer_range` is no longer supported.")
    if kwargs:
        raise ValueError(f"Unrecognized keyword arguments: {kwargs}")

    # Repack the display flags for `make_node`/`make_layer_label`.
    kwargs = {
        "show_layer_names": show_layer_names,
        "show_layer_activations": show_layer_activations,
        "show_dtype": show_dtype,
        "show_shapes": show_shapes,
        "show_trainable": show_trainable,
    }

    if isinstance(model, sequential.Sequential):
        layers = model.layers
    elif not isinstance(model, functional.Functional):
        # We treat subclassed models as a single node.
        node = make_node(model, **kwargs)
        dot.add_node(node)
        return dot
    else:
        layers = model._operations

    # Create graph nodes.
    sub_n_first_node = {}
    sub_n_last_node = {}
    for i, layer in enumerate(layers):
        # Process nested functional models.
        if expand_nested and isinstance(layer, functional.Functional):
            submodel = model_to_dot(
                layer,
                show_shapes,
                show_dtype,
                show_layer_names,
                rankdir,
                expand_nested,
                subgraph=True,
                show_layer_activations=show_layer_activations,
                show_trainable=show_trainable,
            )
            # sub_n : submodel
            # Remember the cluster's entry and exit nodes so edges to/from
            # the nested model can be attached below.
            sub_n_nodes = submodel.get_nodes()
            sub_n_first_node[layer.name] = sub_n_nodes[0]
            sub_n_last_node[layer.name] = sub_n_nodes[-1]
            dot.add_subgraph(submodel)

        else:
            node = make_node(layer, **kwargs)
            dot.add_node(node)

    # Connect nodes with edges.
    # Sequential case.
    if isinstance(model, sequential.Sequential):
        for i in range(len(layers) - 1):
            inbound_layer_id = str(id(layers[i]))
            layer_id = str(id(layers[i + 1]))
            add_edge(dot, inbound_layer_id, layer_id)
        return dot

    # Functional case.
    for i, layer in enumerate(layers):
        layer_id = str(id(layer))
        for i, node in enumerate(layer._inbound_nodes):
            node_key = make_node_key(layer, i)
            if node_key in model._nodes:
                for parent_node in node.parent_nodes:
                    inbound_layer = parent_node.operation
                    inbound_layer_id = str(id(inbound_layer))
                    if not expand_nested:
                        assert dot.get_node(inbound_layer_id)
                        assert dot.get_node(layer_id)
                        add_edge(dot, inbound_layer_id, layer_id)
                    else:
                        # if inbound_layer is not Functional
                        if not isinstance(inbound_layer, functional.Functional):
                            # if current layer is not Functional
                            if not isinstance(layer, functional.Functional):
                                assert dot.get_node(inbound_layer_id)
                                assert dot.get_node(layer_id)
                                add_edge(dot, inbound_layer_id, layer_id)
                            # if current layer is Functional
                            elif isinstance(layer, functional.Functional):
                                add_edge(
                                    dot,
                                    inbound_layer_id,
                                    sub_n_first_node[layer.name].get_name(),
                                )
                        # if inbound_layer is Functional
                        elif isinstance(inbound_layer, functional.Functional):
                            name = sub_n_last_node[
                                inbound_layer.name
                            ].get_name()
                            if isinstance(layer, functional.Functional):
                                output_name = sub_n_first_node[
                                    layer.name
                                ].get_name()
                                add_edge(dot, name, output_name)
                            else:
                                add_edge(dot, name, layer_id)
    return dot
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
@keras_export("keras.utils.plot_model")
def plot_model(
    model,
    to_file="model.png",
    show_shapes=False,
    show_dtype=False,
    show_layer_names=False,
    rankdir="TB",
    expand_nested=False,
    dpi=200,
    show_layer_activations=False,
    show_trainable=False,
    **kwargs,
):
    """Converts a Keras model to dot format and save to a file.

    Example:

    ```python
    inputs = ...
    outputs = ...
    model = keras.Model(inputs=inputs, outputs=outputs)

    dot_img_file = '/tmp/model_1.png'
    keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
    ```

    Args:
        model: A Keras model instance
        to_file: File name of the plot image.
        show_shapes: whether to display shape information.
        show_dtype: whether to display layer dtypes.
        show_layer_names: whether to display layer names.
        rankdir: `rankdir` argument passed to PyDot,
            a string specifying the format of the plot: `"TB"`
            creates a vertical plot; `"LR"` creates a horizontal plot.
        expand_nested: whether to expand nested Functional models
            into clusters.
        dpi: Image resolution in dots per inch.
        show_layer_activations: Display layer activations (only for layers that
            have an `activation` property).
        show_trainable: whether to display if a layer is trainable.

    Returns:
        A Jupyter notebook Image object if Jupyter is installed.
        This enables in-line display of the model plots in notebooks.
    """

    if not model.built:
        raise ValueError(
            "This model has not yet been built. "
            "Build the model first by calling `build()` or by calling "
            "the model on a batch of data."
        )
    if not check_pydot():
        message = (
            "You must install pydot (`pip install pydot`) "
            "for `plot_model` to work."
        )
        if "IPython.core.magics.namespace" in sys.modules:
            # We don't raise an exception here in order to avoid crashing
            # notebook tests where graphviz is not available.
            io_utils.print_msg(message)
            return
        else:
            raise ImportError(message)
    if not check_graphviz():
        message = (
            "You must install graphviz "
            "(see instructions at https://graphviz.gitlab.io/download/) "
            "for `plot_model` to work."
        )
        if "IPython.core.magics.namespace" in sys.modules:
            # We don't raise an exception here in order to avoid crashing
            # notebook tests where graphviz is not available.
            io_utils.print_msg(message)
            return
        else:
            raise ImportError(message)

    if kwargs.pop("layer_range", None) is not None:
        raise ValueError("Argument `layer_range` is no longer supported.")
    if kwargs:
        raise ValueError(f"Unrecognized keyword arguments: {kwargs}")

    dot = model_to_dot(
        model,
        show_shapes=show_shapes,
        show_dtype=show_dtype,
        show_layer_names=show_layer_names,
        rankdir=rankdir,
        expand_nested=expand_nested,
        dpi=dpi,
        show_layer_activations=show_layer_activations,
        show_trainable=show_trainable,
    )
    to_file = str(to_file)
    if dot is None:
        return
    dot = remove_unused_edges(dot)
    # Derive the output format from the file extension, defaulting to PNG.
    _, extension = os.path.splitext(to_file)
    if not extension:
        extension = "png"
    else:
        extension = extension[1:]
    # Save image to disk.
    dot.write(to_file, format=extension)
    # Return the image as a Jupyter Image object, to be displayed in-line.
    # Note that we cannot easily detect whether the code is running in a
    # notebook, and thus we always return the Image if Jupyter is available.
    if extension != "pdf":
        try:
            from IPython import display

            return display.Image(filename=to_file)
        except ImportError:
            pass
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/module_utils.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class LazyModule:
    """Proxy that defers importing a module until it is actually needed.

    Attribute access triggers the real import; `available` probes for the
    module without raising. A failed import raises `ImportError` with a
    message suggesting the pip package to install.
    """

    def __init__(self, name, pip_name=None, import_error_msg=None):
        # name: importable module name; pip_name: package to `pip install`.
        self.name = name
        self.pip_name = pip_name or name
        if not import_error_msg:
            import_error_msg = (
                f"This requires the {self.name} module. "
                f"You can install it via `pip install {self.pip_name}`"
            )
        self.import_error_msg = import_error_msg
        # The real module, filled in lazily by `initialize()`.
        self.module = None
        # Tri-state: None = not probed yet, then True/False (cached).
        self._available = None

    @property
    def available(self):
        if self._available is None:
            try:
                self.initialize()
            except ImportError:
                self._available = False
            else:
                self._available = True
        return self._available

    def initialize(self):
        try:
            self.module = importlib.import_module(self.name)
        except ImportError:
            # Re-raise with the friendlier install hint.
            raise ImportError(self.import_error_msg)

    def __getattr__(self, name):
        # Keras API export machinery probes this attribute; never trigger
        # an import for it.
        if name == "_api_export_path":
            raise AttributeError
        if self.module is None:
            self.initialize()
        return getattr(self.module, name)

    def __repr__(self):
        return f"LazyModule({self.name})"
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Optional/heavy dependencies wrapped in LazyModule so importing this
# module stays cheap: the real import happens on first attribute access.
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torchvision = LazyModule("torchvision")
torch_xla = LazyModule(
    "torch_xla",
    import_error_msg=(
        "This requires the torch_xla module. You can install it via "
        "`pip install torch-xla`. Additionally, you may need to update "
        "LD_LIBRARY_PATH if necessary. Torch XLA builds a shared library, "
        "_XLAC.so, which needs to link to the version of Python it was built "
        "with. Use the following command to update LD_LIBRARY_PATH: "
        "`export LD_LIBRARY_PATH=<path to Python>/lib:$LD_LIBRARY_PATH`"
    ),
)
optree = LazyModule("optree")
# Imports as module "tree" — presumably the `dm-tree` package; verify.
dmtree = LazyModule("tree")
tf2onnx = LazyModule("tf2onnx")
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/naming.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
from keras.src.api_export import keras_export
|
| 5 |
+
from keras.src.backend.common import global_state
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def auto_name(prefix):
    """Generate a unique snake_case name derived from `prefix`."""
    return uniquify(to_snake_case(prefix))
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def uniquify(name):
    """Suffix `name` with a per-name counter so repeated names stay distinct.

    The first occurrence of a name is returned unchanged; subsequent
    occurrences become `"<name>_<count>"`. Counters live in the global state
    so they are shared process-wide (and with `get_uid`).
    """
    counters = global_state.get_global_attribute(
        "object_name_uids",
        default=collections.defaultdict(int),
        set_to_default=True,
    )
    # Membership test (not indexing) so we don't insert a zero entry into
    # the defaultdict before deciding on the suffix.
    already_seen = name in counters
    unique_name = f"{name}_{counters[name]}" if already_seen else name
    counters[name] += 1
    return unique_name
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def to_snake_case(name):
    """Convert `name` to snake_case, dropping non-word characters first."""
    # Strip anything outside [A-Za-z0-9_], then insert underscores at
    # CamelCase word boundaries before lowercasing.
    stripped = re.sub(r"\W+", "", name)
    worded = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", stripped)
    return re.sub("([a-z])([A-Z])", r"\1_\2", worded).lower()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@keras_export("keras.backend.get_uid")
def get_uid(prefix=""):
    """Associates a string prefix with an integer counter.

    Each call increments the counter for `prefix` and returns it, starting
    at 1. Counters are stored in the global state, shared with `uniquify`.

    Args:
        prefix: String prefix to index.

    Returns:
        Unique integer ID.

    Example:

    >>> get_uid('dense')
    1
    >>> get_uid('dense')
    2
    """
    counters = global_state.get_global_attribute(
        "object_name_uids",
        default=collections.defaultdict(int),
        set_to_default=True,
    )
    counters[prefix] += 1
    return counters[prefix]
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def reset_uids():
    """Reset every name/uid counter tracked in the global state."""
    fresh_counters = collections.defaultdict(int)
    global_state.set_global_attribute("object_name_uids", fresh_counters)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def get_object_name(obj):
    """Return a snake_case display name for `obj`.

    Resolution order: an explicit `name` attribute (most Keras objects),
    then `__name__` (functions and classes), then the instance's class name.

    Args:
        obj: Any object.

    Returns:
        A string name for `obj`.
    """
    if hasattr(obj, "name"):  # Most Keras objects.
        return obj.name
    if hasattr(obj, "__name__"):  # Function or class.
        return to_snake_case(obj.__name__)
    # Every remaining object has `__class__`, so the original trailing
    # `to_snake_case(str(obj))` fallback was unreachable dead code.
    return to_snake_case(obj.__class__.__name__)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/numerical_utils.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from keras.src import backend
|
| 4 |
+
from keras.src.api_export import keras_export
|
| 5 |
+
from keras.src.utils import tf_utils
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.utils.normalize")
def normalize(x, axis=-1, order=2):
    """Normalizes an array.

    If the input is a NumPy array, a NumPy array will be returned.
    If it's a backend tensor, a backend tensor will be returned.

    Args:
        x: Array to normalize.
        axis: axis along which to normalize.
        order: Normalization order (e.g. `order=2` for L2 norm).

    Returns:
        A normalized copy of the array.
    """
    from keras.src import ops

    if isinstance(x, np.ndarray):
        # NumPy input
        norm = np.atleast_1d(np.linalg.norm(x, order, axis))
        # Avoid division by zero for all-zero slices.
        norm[norm == 0] = 1

        # `np.expand_dims` needs a concrete axis; `None` (norm over the
        # flattened array) maps to -1. NOTE: the previous `axis = axis or -1`
        # also remapped `axis=0` to -1, misaligning the broadcast.
        axis = -1 if axis is None else axis
        return x / np.expand_dims(norm, axis)

    # Backend tensor input
    return ops.nn.normalize(x, axis=axis, order=order)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@keras_export("keras.utils.to_categorical")
def to_categorical(x, num_classes=None):
    """Converts a class vector (integers) to binary class matrix.

    E.g. for use with `categorical_crossentropy`.

    Args:
        x: Array-like with class values to be converted into a matrix
            (integers from 0 to `num_classes - 1`).
        num_classes: Total number of classes. If `None`, this would be inferred
            as `max(x) + 1`. Defaults to `None`.

    Returns:
        A binary matrix representation of the input as a NumPy array. The class
        axis is placed last.

    Example:

    >>> a = keras.utils.to_categorical([0, 1, 2, 3], num_classes=4)
    >>> print(a)
    [[1. 0. 0. 0.]
     [0. 1. 0. 0.]
     [0. 0. 1. 0.]
     [0. 0. 0. 1.]]
    """
    if backend.is_tensor(x):
        tensor_shape = backend.core.shape(x)
        # Squeeze a trailing length-1 dimension so labels shaped (..., 1)
        # one-hot the same way as rank-reduced labels.
        if (
            tensor_shape is not None
            and len(tensor_shape) > 1
            and tensor_shape[-1] == 1
        ):
            x = backend.numpy.reshape(x, tuple(tensor_shape[:-1]))
        return backend.nn.one_hot(x, num_classes)

    x = np.array(x, dtype="int64")
    input_shape = x.shape

    # Squeeze a trailing length-1 dimension (see above).
    if input_shape and len(input_shape) > 1 and input_shape[-1] == 1:
        input_shape = tuple(input_shape[:-1])

    flat = x.reshape(-1)
    if not num_classes:
        num_classes = np.max(flat) + 1
    batch_size = flat.shape[0]
    one_hot = np.zeros((batch_size, num_classes))
    one_hot[np.arange(batch_size), flat] = 1
    return np.reshape(one_hot, input_shape + (num_classes,))
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def encode_categorical_inputs(
    inputs,
    output_mode,
    depth,
    dtype,
    sparse=False,
    count_weights=None,
    backend_module=None,
):
    """Encodes categorical inputs according to output_mode.

    Args:
        inputs: the inputs to encode.
        output_mode: one of `"int"`, `"one_hot"`, `"multi_hot"`, or `"count"`.
        depth: number of classes, this will be the last dimension of the
            output.
        dtype: the dtype of the output, unless `count_weights` is not `None`.
        sparse: whether the output should be sparse for backends supporting it.
        count_weights: weights to apply if `output_mode` is `"count"`.
        backend_module: the backend to use instead of the current one.

    Returns: the encoded inputs.
    """
    backend_module = backend_module or backend

    # "int" mode is a plain cast — no encoding needed.
    if output_mode == "int":
        return backend_module.cast(inputs, dtype=dtype)

    rank_of_inputs = len(backend_module.shape(inputs))

    # In all cases, we should uprank scalar input to a single sample.
    if rank_of_inputs == 0:
        inputs = backend_module.numpy.expand_dims(inputs, -1)
        rank_of_inputs = 1

    if (
        backend_module.__name__.endswith("tensorflow")
        and rank_of_inputs <= 2
        and output_mode in ("multi_hot", "count")
    ):
        # TF only fastpath. Uses bincount; faster. Doesn't work for rank 3+.
        try:
            return tf_utils.tf_encode_categorical_inputs(
                inputs,
                output_mode,
                depth,
                dtype=dtype,
                sparse=sparse,
                count_weights=count_weights,
            )
        except ValueError:
            # Fastpath rejected the input; fall through to the generic
            # backend-agnostic implementation below.
            pass

    if output_mode == "multi_hot":
        return backend_module.nn.multi_hot(
            inputs, depth, dtype=dtype, sparse=sparse
        )
    elif output_mode == "one_hot":
        input_shape = backend_module.core.shape(inputs)
        # Shrink the last dimension if the shape is (..., 1).
        if (
            input_shape is not None
            and len(input_shape) > 1
            and input_shape[-1] == 1
        ):
            newshape = tuple(input_shape[:-1])
            inputs = backend_module.numpy.reshape(inputs, newshape)
        return backend_module.nn.one_hot(
            inputs, depth, dtype=dtype, sparse=sparse
        )
    elif output_mode == "count":
        # We don't use `ops.bincount` because its output has a dynamic shape
        # (last dimension is the highest value of `inputs`). We implement a
        # narrower use case where `minlength` and `maxlength` (not supported by
        # `ops.bincount`) are the same and static value: `depth`. We also don't
        # need to support indices that are negative or greater than `depth`.
        reduction_axis = 1 if len(inputs.shape) > 1 else 0

        # When weighting counts, the output dtype follows the weights.
        if count_weights is not None:
            dtype = count_weights.dtype
        one_hot_encoding = backend_module.nn.one_hot(
            inputs, depth, dtype=dtype, sparse=sparse
        )
        if count_weights is not None:
            # Broadcast one weight per input index across the class axis.
            count_weights = backend_module.numpy.expand_dims(count_weights, -1)
            one_hot_encoding = one_hot_encoding * count_weights

        # Summing the (possibly weighted) one-hot vectors yields per-class
        # counts with a static last dimension of `depth`.
        outputs = backend_module.numpy.sum(
            one_hot_encoding,
            axis=reduction_axis,
        )
        return outputs
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def build_pos_neg_masks(
    query_labels,
    key_labels,
    remove_diagonal=True,
):
    """Build boolean positive/negative pair masks from two label vectors.

    `positive_mask[i, j]` is True when `query_labels[i] == key_labels[j]`;
    `negative_mask` is its complement. With `remove_diagonal=True`, the main
    diagonal (self-pairs) is excluded from the positive mask.
    """
    from keras.src import ops

    # Promote rank-1 label vectors to column vectors for broadcasting.
    if ops.ndim(query_labels) == 1:
        query_labels = ops.reshape(query_labels, (-1, 1))
    if ops.ndim(key_labels) == 1:
        key_labels = ops.reshape(key_labels, (-1, 1))

    positive_mask = ops.equal(query_labels, ops.transpose(key_labels))
    negative_mask = ops.logical_not(positive_mask)

    if remove_diagonal:
        off_diagonal = ~ops.eye(
            ops.size(query_labels),
            ops.size(key_labels),
            k=0,
            dtype="bool",
        )
        positive_mask = ops.logical_and(positive_mask, off_diagonal)

    return positive_mask, negative_mask
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/progbar.py
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import time
|
| 5 |
+
|
| 6 |
+
from keras.src import backend
|
| 7 |
+
from keras.src.api_export import keras_export
|
| 8 |
+
from keras.src.utils import io_utils
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@keras_export("keras.utils.Progbar")
class Progbar:
    """Displays a progress bar.

    Args:
        target: Total number of steps expected, None if unknown.
        width: Progress bar width on screen.
        verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
        stateful_metrics: Iterable of string names of metrics that should *not*
            be averaged over time. Metrics in this list will be displayed as-is.
            All others will be averaged by the progbar before display.
        interval: Minimum visual progress update interval (in seconds).
        unit_name: Display name for step counts (usually "step" or "sample").
    """

    def __init__(
        self,
        target,
        width=20,
        verbose=1,
        interval=0.05,
        stateful_metrics=None,
        unit_name="step",
    ):
        self.target = target
        self.width = width
        self.verbose = verbose
        self.interval = interval
        self.unit_name = unit_name
        if stateful_metrics:
            self.stateful_metrics = set(stateful_metrics)
        else:
            self.stateful_metrics = set()

        # In-place (carriage-return) redraw only works on real terminals and
        # a few known host environments; otherwise every update prints a new
        # line instead of overwriting the previous one.
        self._dynamic_display = (
            (hasattr(sys.stdout, "isatty") and sys.stdout.isatty())
            or "ipykernel" in sys.modules
            or "posix" in sys.modules
            or "PYCHARM_HOSTED" in os.environ
        )
        # Highest step index seen so far (used to weight running averages).
        self._seen_so_far = 0
        # We use a dict + list to avoid garbage collection
        # issues found in OrderedDict
        self._values = {}
        self._values_order = []
        self._start = time.time()
        self._last_update = 0
        self._time_at_epoch_start = self._start
        # Timestamp after step 1; lets ETA ignore the (usually slow) first
        # step. See `_estimate_step_duration`.
        self._time_after_first_step = None
        # Printable width of the previous render; used to pad/erase leftovers.
        self._prev_total_width = 0

    def update(self, current, values=None, finalize=None):
        """Updates the progress bar.

        Args:
            current: Index of current step.
            values: List of tuples: `(name, value_for_last_step)`. If `name` is
                in `stateful_metrics`, `value_for_last_step` will be displayed
                as-is. Else, an average of the metric over time will be
                displayed.
            finalize: Whether this is the last update for the progress bar. If
                `None`, defaults to `current >= self.target`.
        """
        if finalize is None:
            if self.target is None:
                finalize = False
            else:
                finalize = current >= self.target

        values = values or []
        for k, v in values:
            if k not in self._values_order:
                self._values_order.append(k)
            if k not in self.stateful_metrics:
                # In the case that progress bar doesn't have a target value in
                # the first epoch, both on_batch_end and on_epoch_end will be
                # called, which will cause 'current' and 'self._seen_so_far' to
                # have the same value. Force the minimal value to 1 here,
                # otherwise stateful_metric will be 0s.
                value_base = max(current - self._seen_so_far, 1)
                if k not in self._values:
                    # Running [weighted sum, total weight] pair.
                    self._values[k] = [v * value_base, value_base]
                else:
                    self._values[k][0] += v * value_base
                    self._values[k][1] += value_base
            else:
                # Stateful metrics output a numeric value. This representation
                # means "take an average from a single value" but keeps the
                # numeric formatting.
                self._values[k] = [v, 1]
        self._seen_so_far = current

        message = ""
        # Number of non-printing (ANSI escape) characters added to the
        # rendered string; subtracted later to get the true visual width.
        special_char_len = 0
        now = time.time()
        time_per_unit = self._estimate_step_duration(current, now)

        if self.verbose == 1:
            # Rate-limit redraws, but always render the final state.
            if now - self._last_update < self.interval and not finalize:
                return

            if self._dynamic_display:
                # Rewind to the start of the line to overwrite in place.
                message += "\b" * self._prev_total_width
                message += "\r"
            else:
                message += "\n"

            if self.target is not None:
                numdigits = int(math.log10(self.target)) + 1
                bar = ("%" + str(numdigits) + "d/%d") % (current, self.target)
                # Bold the step counter (8 escape chars: \x1b[1m + \x1b[0m).
                bar = f"\x1b[1m{bar}\x1b[0m "
                special_char_len += 8
                prog = float(current) / self.target
                prog_width = int(self.width * prog)

                if prog_width > 0:
                    # Green segment for completed progress (9 escape chars).
                    bar += "\33[32m" + "━" * prog_width + "\x1b[0m"
                    special_char_len += 9
                # White segment for remaining progress (9 escape chars).
                bar += "\33[37m" + "━" * (self.width - prog_width) + "\x1b[0m"
                special_char_len += 9

            else:
                bar = "%7d/Unknown" % current
            message += bar

            # Add ETA if applicable
            if self.target is not None and not finalize:
                eta = time_per_unit * (self.target - current)
                if eta > 3600:
                    eta_format = "%d:%02d:%02d" % (
                        eta // 3600,
                        (eta % 3600) // 60,
                        eta % 60,
                    )
                elif eta > 60:
                    eta_format = "%d:%02d" % (eta // 60, eta % 60)
                else:
                    eta_format = "%ds" % eta
                info = f" \x1b[1m{eta_format}\x1b[0m"
            else:
                # Time elapsed since start, in seconds
                info = f" \x1b[1m{now - self._start:.0f}s\x1b[0m"
            special_char_len += 8

            # Add time/step
            info += self._format_time(time_per_unit, self.unit_name)

            # Add metrics
            for k in self._values_order:
                info += f" - {k}:"
                if isinstance(self._values[k], list):
                    # Weighted running mean of the metric.
                    avg = backend.convert_to_numpy(
                        backend.numpy.mean(
                            self._values[k][0] / max(1, self._values[k][1])
                        )
                    )
                    avg = float(avg)
                    if abs(avg) > 1e-3:
                        info += f" {avg:.4f}"
                    else:
                        info += f" {avg:.4e}"
                else:
                    info += f" {self._values[k]}"
            message += info

            # Pad with spaces to erase leftovers from a longer previous line.
            total_width = len(bar) + len(info) - special_char_len
            if self._prev_total_width > total_width:
                message += " " * (self._prev_total_width - total_width)
            if finalize:
                message += "\n"

            io_utils.print_msg(message, line_break=False)
            self._prev_total_width = total_width
            message = ""

        elif self.verbose == 2:
            # Semi-verbose mode only prints a summary line when finalizing.
            if finalize:
                numdigits = int(math.log10(self.target)) + 1
                count = ("%" + str(numdigits) + "d/%d") % (current, self.target)
                info = f"{count} - {now - self._start:.0f}s"
                info += " -" + self._format_time(time_per_unit, self.unit_name)
                for k in self._values_order:
                    info += f" - {k}:"
                    avg = backend.convert_to_numpy(
                        backend.numpy.mean(
                            self._values[k][0] / max(1, self._values[k][1])
                        )
                    )
                    if avg > 1e-3:
                        info += f" {avg:.4f}"
                    else:
                        info += f" {avg:.4e}"
                info += "\n"
                message += info
                io_utils.print_msg(message, line_break=False)
                message = ""

        self._last_update = now

    def add(self, n, values=None):
        """Advances the bar by `n` steps (delta form of `update`)."""
        self.update(self._seen_so_far + n, values)

    def _format_time(self, time_per_unit, unit_name):
        """format a given duration to display to the user.

        Given the duration, this function formats it in either milliseconds
        or seconds and displays the unit (i.e. ms/step or s/epoch).

        Args:
            time_per_unit: the duration to display
            unit_name: the name of the unit to display

        Returns:
            A string with the correctly formatted duration and units
        """
        formatted = ""
        if time_per_unit >= 1 or time_per_unit == 0:
            formatted += f" {time_per_unit:.0f}s/{unit_name}"
        elif time_per_unit >= 1e-3:
            formatted += f" {time_per_unit * 1000.0:.0f}ms/{unit_name}"
        else:
            formatted += f" {time_per_unit * 1000000.0:.0f}us/{unit_name}"
        return formatted

    def _estimate_step_duration(self, current, now):
        """Estimate the duration of a single step.

        Given the step number `current` and the corresponding time `now` this
        function returns an estimate for how long a single step takes. If this
        is called before one step has been completed (i.e. `current == 0`) then
        zero is given as an estimate. The duration estimate ignores the
        duration of the (assumed to be non-representative) first step for
        estimates when more steps are available (i.e. `current>1`).

        Args:
            current: Index of current step.
            now: The current time.

        Returns: Estimate of the duration of a single step.
        """
        if current:
            # there are a few special scenarios here:
            # 1) somebody is calling the progress bar without ever supplying
            #    step 1
            # 2) somebody is calling the progress bar and supplies step one
            #    multiple times, e.g. as part of a finalizing call
            # in these cases, we just fall back to the simple calculation
            if self._time_after_first_step is not None and current > 1:
                time_per_unit = (now - self._time_after_first_step) / (
                    current - 1
                )
            else:
                time_per_unit = (now - self._start) / current

            if current == 1:
                self._time_after_first_step = now
            return time_per_unit
        else:
            return 0
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/python_utils.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import binascii
|
| 2 |
+
import codecs
|
| 3 |
+
import marshal
|
| 4 |
+
import os
|
| 5 |
+
import types as python_types
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def default(method):
    """Decorates a method to detect overrides in subclasses."""
    # The marker is inspected via `is_default()`; overriding the method in a
    # subclass yields a fresh function object without the attribute.
    setattr(method, "_is_default", True)
    return method
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def is_default(method):
    """Check if a method is decorated with the `default` wrapper.

    Returns the `_is_default` marker if present, `False` otherwise.
    """
    return getattr(method, "_is_default", False)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def func_dump(func):
    """Serializes a user-defined function.

    Args:
        func: the function to serialize.

    Returns:
        A tuple `(code, defaults, closure)` where `code` is a base64 string
        of the marshalled code object.
    """
    dumped = marshal.dumps(func.__code__)
    if os.name == "nt":
        # Normalize backslashes in the marshalled payload on Windows —
        # preserved quirk of the historical serialization format.
        dumped = dumped.replace(b"\\", b"/")
    code = codecs.encode(dumped, "base64").decode("ascii")
    defaults = func.__defaults__
    closure = None
    if func.__closure__:
        closure = tuple(cell.cell_contents for cell in func.__closure__)
    return code, defaults, closure
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def func_load(code, defaults=None, closure=None, globs=None):
    """Deserializes a user defined function.

    Args:
        code: bytecode of the function.
        defaults: defaults of the function.
        closure: closure of the function.
        globs: dictionary of global objects.

    Returns:
        A function object.
    """
    if isinstance(code, (tuple, list)):  # unpack previous dump
        code, defaults, closure = code
        if isinstance(defaults, list):
            defaults = tuple(defaults)

    def ensure_value_to_cell(value):
        """Ensures that a value is converted to a python cell object.

        Args:
            value: Any value that needs to be casted to the cell type

        Returns:
            A value wrapped as a cell object (see function "func_load")
        """

        def dummy_fn():
            value  # just access it so it gets captured in .__closure__

        cell_value = dummy_fn.__closure__[0]
        # Values that are already cells pass through untouched.
        return value if isinstance(value, type(cell_value)) else cell_value

    if closure is not None:
        closure = tuple(ensure_value_to_cell(item) for item in closure)
    try:
        raw_code = codecs.decode(code.encode("ascii"), "base64")
    except (UnicodeEncodeError, binascii.Error):
        # Legacy payloads that aren't valid base64/ASCII.
        raw_code = code.encode("raw_unicode_escape")
    code = marshal.loads(raw_code)
    if globs is None:
        globs = globals()
    return python_types.FunctionType(
        code, globs, name=code.co_name, argdefs=defaults, closure=closure
    )
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def to_list(x):
    """Normalizes a list/tensor into a list.

    If a tensor is passed, we return
    a list of size 1 containing the tensor.

    Args:
        x: target object to be normalized.

    Returns:
        A list.
    """
    return x if isinstance(x, list) else [x]
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def remove_long_seq(maxlen, seq, label):
    """Removes sequences that exceed the maximum length.

    Note: only sequences *strictly shorter* than `maxlen` are kept
    (historical behavior — a sequence of exactly `maxlen` is dropped).

    Args:
        maxlen: Int, maximum length of the output sequences.
        seq: List of lists, where each sublist is a sequence.
        label: List where each element is an integer.

    Returns:
        new_seq, new_label: shortened lists for `seq` and `label`.
    """
    kept = [(x, y) for x, y in zip(seq, label) if len(x) < maxlen]
    if not kept:
        return [], []
    new_seq, new_label = zip(*kept)
    return list(new_seq), list(new_label)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def removeprefix(x, prefix):
    """Backport of `removeprefix` from PEP-616 (Python 3.9+)"""
    # Empty prefixes are a no-op by definition.
    if prefix and x.startswith(prefix):
        return x[len(prefix):]
    return x
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def removesuffix(x, suffix):
    """Backport of `removesuffix` from PEP-616 (Python 3.9+)"""
    # Guard against empty suffixes: `x[:-0]` would wrongly return "".
    if suffix and x.endswith(suffix):
        return x[: -len(suffix)]
    return x
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def remove_by_id(lst, value):
    """Remove the first element of `lst` that *is* `value` (identity match)."""
    target = id(value)
    for index, item in enumerate(lst):
        if id(item) == target:
            del lst[index]
            break
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def pythonify_logs(logs):
    """Flatten and convert log values to Python-native types.

    This function attempts to convert dict value by `float(value)` and skips
    the conversion if it fails.

    Args:
        logs: A dict containing log values.

    Returns:
        A flattened dict with values converted to Python-native types if
        possible.
    """
    logs = logs or {}
    result = {}
    for key, value in sorted(logs.items()):
        if isinstance(value, dict):
            # Nested dicts are flattened; inner keys overwrite outer ones on
            # collision.
            result.update(pythonify_logs(value))
        else:
            try:
                value = float(value)
            except Exception:
                # Best-effort: non-numeric values pass through unchanged.
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                pass
            result[key] = value
    return result
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/rng_utils.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from keras.src import backend
|
| 6 |
+
from keras.src.api_export import keras_export
|
| 7 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@keras_export("keras.utils.set_random_seed")
def set_random_seed(seed):
    """Sets all random seeds (Python, NumPy, and backend framework, e.g. TF).

    You can use this utility to make almost any Keras program fully
    deterministic. Some limitations apply in cases where network communications
    are involved (e.g. parameter server distribution), which creates additional
    sources of randomness, or when certain non-deterministic cuDNN ops are
    involved.

    Calling this utility is equivalent to the following:

    ```python
    import random
    random.seed(seed)

    import numpy as np
    np.random.seed(seed)

    import tensorflow as tf  # Only if TF is installed
    tf.random.set_seed(seed)

    import torch  # Only if the backend is 'torch'
    torch.manual_seed(seed)
    ```

    Note that the TensorFlow seed is set even if you're not using TensorFlow
    as your backend framework, since many workflows leverage `tf.data`
    pipelines (which feature random shuffling). Likewise many workflows
    might leverage NumPy APIs.

    Arguments:
        seed: Integer, the random seed to use.
    """
    if not isinstance(seed, int):
        raise ValueError(
            "Expected `seed` argument to be an integer. "
            f"Received: seed={seed} (of type {type(seed)})"
        )
    # Seed the stdlib and NumPy generators unconditionally.
    random.seed(seed)
    np.random.seed(seed)
    # TF is seeded whenever it is installed (lazy `tf.available` check),
    # regardless of the active backend, since tf.data is backend-agnostic.
    if tf.available:
        tf.random.set_seed(seed)
    # Torch is only imported (and seeded) when it is the active backend.
    if backend.backend() == "torch":
        import torch

        torch.manual_seed(seed)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/sequence_utils.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@keras_export(
|
| 7 |
+
[
|
| 8 |
+
"keras.utils.pad_sequences",
|
| 9 |
+
"keras.preprocessing.sequence.pad_sequences",
|
| 10 |
+
]
|
| 11 |
+
)
|
| 12 |
+
def pad_sequences(
    sequences,
    maxlen=None,
    dtype="int32",
    padding="pre",
    truncating="pre",
    value=0.0,
):
    """Pads sequences to the same length.

    This function transforms a list (of length `num_samples`)
    of sequences (lists of integers)
    into a 2D NumPy array of shape `(num_samples, num_timesteps)`.
    `num_timesteps` is either the `maxlen` argument if provided,
    or the length of the longest sequence in the list.

    Sequences that are shorter than `num_timesteps`
    are padded with `value` until they are `num_timesteps` long.

    Sequences longer than `num_timesteps` are truncated
    so that they fit the desired length.

    The position where padding or truncation happens is determined by
    the arguments `padding` and `truncating`, respectively.
    Pre-padding or removing values from the beginning of the sequence is the
    default.

    >>> sequence = [[1], [2, 3], [4, 5, 6]]
    >>> keras.utils.pad_sequences(sequence)
    array([[0, 0, 1],
           [0, 2, 3],
           [4, 5, 6]], dtype=int32)

    >>> keras.utils.pad_sequences(sequence, value=-1)
    array([[-1, -1,  1],
           [-1,  2,  3],
           [ 4,  5,  6]], dtype=int32)

    >>> keras.utils.pad_sequences(sequence, padding='post')
    array([[1, 0, 0],
           [2, 3, 0],
           [4, 5, 6]], dtype=int32)

    >>> keras.utils.pad_sequences(sequence, maxlen=2)
    array([[0, 1],
           [2, 3],
           [5, 6]], dtype=int32)

    Args:
        sequences: List of sequences (each sequence is a list of integers).
        maxlen: Optional Int, maximum length of all sequences. If not provided,
            sequences will be padded to the length of the longest individual
            sequence.
        dtype: (Optional, defaults to `"int32"`). Type of the output sequences.
            To pad sequences with variable length strings, you can use `object`.
        padding: String, "pre" or "post" (optional, defaults to `"pre"`):
            pad either before or after each sequence.
        truncating: String, "pre" or "post" (optional, defaults to `"pre"`):
            remove values from sequences larger than
            `maxlen`, either at the beginning or at the end of the sequences.
        value: Float or String, padding value. (Optional, defaults to `0.`)

    Returns:
        NumPy array with shape `(len(sequences), maxlen)`

    Raises:
        ValueError: if `sequences` is not iterable, contains a non-iterable
            element, has inconsistent trailing sample shapes, or if
            `padding`/`truncating` is not `"pre"`/`"post"`, or if a string
            `value` is used with a non-string, non-object `dtype`.
    """
    if not hasattr(sequences, "__len__"):
        raise ValueError("`sequences` must be iterable.")
    num_samples = len(sequences)

    lengths = []
    sample_shape = ()
    sample_shape_unknown = True

    # Take the sample shape from the first non-empty sequence; consistency
    # against it is checked in the main loop below.
    for x in sequences:
        try:
            lengths.append(len(x))
            if sample_shape_unknown and len(x):
                sample_shape = np.asarray(x).shape[1:]
                sample_shape_unknown = False
        except TypeError as e:
            raise ValueError(
                "`sequences` must be a list of iterables. "
                f"Found non-iterable: {str(x)}"
            ) from e

    if maxlen is None:
        # Guard the empty case: `np.max([])` raises, so an empty `sequences`
        # now yields an empty `(0, 0)` array instead of an error.
        maxlen = np.max(lengths) if lengths else 0

    # Fixed: the original evaluated `np.issubdtype(dtype, np.str_)` twice
    # (a leftover of a removed `np.unicode_` check); one call suffices.
    is_dtype_str = np.issubdtype(dtype, np.str_)
    if isinstance(value, str) and dtype is not object and not is_dtype_str:
        raise ValueError(
            f"`dtype` {dtype} is not compatible with `value`'s type: "
            f"{type(value)}\nYou should set `dtype=object` for variable length "
            "strings."
        )

    x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            continue  # empty list/array was found
        if truncating == "pre":
            trunc = s[-maxlen:]
        elif truncating == "post":
            trunc = s[:maxlen]
        else:
            raise ValueError(f'Truncating type "{truncating}" not understood')

        # Check `trunc` has expected shape.
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError(
                f"Shape of sample {trunc.shape[1:]} of sequence at "
                f"position {idx} is different from expected shape "
                f"{sample_shape}"
            )

        if padding == "post":
            x[idx, : len(trunc)] = trunc
        elif padding == "pre":
            x[idx, -len(trunc) :] = trunc
        else:
            raise ValueError(f'Padding type "{padding}" not understood')
    return x
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/summary_utils.py
ADDED
|
@@ -0,0 +1,443 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import math
|
| 3 |
+
import re
|
| 4 |
+
import shutil
|
| 5 |
+
|
| 6 |
+
import rich
|
| 7 |
+
import rich.console
|
| 8 |
+
import rich.markup
|
| 9 |
+
|
| 10 |
+
# See https://github.com/keras-team/keras/issues/448
|
| 11 |
+
# for below imports
|
| 12 |
+
import rich.table
|
| 13 |
+
|
| 14 |
+
from keras.src import backend
|
| 15 |
+
from keras.src import tree
|
| 16 |
+
from keras.src.utils import dtype_utils
|
| 17 |
+
from keras.src.utils import io_utils
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def count_params(weights):
    """Return the total number of scalar elements across all `weights`."""
    return int(sum(math.prod(w.shape) for w in weights))
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@functools.lru_cache(512)
def _compute_memory_size(shape, dtype):
    # Cached: model summaries query the same (shape, dtype) pairs repeatedly.
    num_elements = math.prod(shape)
    canonical_dtype = backend.standardize_dtype(dtype)
    # Per-element size as reported by dtype_utils (the caller divides the
    # total by 8, suggesting this is in bits — see `weight_memory_size`).
    per_element_size = dtype_utils.dtype_size(canonical_dtype)
    return num_elements * per_element_size
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def weight_memory_size(weights):
    """Compute the memory footprint for weights based on their dtypes.

    Args:
        weights: An iterable containing the weights to measure.

    Returns:
        The total memory size (in Bytes) of the weights.
    """
    # Deduplicate by object identity so shared weights are counted once.
    deduped = {id(w): w for w in weights}.values()
    total = sum(_compute_memory_size(w.shape, w.dtype) for w in deduped)
    # `_compute_memory_size` accumulates in bits; divide by 8 for bytes.
    return total / 8
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def readable_memory_size(weight_memory_size):
    """Convert the weight memory size (Bytes) to a readable string."""
    scale = 1024
    # Walk up the unit ladder, dividing until the value fits under `scale`.
    for unit in ("B", "KB", "MB", "GB", "TB", "PB"):
        if weight_memory_size < scale:
            return f"{weight_memory_size:.2f} {unit}"
        weight_memory_size /= scale
    # Anything still >= 1024 PB is reported in PB after one final division.
    return f"{weight_memory_size:.2f} PB"
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def highlight_number(x):
    """Themes numbers in a summary using rich markup.

    `None` (e.g. an unknown dimension in a layer shape) gets its own color
    so it stands out from concrete numbers.
    """
    color = 45 if x is None else 34
    return f"[color({color})]{x}[/]"
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def highlight_symbol(x):
    """Themes keras symbols in a summary using rich markup."""
    return "[color(33)]" + f"{x}" + "[/]"
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def bold_text(x, color=None):
    """Bolds text using rich markup, optionally wrapping it in a color."""
    # Truthiness (not `is None`) preserved: a falsy color (e.g. 0) is
    # treated as "no color", exactly as before.
    if not color:
        return f"[bold]{x}[/]"
    return f"[bold][color({color})]{x}[/][/]"
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def format_layer_shape(layer):
    """Return a rich-markup string describing `layer`'s output shape(s).

    Returns `"?"` when no shape can be determined: the layer was never
    connected or built, or `compute_output_shape` is not implemented.
    """
    if not layer._inbound_nodes and not layer._build_shapes_dict:
        # Never called and never built: nothing to infer a shape from.
        return "?"

    def format_shape(shape):
        # Render one shape tuple, highlighting each dimension.
        highlighted = [highlight_number(x) for x in shape]
        return "(" + ", ".join(highlighted) + ")"

    # There are 2 approaches to get output shapes:
    # 1. Using `layer._inbound_nodes`, which is possible if the model is a
    # Sequential or Functional.
    # 2. Using `layer._build_shapes_dict`, which is possible if users manually
    # build the layer.
    if len(layer._inbound_nodes) > 0:
        for i in range(len(layer._inbound_nodes)):
            # NOTE(review): `output_shapes` is overwritten on each iteration,
            # so only the *last* inbound node's shapes survive — confirm this
            # is intended for layers with multiple inbound nodes.
            outputs = layer._inbound_nodes[i].output_tensors
            output_shapes = tree.map_structure(
                lambda x: format_shape(x.shape), outputs
            )
    else:
        try:
            if hasattr(layer, "output_shape"):
                output_shapes = format_shape(layer.output_shape)
            else:
                outputs = layer.compute_output_shape(**layer._build_shapes_dict)
                output_shapes = tree.map_shape_structure(
                    lambda x: format_shape(x), outputs
                )
        except NotImplementedError:
            return "?"
    # A single shape is returned directly; a structure of shapes is rendered
    # via `str()` with the quote characters stripped.
    if len(output_shapes) == 1:
        return output_shapes[0]
    out = str(output_shapes)
    out = out.replace("'", "")
    return out
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def print_summary(
    model,
    line_length=None,
    positions=None,
    print_fn=None,
    expand_nested=False,
    show_trainable=False,
    layer_range=None,
):
    """Prints a summary of a model.

    Args:
        model: Keras model instance.
        line_length: Total length of printed lines
            (e.g. set this to adapt the display to different
            terminal window sizes).
        positions: Relative or absolute positions of log elements in each line.
            If not provided, defaults to `[0.3, 0.6, 0.70, 1.]`.
        print_fn: Print function to use.
            It will be called on each line of the summary.
            You can set it to a custom function
            in order to capture the string summary.
            It defaults to `print` (prints to stdout).
        expand_nested: Whether to expand the nested models.
            If not provided, defaults to `False`.
        show_trainable: Whether to show if a layer is trainable.
            If not provided, defaults to `False`.
        layer_range: List or tuple containing two strings,
            the starting layer name and ending layer name (both inclusive),
            indicating the range of layers to be printed in the summary. The
            strings could also be regexes instead of an exact name. In this
            case, the starting layer will be the first layer that matches
            `layer_range[0]` and the ending layer will be the last element that
            matches `layer_range[1]`. By default (`None`) all
            layers in the model are included in the summary.
    """
    # Imported here (not at module top) to avoid a circular import with
    # keras.src.models.
    from keras.src.models import Functional
    from keras.src.models import Sequential

    # When not interactive, default to the logging-aware print function so
    # the summary still reaches the configured log sink.
    if not print_fn and not io_utils.is_interactive_logging_enabled():
        print_fn = io_utils.print_msg

    # Decide whether the model can be rendered as a simple linear stack
    # (3-column table) or needs the extra "Connected to" column.
    if isinstance(model, Sequential):
        sequential_like = True
        layers = model.layers
    elif not isinstance(model, Functional):
        # We treat subclassed models as a simple sequence of layers, for logging
        # purposes.
        sequential_like = True
        layers = model.layers
    else:
        layers = model._operations
        sequential_like = True
        nodes_by_depth = model._nodes_by_depth.values()
        nodes = []
        for v in nodes_by_depth:
            if (len(v) > 1) or (
                len(v) == 1 and len(tree.flatten(v[0].input_tensors)) > 1
            ):
                # if the model has multiple nodes
                # or if the nodes have multiple inbound_layers
                # the model is no longer sequential
                sequential_like = False
                break
            nodes += v
        if sequential_like:
            # search for shared layers
            for layer in model.layers:
                flag = False
                for node in layer._inbound_nodes:
                    if node in nodes:
                        if flag:
                            sequential_like = False
                            break
                        else:
                            flag = True
                if not sequential_like:
                    break

    if sequential_like:
        default_line_length = 88
        positions = positions or [0.45, 0.80, 1.0]
        # header names for the different log elements
        header = ["Layer (type)", "Output Shape", "Param #"]
        alignment = ["left", "left", "right"]
    else:
        default_line_length = 108
        positions = positions or [0.3, 0.56, 0.74, 1.0]
        # header names for the different log elements
        header = ["Layer (type)", "Output Shape", "Param #", "Connected to"]
        alignment = ["left", "left", "right", "left"]
        # Nodes belonging to this model; used to filter out connections to
        # layers outside the current network in `get_connections`.
        relevant_nodes = []
        for v in model._nodes_by_depth.values():
            relevant_nodes += v

    if show_trainable:
        # Shrink existing columns by 10% to make room for the new column.
        default_line_length += 12
        positions = [p * 0.90 for p in positions] + [1.0]
        header.append("Trainable")
        alignment.append("center")

    # Compute columns widths
    default_line_length = min(
        default_line_length, shutil.get_terminal_size().columns - 4
    )
    line_length = line_length or default_line_length
    column_widths = []
    current = 0
    for pos in positions:
        width = int(pos * line_length) - current
        if width < 4:
            raise ValueError("Insufficient console width to print summary.")
        column_widths.append(width)
        current += width

    # Render summary as a rich table.
    columns = []
    # Right align parameter counts.
    for i, name in enumerate(header):
        column = rich.table.Column(
            name,
            justify=alignment[i],
            width=column_widths[i],
        )
        columns.append(column)

    table = rich.table.Table(*columns, width=line_length, show_lines=True)

    def get_connections(layer):
        # Build the "Connected to" cell: a comma-separated list of
        # "layer_name[node_index][tensor_index]" entries, or "-" if none.
        connections = ""
        for node in layer._inbound_nodes:
            if relevant_nodes and node not in relevant_nodes:
                # node is not part of the current network
                continue
            for kt in node.input_tensors:
                keras_history = kt._keras_history
                inbound_layer = keras_history.operation
                node_index = highlight_number(keras_history.node_index)
                tensor_index = highlight_number(keras_history.tensor_index)
                if connections:
                    connections += ", "
                connections += (
                    f"{inbound_layer.name}[{node_index}][{tensor_index}]"
                )
        if not connections:
            connections = "-"
        return connections

    def get_layer_fields(layer, prefix=""):
        # One table row (list of cell strings) for a single layer.
        output_shape = format_layer_shape(layer)
        name = prefix + layer.name
        cls_name = layer.__class__.__name__
        name = rich.markup.escape(name)
        name += f" ({highlight_symbol(rich.markup.escape(cls_name))})"

        if not hasattr(layer, "built"):
            params = highlight_number(0)
        elif not layer.built:
            params = highlight_number(0) + " (unbuilt)"
        else:
            params = highlight_number(f"{layer.count_params():,}")

        fields = [name, output_shape, params]
        if not sequential_like:
            fields.append(get_connections(layer))
        if show_trainable:
            if hasattr(layer, "weights") and len(layer.weights) > 0:
                fields.append(
                    bold_text("Y", color=34)
                    if layer.trainable
                    else bold_text("N", color=9)
                )
            else:
                # Layer has no weights: trainability is not applicable.
                fields.append(bold_text("-"))
        return fields

    def print_layer(layer, nested_level=0):
        # Collect rows for `layer` and (when `expand_nested`) its sublayers,
        # indenting nested rows with a "└" marker.
        if nested_level:
            prefix = " " * nested_level + "└" + " "
        else:
            prefix = ""

        fields = get_layer_fields(layer, prefix=prefix)

        rows = [fields]
        if expand_nested and hasattr(layer, "layers") and layer.layers:
            nested_layers = layer.layers
            nested_level += 1
            for i in range(len(nested_layers)):
                rows.extend(
                    print_layer(nested_layers[i], nested_level=nested_level)
                )
        return rows

    # Render all layers to the rich table.
    layer_range = get_layer_index_bound_by_layer_name(layers, layer_range)
    for layer in layers[layer_range[0] : layer_range[1]]:
        for row in print_layer(layer):
            table.add_row(*row)

    # After the table, append information about parameter count and size.
    if hasattr(model, "_collected_trainable_weights"):
        trainable_count = count_params(model._collected_trainable_weights)
        trainable_memory_size = weight_memory_size(
            model._collected_trainable_weights
        )
    else:
        trainable_count = count_params(model.trainable_weights)
        trainable_memory_size = weight_memory_size(model.trainable_weights)

    non_trainable_count = count_params(model.non_trainable_weights)
    non_trainable_memory_size = weight_memory_size(model.non_trainable_weights)

    if model.compiled and model.optimizer and model.optimizer.built:
        optimizer_weight_count = count_params(model.optimizer.variables)
        optimizer_memory_size = weight_memory_size(model.optimizer.variables)
        optimizer_built = True
    else:
        optimizer_weight_count = 0
        optimizer_memory_size = 0
        optimizer_built = False

    total_count = trainable_count + non_trainable_count + optimizer_weight_count
    total_memory_size = (
        trainable_memory_size
        + non_trainable_memory_size
        + optimizer_memory_size
    )

    # Create a rich console for printing. Capture for non-interactive logging.
    if print_fn:
        console = rich.console.Console(
            highlight=False, force_terminal=False, color_system=None
        )
        console.begin_capture()
    else:
        console = rich.console.Console(highlight=False)

    # Print to the console.
    console.print(bold_text(f'Model: "{rich.markup.escape(model.name)}"'))
    console.print(table)
    console.print(
        bold_text(" Total params: ")
        + highlight_number(f"{total_count:,}")
        + f" ({readable_memory_size(total_memory_size)})"
    )
    console.print(
        bold_text(" Trainable params: ")
        + highlight_number(f"{trainable_count:,}")
        + f" ({readable_memory_size(trainable_memory_size)})"
    )
    console.print(
        bold_text(" Non-trainable params: ")
        + highlight_number(f"{non_trainable_count:,}")
        + f" ({readable_memory_size(non_trainable_memory_size)})"
    )
    if optimizer_built:
        console.print(
            bold_text(" Optimizer params: ")
            + highlight_number(f"{optimizer_weight_count:,}")
            + f" ({readable_memory_size(optimizer_memory_size)})"
        )

    # Output captured summary for non-interactive logging.
    if print_fn:
        if print_fn is io_utils.print_msg:
            print_fn(console.end_capture(), line_break=False)
        else:
            print_fn(console.end_capture())
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
def get_layer_index_bound_by_layer_name(layers, layer_range=None):
    """Resolve `layer_range` name patterns into slice bounds over `layers`.

    The bounds can be used to slice the layer list into a sub-list for
    display.

    Args:
        layers: the model's list of layers.
        layer_range: a list or tuple of 2 strings — regexes matched against
            layer names — naming the starting and ending layer (both
            inclusive). `None` means all layers.

    Returns:
        `[first_layer_index, last_layer_index + 1]`, suitable for slicing
        `layers`.
    """
    # Guard clause: no range requested means every layer is included.
    if layer_range is None:
        return [0, len(layers)]
    if len(layer_range) != 2:
        raise ValueError(
            "layer_range must be a list or tuple of length 2. Received: "
            f"layer_range = {layer_range} of length {len(layer_range)}"
        )
    if not isinstance(layer_range[0], str) or not isinstance(
        layer_range[1], str
    ):
        raise ValueError(
            "layer_range should contain string type only. "
            f"Received: {layer_range}"
        )

    start_matches = [
        i
        for i, layer in enumerate(layers)
        if re.match(layer_range[0], layer.name)
    ]
    end_matches = [
        i
        for i, layer in enumerate(layers)
        if re.match(layer_range[1], layer.name)
    ]

    if not start_matches or not end_matches:
        raise ValueError(
            "Passed layer_names do not match the layer names in the model. "
            f"Received: {layer_range}"
        )

    # If the patterns matched in reverse order, swap the bounds so the
    # returned slice is still non-empty.
    if min(start_matches) > max(end_matches):
        return [min(end_matches), max(start_matches) + 1]
    return [min(start_matches), max(end_matches) + 1]
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/text_dataset_utils.py
ADDED
|
@@ -0,0 +1,291 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.utils import dataset_utils
|
| 5 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export(
    [
        "keras.utils.text_dataset_from_directory",
        "keras.preprocessing.text_dataset_from_directory",
    ]
)
def text_dataset_from_directory(
    directory,
    labels="inferred",
    label_mode="int",
    class_names=None,
    batch_size=32,
    max_length=None,
    shuffle=True,
    seed=None,
    validation_split=None,
    subset=None,
    follow_links=False,
    verbose=True,
):
    """Generates a `tf.data.Dataset` from text files in a directory.

    If your directory structure is:

    ```
    main_directory/
    ...class_a/
    ......a_text_1.txt
    ......a_text_2.txt
    ...class_b/
    ......b_text_1.txt
    ......b_text_2.txt
    ```

    Then calling `text_dataset_from_directory(main_directory,
    labels='inferred')` will return a `tf.data.Dataset` that yields batches of
    texts from the subdirectories `class_a` and `class_b`, together with labels
    0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).

    Only `.txt` files are supported at this time.

    Args:
        directory: Directory where the data is located.
            If `labels` is `"inferred"`, it should contain
            subdirectories, each containing text files for a class.
            Otherwise, the directory structure is ignored.
        labels: Either `"inferred"`
            (labels are generated from the directory structure),
            `None` (no labels),
            or a list/tuple of integer labels of the same size as the number of
            text files found in the directory. Labels should be sorted according
            to the alphanumeric order of the text file paths
            (obtained via `os.walk(directory)` in Python).
        label_mode: String describing the encoding of `labels`. Options are:
            - `"int"`: means that the labels are encoded as integers
                (e.g. for `sparse_categorical_crossentropy` loss).
            - `"categorical"` means that the labels are
                encoded as a categorical vector
                (e.g. for `categorical_crossentropy` loss).
            - `"binary"` means that the labels (there can be only 2)
                are encoded as `float32` scalars with values 0 or 1
                (e.g. for `binary_crossentropy`).
            - `None` (no labels).
        class_names: Only valid if `"labels"` is `"inferred"`.
            This is the explicit list of class names
            (must match names of subdirectories). Used to control the order
            of the classes (otherwise alphanumerical order is used).
        batch_size: Size of the batches of data.
            If `None`, the data will not be batched
            (the dataset will yield individual samples).
            Defaults to `32`.
        max_length: Maximum size of a text string. Texts longer than this will
            be truncated to `max_length`.
        shuffle: Whether to shuffle the data.
            If set to `False`, sorts the data in alphanumeric order.
            Defaults to `True`.
        seed: Optional random seed for shuffling and transformations.
        validation_split: Optional float between 0 and 1,
            fraction of data to reserve for validation.
        subset: Subset of the data to return.
            One of `"training"`, `"validation"` or `"both"`.
            Only used if `validation_split` is set.
            When `subset="both"`, the utility returns a tuple of two datasets
            (the training and validation datasets respectively).
        follow_links: Whether to visits subdirectories pointed to by symlinks.
            Defaults to `False`.
        verbose: Whether to display number information on classes and
            number of files found. Defaults to `True`.

    Returns:

    A `tf.data.Dataset` object.

    - If `label_mode` is `None`, it yields `string` tensors of shape
        `(batch_size,)`, containing the contents of a batch of text files.
    - Otherwise, it yields a tuple `(texts, labels)`, where `texts`
        has shape `(batch_size,)` and `labels` follows the format described
        below.

    Rules regarding labels format:

    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
        `(batch_size,)`.
    - if `label_mode` is `binary`, the labels are a `float32` tensor of
        1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorical`, the labels are a `float32` tensor
        of shape `(batch_size, num_classes)`, representing a one-hot
        encoding of the class index.
    """
    # Validate the `labels` argument: it must be "inferred", None, or an
    # explicit list/tuple of per-file integer labels.
    if labels not in ("inferred", None):
        if not isinstance(labels, (list, tuple)):
            raise ValueError(
                "`labels` argument should be a list/tuple of integer labels, "
                "of the same size as the number of text files in the target "
                "directory. If you wish to infer the labels from the "
                "subdirectory names in the target directory, "
                'pass `labels="inferred"`. '
                "If you wish to get a dataset that only contains text samples "
                f"(no labels), pass `labels=None`. Received: labels={labels}"
            )
        # `class_names` only makes sense when labels are inferred from
        # subdirectory names.
        if class_names:
            raise ValueError(
                "You can only pass `class_names` if "
                f'`labels="inferred"`. Received: labels={labels}, and '
                f"class_names={class_names}"
            )
    if label_mode not in {"int", "categorical", "binary", None}:
        raise ValueError(
            '`label_mode` argument must be one of "int", '
            '"categorical", "binary", '
            f"or None. Received: label_mode={label_mode}"
        )
    # Disabling either labels or label_mode disables both: an unlabeled
    # dataset is produced in either case.
    if labels is None or label_mode is None:
        labels = None
        label_mode = None
    dataset_utils.check_validation_split_arg(
        validation_split, subset, shuffle, seed
    )

    # A concrete seed is needed so that the training and validation splits
    # are drawn from the same shuffled ordering.
    if seed is None:
        seed = np.random.randint(1e6)
    file_paths, labels, class_names = dataset_utils.index_directory(
        directory,
        labels,
        formats=(".txt",),
        class_names=class_names,
        shuffle=shuffle,
        seed=seed,
        follow_links=follow_links,
        verbose=verbose,
    )

    if label_mode == "binary" and len(class_names) != 2:
        raise ValueError(
            'When passing `label_mode="binary"`, there must be exactly 2 '
            f"class_names. Received: class_names={class_names}"
        )
    # Shuffle buffer scales with batch size when batching; otherwise use a
    # fixed default.
    if batch_size is not None:
        shuffle_buffer_size = batch_size * 8
    else:
        shuffle_buffer_size = 1024

    if subset == "both":
        # Build both the training and validation datasets and return them as
        # a (train, val) pair.
        (
            file_paths_train,
            labels_train,
        ) = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, "training"
        )
        (
            file_paths_val,
            labels_val,
        ) = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, "validation"
        )
        if not file_paths_train:
            raise ValueError(
                f"No training text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        if not file_paths_val:
            raise ValueError(
                f"No validation text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        train_dataset = paths_and_labels_to_dataset(
            file_paths=file_paths_train,
            labels=labels_train,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
        )
        # The validation set is never shuffled.
        val_dataset = paths_and_labels_to_dataset(
            file_paths=file_paths_val,
            labels=labels_val,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=False,
        )

        if batch_size is not None:
            train_dataset = train_dataset.batch(batch_size)
            val_dataset = val_dataset.batch(batch_size)

        train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
        val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)

        # Users may need to reference `class_names`.
        train_dataset.class_names = class_names
        val_dataset.class_names = class_names
        dataset = [train_dataset, val_dataset]
    else:
        # Single subset (or no split at all when `validation_split` is None).
        file_paths, labels = dataset_utils.get_training_or_validation_split(
            file_paths, labels, validation_split, subset
        )
        if not file_paths:
            raise ValueError(
                f"No text files found in directory {directory}. "
                "Allowed format: .txt"
            )
        dataset = paths_and_labels_to_dataset(
            file_paths=file_paths,
            labels=labels,
            label_mode=label_mode,
            num_classes=len(class_names) if class_names else 0,
            max_length=max_length,
            shuffle=shuffle,
            shuffle_buffer_size=shuffle_buffer_size,
            seed=seed,
        )
        if batch_size is not None:
            dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(tf.data.AUTOTUNE)

        # Users may need to reference `class_names`.
        dataset.class_names = class_names
    return dataset
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def paths_and_labels_to_dataset(
    file_paths,
    labels,
    label_mode,
    num_classes,
    max_length,
    shuffle=False,
    shuffle_buffer_size=None,
    seed=None,
):
    """Constructs a dataset of text strings and labels.

    File paths are turned into a `tf.data.Dataset`, optionally zipped with a
    label dataset, optionally shuffled, and finally mapped through
    `path_to_string_content` so each element becomes the file's contents
    (truncated to `max_length` when set).
    """
    paths_dataset = tf.data.Dataset.from_tensor_slices(file_paths)
    has_labels = bool(label_mode)

    if has_labels:
        labels_dataset = dataset_utils.labels_to_dataset(
            labels, label_mode, num_classes
        )
        combined = tf.data.Dataset.zip((paths_dataset, labels_dataset))
    else:
        combined = paths_dataset

    if shuffle:
        # Shuffle before reading file contents so the buffer holds paths,
        # not full texts.
        combined = combined.shuffle(
            buffer_size=shuffle_buffer_size or 1024, seed=seed
        )

    if has_labels:
        combined = combined.map(
            lambda path, label: (path_to_string_content(path, max_length), label),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    else:
        combined = combined.map(
            lambda path: path_to_string_content(path, max_length),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    return combined
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def path_to_string_content(path, max_length):
    """Read the file at `path` into a scalar string tensor.

    When `max_length` is not `None`, the contents are truncated to the
    first `max_length` bytes.
    """
    contents = tf.io.read_file(path)
    if max_length is None:
        return contents
    return tf.strings.substr(contents, 0, max_length)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/tf_utils.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src import backend
|
| 2 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def get_tensor_spec(t, dynamic_batch=False, name=None):
    """Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`.

    Args:
        t: A `tf.Tensor`, `tf.TypeSpec`, composite tensor, or any object
            with `shape` and `dtype` attributes. Other values pass through
            as `None`.
        dynamic_batch: If `True`, the leading (batch) dimension of the
            returned spec's shape is set to `None`.
        name: Optional name for a newly created `tf.TensorSpec`; ignored
            when `t` already carries a spec.

    Returns:
        A `tf.TypeSpec` describing `t`, or `None` for non-tensor inputs.
    """
    if isinstance(t, tf.TypeSpec):
        spec = t
    elif isinstance(t, tf.__internal__.CompositeTensor):
        # Check for ExtensionTypes
        spec = t._type_spec
    elif hasattr(t, "shape") and hasattr(t, "dtype"):
        spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
    else:
        return None  # Allow non-Tensors to pass through.

    if not dynamic_batch:
        return spec

    # Unknown-rank or scalar specs have no batch dimension to relax.
    shape = spec.shape
    if shape.rank is None or shape.rank == 0:
        return spec

    shape_list = shape.as_list()
    shape_list[0] = None
    shape = tf.TensorShape(shape_list)
    # NOTE(review): mutates the spec's private `_shape` in place rather than
    # building a new spec — when `t` was already a TypeSpec, this modifies
    # the caller's object.
    spec._shape = shape
    return spec
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def ensure_tensor(inputs, dtype=None):
    """Ensures the input is a Tensor, SparseTensor or RaggedTensor.

    Non-TF values are converted with `tf.convert_to_tensor`; torch tensors
    are first routed through numpy. If `dtype` is given, the result is cast
    to it when necessary.
    """
    already_tf = isinstance(
        inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)
    )
    if not already_tf:
        if backend.backend() == "torch" and backend.is_tensor(inputs):
            # Plain `np.asarray()` conversion fails with PyTorch.
            inputs = backend.convert_to_numpy(inputs)
        inputs = tf.convert_to_tensor(inputs, dtype)
    if dtype is not None and inputs.dtype != dtype:
        inputs = tf.cast(inputs, dtype)
    return inputs
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def is_ragged_tensor(x):
    """Return `True` if `x` looks like a `tf.RaggedTensor`.

    The check inspects the string form of the value's type, so it does not
    require TensorFlow to be imported.
    """
    type_repr = str(type(x))
    return "ragged_tensor.RaggedTensor" in type_repr
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):
    """Apply binary or count encoding to an input and return a sparse tensor.

    The bincount is computed with `tf.sparse.bincount` over the last axis,
    cast to `dtype`, and rebuilt with an explicit dense shape of `(depth,)`
    for rank-1 input or `(batch, depth)` otherwise.
    """
    counts = tf.sparse.bincount(
        inputs,
        weights=count_weights,
        minlength=depth,
        maxlength=depth,
        axis=-1,
        binary_output=binary_output,
    )
    counts = tf.cast(counts, dtype)
    if inputs.shape.rank == 1:
        dense_shape = (depth,)
    else:
        dense_shape = (tf.shape(counts)[0], depth)
    return tf.SparseTensor(
        indices=counts.indices,
        values=counts.values,
        dense_shape=dense_shape,
    )
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def dense_bincount(inputs, depth, binary_output, dtype, count_weights=None):
    """Apply binary or count encoding to an input.

    Wraps `tf.math.bincount` over the last axis and pins the static output
    shape to `(depth,)` for rank-1 input or `(batch, depth)` otherwise.
    """
    counts = tf.math.bincount(
        inputs,
        weights=count_weights,
        minlength=depth,
        maxlength=depth,
        dtype=dtype,
        axis=-1,
        binary_output=binary_output,
    )
    if inputs.shape.rank == 1:
        static_shape = tf.TensorShape((depth,))
    else:
        static_shape = tf.TensorShape((inputs.shape.as_list()[0], depth))
    counts.set_shape(static_shape)
    return counts
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def expand_dims(inputs, axis):
    """Expand dims on sparse, ragged, or dense tensors."""
    expander = (
        tf.sparse.expand_dims
        if isinstance(inputs, tf.SparseTensor)
        else tf.expand_dims
    )
    return expander(inputs, axis)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def tf_encode_categorical_inputs(
    inputs,
    output_mode,
    depth,
    dtype="float32",
    sparse=False,
    count_weights=None,
    idf_weights=None,
):
    """Encodes categorical inputs according to output_mode.

    Faster method that relies on bincount.

    Args:
        inputs: Integer tensor of category indices.
        output_mode: One of `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`,
            or `"tf_idf"`.
        depth: Number of categories (size of the encoded axis).
        dtype: Output dtype.
        sparse: If `True`, return a `tf.SparseTensor`.
        count_weights: Optional per-element weights for count encoding.
        idf_weights: Required when `output_mode="tf_idf"`; per-category
            inverse-document-frequency weights.

    Returns:
        The encoded tensor (sparse or dense, per `sparse`).
    """

    # "int" mode is a pass-through: only cast to the requested dtype.
    if output_mode == "int":
        return tf.identity(tf.cast(inputs, dtype))

    original_shape = inputs.shape
    # In all cases, we should uprank scalar input to a single sample.
    if inputs.shape.rank == 0:
        inputs = expand_dims(inputs, -1)
    # One hot will uprank only if the final output dimension is not already 1.
    if output_mode == "one_hot":
        if inputs.shape[-1] != 1:
            inputs = expand_dims(inputs, -1)

    # The bincount-based encodings below only support up to rank-2 input.
    if inputs.shape.rank > 2:
        raise ValueError(
            "When output_mode is not `'int'`, maximum supported output rank "
            f"is 2. Received output_mode {output_mode} and input shape "
            f"{original_shape}, "
            f"which would result in output rank {inputs.shape.rank}."
        )

    # "multi_hot"/"one_hot" want 0/1 indicators; "count"/"tf_idf" want counts.
    binary_output = output_mode in ("multi_hot", "one_hot")
    if sparse:
        bincounts = sparse_bincount(
            inputs, depth, binary_output, dtype, count_weights
        )
    else:
        bincounts = dense_bincount(
            inputs, depth, binary_output, dtype, count_weights
        )

    bincounts = tf.cast(bincounts, dtype)
    if output_mode != "tf_idf":
        return bincounts

    if idf_weights is None:
        raise ValueError(
            "When output mode is `'tf_idf'`, idf_weights must be provided. "
            f"Received: output_mode={output_mode} and idf_weights={idf_weights}"
        )

    if sparse:
        # Scale each non-zero count by the idf weight of its category
        # (the last coordinate of each sparse index).
        value_weights = tf.gather(idf_weights, bincounts.indices[:, -1])
        return tf.SparseTensor(
            bincounts.indices,
            value_weights * bincounts.values,
            bincounts.dense_shape,
        )
    else:
        # Dense case: broadcast the per-category idf weights over the batch.
        return tf.multiply(bincounts, idf_weights)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/timeseries_dataset_utils.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from keras.src.api_export import keras_export
|
| 4 |
+
from keras.src.utils.module_utils import tensorflow as tf
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@keras_export(
    [
        "keras.utils.timeseries_dataset_from_array",
        "keras.preprocessing.timeseries_dataset_from_array",
    ]
)
def timeseries_dataset_from_array(
    data,
    targets,
    sequence_length,
    sequence_stride=1,
    sampling_rate=1,
    batch_size=128,
    shuffle=False,
    seed=None,
    start_index=None,
    end_index=None,
):
    """Creates a dataset of sliding windows over a timeseries provided as array.

    This function takes in a sequence of data-points gathered at
    equal intervals, along with time series parameters such as
    length of the sequences/windows, spacing between two sequence/windows, etc.,
    to produce batches of timeseries inputs and targets.

    Args:
        data: Numpy array or eager tensor
            containing consecutive data points (timesteps).
            Axis 0 is expected to be the time dimension.
        targets: Targets corresponding to timesteps in `data`.
            `targets[i]` should be the target
            corresponding to the window that starts at index `i`
            (see example 2 below).
            Pass `None` if you don't have target data (in this case the dataset
            will only yield the input data).
        sequence_length: Length of the output sequences
            (in number of timesteps).
        sequence_stride: Period between successive output sequences.
            For stride `s`, output samples would
            start at index `data[i]`, `data[i + s]`, `data[i + 2 * s]`, etc.
        sampling_rate: Period between successive individual timesteps
            within sequences. For rate `r`, timesteps
            `data[i], data[i + r], ... data[i + sequence_length]`
            are used for creating a sample sequence.
        batch_size: Number of timeseries samples in each batch
            (except maybe the last one). If `None`, the data will not be batched
            (the dataset will yield individual samples).
        shuffle: Whether to shuffle output samples,
            or instead draw them in chronological order.
        seed: Optional int; random seed for shuffling.
        start_index: Optional int; data points earlier (exclusive)
            than `start_index` will not be used
            in the output sequences. This is useful to reserve part of the
            data for test or validation.
        end_index: Optional int; data points later (exclusive) than `end_index`
            will not be used in the output sequences.
            This is useful to reserve part of the data for test or validation.

    Returns:

    A `tf.data.Dataset` instance. If `targets` was passed, the dataset yields
    tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields
    only `batch_of_sequences`.

    Example 1:

    Consider indices `[0, 1, ... 98]`.
    With `sequence_length=10, sampling_rate=2, sequence_stride=3`,
    `shuffle=False`, the dataset will yield batches of sequences
    composed of the following indices:

    ```
    First sequence:  [0  2  4  6  8 10 12 14 16 18]
    Second sequence: [3  5  7  9 11 13 15 17 19 21]
    Third sequence:  [6  8 10 12 14 16 18 20 22 24]
    ...
    Last sequence:   [78 80 82 84 86 88 90 92 94 96]
    ```

    In this case the last 2 data points are discarded since no full sequence
    can be generated to include them (the next sequence would have started
    at index 81, and thus its last step would have gone over 98).

    Example 2: Temporal regression.

    Consider an array `data` of scalar values, of shape `(steps,)`.
    To generate a dataset that uses the past 10
    timesteps to predict the next timestep, you would use:

    ```python
    input_data = data[:-10]
    targets = data[10:]
    dataset = timeseries_dataset_from_array(
        input_data, targets, sequence_length=10)
    for batch in dataset:
      inputs, targets = batch
      assert np.array_equal(inputs[0], data[:10])  # First sequence: steps [0-9]
      # Corresponding target: step 10
      assert np.array_equal(targets[0], data[10])
      break
    ```

    Example 3: Temporal regression for many-to-many architectures.

    Consider two arrays of scalar values `X` and `Y`,
    both of shape `(100,)`. The resulting dataset should consist samples with
    20 timestamps each. The samples should not overlap.
    To generate a dataset that uses the current timestamp
    to predict the corresponding target timestep, you would use:

    ```python
    X = np.arange(100)
    Y = X*2

    sample_length = 20
    input_dataset = timeseries_dataset_from_array(
        X, None, sequence_length=sample_length, sequence_stride=sample_length)
    target_dataset = timeseries_dataset_from_array(
        Y, None, sequence_length=sample_length, sequence_stride=sample_length)

    for batch in zip(input_dataset, target_dataset):
      inputs, targets = batch
      assert np.array_equal(inputs[0], X[:sample_length])

      # second sample equals output timestamps 20-40
      assert np.array_equal(targets[1], Y[sample_length:2*sample_length])
      break
    ```
    """
    # NOTE(review): truthiness check means `start_index=0` (a valid value)
    # skips validation entirely — harmless, since 0 passes both checks.
    if start_index:
        if start_index < 0:
            raise ValueError(
                "`start_index` must be 0 or greater. Received: "
                f"start_index={start_index}"
            )
        if start_index >= len(data):
            raise ValueError(
                "`start_index` must be lower than the length of the "
                f"data. Received: start_index={start_index}, for data "
                f"of length {len(data)}"
            )
    if end_index:
        if start_index and end_index <= start_index:
            raise ValueError(
                "`end_index` must be higher than `start_index`. "
                f"Received: start_index={start_index}, and "
                f"end_index={end_index} "
            )
        if end_index >= len(data):
            raise ValueError(
                "`end_index` must be lower than the length of the "
                f"data. Received: end_index={end_index}, for data of "
                f"length {len(data)}"
            )
        if end_index <= 0:
            raise ValueError(
                "`end_index` must be higher than 0. "
                f"Received: end_index={end_index}"
            )

    # Validate strides
    if sampling_rate <= 0:
        raise ValueError(
            "`sampling_rate` must be higher than 0. Received: "
            f"sampling_rate={sampling_rate}"
        )
    if sampling_rate >= len(data):
        raise ValueError(
            "`sampling_rate` must be lower than the length of the "
            f"data. Received: sampling_rate={sampling_rate}, for data "
            f"of length {len(data)}"
        )
    if sequence_stride <= 0:
        raise ValueError(
            "`sequence_stride` must be higher than 0. Received: "
            f"sequence_stride={sequence_stride}"
        )
    if sequence_stride >= len(data):
        raise ValueError(
            "`sequence_stride` must be lower than the length of the "
            f"data. Received: sequence_stride={sequence_stride}, for "
            f"data of length {len(data)}"
        )

    if start_index is None:
        start_index = 0
    if end_index is None:
        end_index = len(data)

    # Determine the lowest dtype to store start positions (to lower memory
    # usage).
    num_seqs = end_index - start_index - (sequence_length - 1) * sampling_rate
    if targets is not None:
        # Each window needs a target at its start index, so the number of
        # windows is capped by the number of targets.
        num_seqs = min(num_seqs, len(targets))
    if num_seqs < 2147483647:
        index_dtype = "int32"
    else:
        index_dtype = "int64"

    # Generate start positions
    start_positions = np.arange(0, num_seqs, sequence_stride, dtype=index_dtype)
    if shuffle:
        if seed is None:
            seed = np.random.randint(1e6)
        rng = np.random.RandomState(seed)
        rng.shuffle(start_positions)

    sequence_length = tf.cast(sequence_length, dtype=index_dtype)
    sampling_rate = tf.cast(sampling_rate, dtype=index_dtype)

    # Repeat the (possibly shuffled) start positions so they can be zipped
    # with an index range below.
    positions_ds = tf.data.Dataset.from_tensors(start_positions).repeat()

    # For each initial window position, generates indices of the window elements
    indices = tf.data.Dataset.zip(
        (tf.data.Dataset.range(len(start_positions)), positions_ds)
    ).map(
        lambda i, positions: tf.range(
            positions[i],
            positions[i] + sequence_length * sampling_rate,
            sampling_rate,
        ),
        num_parallel_calls=tf.data.AUTOTUNE,
    )

    dataset = sequences_from_indices(data, indices, start_index, end_index)
    if targets is not None:
        # Targets are gathered one element per window (at the window's start
        # index), rather than as full windows.
        indices = tf.data.Dataset.zip(
            (tf.data.Dataset.range(len(start_positions)), positions_ds)
        ).map(
            lambda i, positions: positions[i],
            num_parallel_calls=tf.data.AUTOTUNE,
        )
        target_ds = sequences_from_indices(
            targets, indices, start_index, end_index
        )
        dataset = tf.data.Dataset.zip((dataset, target_ds))
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    if batch_size is not None:
        if shuffle:
            # Shuffle locally at each iteration
            dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
        dataset = dataset.batch(batch_size)
    else:
        if shuffle:
            dataset = dataset.shuffle(buffer_size=1024, seed=seed)
    return dataset
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def sequences_from_indices(array, indices_ds, start_index, end_index):
    """Gather windows from `array[start_index:end_index]` per index tensor.

    For every element of `indices_ds`, yields the corresponding slice of the
    (restricted) array via `tf.gather`.
    """
    window = array[start_index:end_index]
    base_ds = tf.data.Dataset.from_tensors(window).repeat()
    return tf.data.Dataset.zip((base_ds, indices_ds)).map(
        lambda steps, idx: tf.gather(steps, idx),
        num_parallel_calls=tf.data.AUTOTUNE,
    )
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/torch_utils.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
|
| 3 |
+
from packaging.version import parse
|
| 4 |
+
|
| 5 |
+
from keras.src import backend
|
| 6 |
+
from keras.src.api_export import keras_export
|
| 7 |
+
from keras.src.layers import Layer
|
| 8 |
+
from keras.src.ops import convert_to_numpy
|
| 9 |
+
from keras.src.ops import convert_to_tensor
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@keras_export("keras.layers.TorchModuleWrapper")
class TorchModuleWrapper(Layer):
    """Torch module wrapper layer.

    `TorchModuleWrapper` is a wrapper class that can turn any
    `torch.nn.Module` into a Keras layer, in particular by making its
    parameters trackable by Keras.

    `TorchModuleWrapper` is only compatible with the PyTorch backend and
    cannot be used with the TensorFlow or JAX backends.

    Args:
        module: `torch.nn.Module` instance. If it's a `LazyModule`
            instance, then its parameters must be initialized before
            passing the instance to `TorchModuleWrapper` (e.g. by calling
            it once).
        name: The name of the layer (string).

    Example:

    Here's an example of how the `TorchModuleWrapper` can be used with vanilla
    PyTorch modules.

    ```python
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    import keras
    from keras.layers import TorchModuleWrapper

    class Classifier(keras.Model):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Wrap `torch.nn.Module`s with `TorchModuleWrapper`
            # if they contain parameters
            self.conv1 = TorchModuleWrapper(
                nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3))
            )
            self.conv2 = TorchModuleWrapper(
                nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))
            )
            self.pool = nn.MaxPool2d(kernel_size=(2, 2))
            self.flatten = nn.Flatten()
            self.dropout = nn.Dropout(p=0.5)
            self.fc = TorchModuleWrapper(nn.Linear(1600, 10))

        def call(self, inputs):
            x = F.relu(self.conv1(inputs))
            x = self.pool(x)
            x = F.relu(self.conv2(x))
            x = self.pool(x)
            x = self.flatten(x)
            x = self.dropout(x)
            x = self.fc(x)
            return F.softmax(x, dim=1)


    model = Classifier()
    model.build((1, 28, 28))
    print("Output shape:", model(torch.ones(1, 1, 28, 28).to("cuda")).shape)

    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer="adam",
        metrics=["accuracy"]
    )
    model.fit(train_loader, epochs=5)
    ```
    """

    def __init__(self, module, name=None, **kwargs):
        super().__init__(name=name, **kwargs)
        # Torch imports are local so that this module can be imported
        # without torch installed (e.g. under other backends).
        import torch.nn as nn

        from keras.src.backend.torch.core import get_device

        # An uninitialized LazyModule has no concrete parameters yet, so
        # there is nothing to track; require the caller to initialize it
        # first (e.g. by calling it once).
        if (
            isinstance(module, nn.modules.lazy.LazyModuleMixin)
            and module.has_uninitialized_params()
        ):
            raise ValueError(
                "LazyModules are not supported unless they "
                "are already initialized. "
                f"Received uninitialized LazyModule: module={module}"
            )

        # Move the module to the backend's current device and register its
        # parameters as Keras variables.
        self.module = module.to(get_device())
        self._track_module_parameters()

    def parameters(self, recurse=True):
        # Expose the wrapped module's parameters with the torch API shape.
        return self.module.parameters(recurse=recurse)

    def _track_module_parameters(self):
        """Wrap each torch parameter in a Keras `Variable` and track it."""
        for param in self.module.parameters():
            # The Variable will reuse the raw `param`
            # and simply wrap it.
            variable = backend.Variable(
                initializer=param, trainable=param.requires_grad
            )
            self._track_variable(variable)
        self.built = True

    def call(self, *args, training=None, **kwargs):
        # Mirror Keras' training flag onto torch's train/eval mode.
        # Note: `training=None` falls through to train mode here.
        if training is False:
            self.eval()
        else:
            self.train()
        return self.module(*args, **kwargs)

    def save_own_variables(self, store):
        """Saves model's state from `state_dict`.
        `model.parameters` excludes some of model's state like
        `BatchNorm` mean and variance. So, use `state_dict` to obtain
        all of model's state.
        """
        state_dict = self.module.state_dict()
        for key in state_dict.keys():
            store[key] = convert_to_numpy(state_dict[key])

    def load_own_variables(self, store):
        """Loads model's state via `state_dict`."""
        state_dict = {}
        for key in store.keys():
            # h5 stores may yield bytes keys; normalize to str for torch.
            if isinstance(key, bytes):
                key = key.decode()
            state_dict[key] = convert_to_tensor(store[key])
        self.module.load_state_dict(state_dict)

    def get_config(self):
        # The whole torch module is pickled via `torch.save` and stored
        # as raw bytes in the config.
        base_config = super().get_config()
        import torch

        buffer = io.BytesIO()
        torch.save(self.module, buffer)
        config = {"module": buffer.getvalue()}
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config):
        # NOTE(review): `torch.load` unpickles arbitrary code — configs
        # should only be loaded from trusted sources.
        import torch

        if "module" in config:
            buffer = io.BytesIO(config["module"])
            config["module"] = torch.load(buffer)
        return cls(**config)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def no_grad(orig_func):
    """Wrap `orig_func` with `torch.no_grad` when the installed torch
    supports using it as a function decorator (torch >= 2.1.0); otherwise
    return `orig_func` unchanged.
    """
    import torch

    decorator_supported = parse(torch.__version__) >= parse("2.1.0")
    return torch.no_grad(orig_func) if decorator_supported else orig_func
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
import os
|
| 3 |
+
import traceback
|
| 4 |
+
import types
|
| 5 |
+
from functools import wraps
|
| 6 |
+
|
| 7 |
+
from keras.src import backend
|
| 8 |
+
from keras.src import tree
|
| 9 |
+
from keras.src.api_export import keras_export
|
| 10 |
+
from keras.src.backend.common import global_state
|
| 11 |
+
|
| 12 |
+
_EXCLUDED_PATHS = (
|
| 13 |
+
os.path.abspath(os.path.join(__file__, "..", "..")),
|
| 14 |
+
os.path.join("tensorflow", "python"),
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@keras_export("keras.config.enable_traceback_filtering")
def enable_traceback_filtering():
    """Turn on traceback filtering.

    Raw Keras tracebacks (also known as stack traces)
    involve many internal frames, which can be
    challenging to read through, while not being actionable for end users.
    By default, Keras filters internal frames in most exceptions that it
    raises, to keep traceback short, readable, and focused on what's
    actionable for you (your own code).

    See also `keras.config.disable_traceback_filtering()` and
    `keras.config.is_traceback_filtering_enabled()`.

    If you have previously disabled traceback filtering via
    `keras.config.disable_traceback_filtering()`, you can re-enable it via
    `keras.config.enable_traceback_filtering()`.
    """
    # Stored in Keras global state; read by
    # `is_traceback_filtering_enabled()` below.
    global_state.set_global_attribute("traceback_filtering", True)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@keras_export("keras.config.disable_traceback_filtering")
def disable_traceback_filtering():
    """Turn off traceback filtering.

    Raw Keras tracebacks (also known as stack traces)
    involve many internal frames, which can be
    challenging to read through, while not being actionable for end users.
    By default, Keras filters internal frames in most exceptions that it
    raises, to keep traceback short, readable, and focused on what's
    actionable for you (your own code).

    See also `keras.config.enable_traceback_filtering()` and
    `keras.config.is_traceback_filtering_enabled()`.

    If you have previously disabled traceback filtering via
    `keras.config.disable_traceback_filtering()`, you can re-enable it via
    `keras.config.enable_traceback_filtering()`.
    """
    # Stored in Keras global state; read by
    # `is_traceback_filtering_enabled()` below.
    global_state.set_global_attribute("traceback_filtering", False)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@keras_export("keras.config.is_traceback_filtering_enabled")
def is_traceback_filtering_enabled():
    """Check if traceback filtering is enabled.

    Raw Keras tracebacks (also known as stack traces)
    involve many internal frames, which can be
    challenging to read through, while not being actionable for end users.
    By default, Keras filters internal frames in most exceptions that it
    raises, to keep traceback short, readable, and focused on what's
    actionable for you (your own code).

    See also `keras.config.enable_traceback_filtering()` and
    `keras.config.disable_traceback_filtering()`.

    If you have previously disabled traceback filtering via
    `keras.config.disable_traceback_filtering()`, you can re-enable it via
    `keras.config.enable_traceback_filtering()`.

    Returns:
        Boolean, `True` if traceback filtering is enabled,
        and `False` otherwise.
    """
    # Filtering defaults to True when the attribute has never been set.
    return global_state.get_global_attribute("traceback_filtering", True)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def include_frame(fname):
    """Return `True` if a traceback frame from file `fname` should be kept.

    A frame is dropped when its filename contains any of the substrings in
    `_EXCLUDED_PATHS` (Keras internals and `tensorflow/python`).
    """
    return not any(exclusion in fname for exclusion in _EXCLUDED_PATHS)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _process_traceback_frames(tb):
    """Iterate through traceback frames and return a new, filtered traceback."""
    last_tb = None
    tb_list = list(traceback.walk_tb(tb))
    # Rebuild the traceback linked list from the innermost frame outward,
    # keeping only frames whose file passes `include_frame`.
    for f, line_no in reversed(tb_list):
        if include_frame(f.f_code.co_filename):
            last_tb = types.TracebackType(last_tb, f, f.f_lasti, line_no)
    if last_tb is None and tb_list:
        # If no frames were kept during filtering, create a new traceback
        # from the outermost function.
        f, line_no = tb_list[-1]
        last_tb = types.TracebackType(last_tb, f, f.f_lasti, line_no)
    # May be None when `tb` had no frames at all.
    return last_tb
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def filter_traceback(fn):
    """Filter out Keras-internal traceback frames in exceptions raised by fn."""

    @wraps(fn)
    def error_handler(*args, **kwargs):
        # When filtering is globally disabled, call through untouched.
        if not is_traceback_filtering_enabled():
            return fn(*args, **kwargs)

        filtered_tb = None
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            filtered_tb = _process_traceback_frames(e.__traceback__)
            # To get the full stack trace, call:
            # `keras.config.disable_traceback_filtering()`
            # `from None` suppresses the implicit exception-chaining context.
            raise e.with_traceback(filtered_tb) from None
        finally:
            # Drop the local reference to the traceback to avoid keeping
            # frames (and everything they reference) alive via a cycle.
            del filtered_tb

    return error_handler
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def inject_argument_info_in_traceback(fn, object_name=None):
    """Add information about call argument values to an error message.

    Arguments:
        fn: Function to wrap. Exceptions raised by this function will be
            re-raised with additional information added to the error message,
            displaying the values of the different arguments that the function
            was called with.
        object_name: String, display name of the class/function being called,
            e.g. `'layer "layer_name" (LayerClass)'`.

    Returns:
        A wrapped version of `fn`.
    """
    # Import TF error types lazily and only under the TF backend, so the
    # OpError-specific handling below can be skipped elsewhere.
    if backend.backend() == "tensorflow":
        from tensorflow import errors as tf_errors
    else:
        tf_errors = None

    @wraps(fn)
    def error_handler(*args, **kwargs):
        if not is_traceback_filtering_enabled():
            return fn(*args, **kwargs)

        signature = None
        bound_signature = None
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            if hasattr(e, "_keras_call_info_injected"):
                # Only inject info for the innermost failing call
                raise e
            signature = inspect.signature(fn)
            try:
                # The first argument is `self`, so filter it out
                bound_signature = signature.bind(*args, **kwargs)
            except TypeError:
                # Likely unbindable arguments
                raise e

            # Add argument context
            arguments_context = []
            for arg in list(signature.parameters.values()):
                if arg.name in bound_signature.arguments:
                    # Summarize tensors etc. via `format_argument_value`,
                    # applied leaf-wise through nested structures.
                    value = tree.map_structure(
                        format_argument_value,
                        bound_signature.arguments[arg.name],
                    )
                else:
                    value = arg.default
                arguments_context.append(f" • {arg.name}={value}")
            if arguments_context:
                arguments_context = "\n".join(arguments_context)
                # Get original error message and append information to it.
                if tf_errors is not None and isinstance(e, tf_errors.OpError):
                    message = e.message
                elif e.args:
                    # Canonically, the 1st argument in an exception is the error
                    # message. This works for all built-in Python exceptions.
                    message = e.args[0]
                else:
                    message = ""
                display_name = f"{object_name if object_name else fn.__name__}"
                # `\x1b[1m`/`\x1b[0m` are ANSI bold on/off escapes.
                message = (
                    f"Exception encountered when calling {display_name}.\n\n"
                    f"\x1b[1m{message}\x1b[0m\n\n"
                    f"Arguments received by {display_name}:\n"
                    f"{arguments_context}"
                )

                # Reraise exception, with added context
                if tf_errors is not None and isinstance(e, tf_errors.OpError):
                    new_e = e.__class__(e.node_def, e.op, message, e.error_code)
                else:
                    try:
                        # For standard exceptions such as ValueError, TypeError,
                        # etc.
                        new_e = e.__class__(message)
                    except TypeError:
                        # For any custom error that doesn't have a standard
                        # signature.
                        new_e = RuntimeError(message)
                    # Mark so outer wrappers skip re-injection (checked above).
                    new_e._keras_call_info_injected = True
            else:
                new_e = e
            raise new_e.with_traceback(e.__traceback__) from None
        finally:
            # Drop locals that may reference frames to avoid reference cycles.
            del signature
            del bound_signature

    return error_handler
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def format_argument_value(value):
    """Return a short, readable string form of a call-argument value.

    Tensors are summarized as `<cls>(shape=..., dtype=...)` instead of
    printing their full contents; any other value falls back to `repr`.
    """
    if not backend.is_tensor(value):
        return repr(value)

    # Display name of the native tensor class for each backend; unknown
    # backends fall back to the generic "array".
    tensor_cls_by_backend = {
        "tensorflow": "tf.Tensor",
        "jax": "jnp.ndarray",
        "torch": "torch.Tensor",
        "numpy": "np.ndarray",
    }
    tensor_cls = tensor_cls_by_backend.get(backend.backend(), "array")
    dtype = backend.standardize_dtype(value.dtype)
    return f"{tensor_cls}(shape={value.shape}, dtype={dtype})"
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/utils/tracking.py
ADDED
|
@@ -0,0 +1,290 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import wraps
|
| 2 |
+
|
| 3 |
+
from keras.src import tree
|
| 4 |
+
from keras.src.backend.common.global_state import get_global_attribute
|
| 5 |
+
from keras.src.backend.common.global_state import set_global_attribute
|
| 6 |
+
from keras.src.utils import python_utils
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class DotNotTrackScope:
    # Context manager that temporarily disables attribute tracking within
    # its `with` block, restoring the previous setting on exit.
    # NOTE(review): the name looks like a typo for "DoNotTrackScope"; kept
    # as-is since code elsewhere may reference it by this name.
    def __enter__(self):
        # Remember the prior setting so nested scopes restore correctly.
        self.original_value = is_tracking_enabled()
        set_global_attribute("tracking_on", False)

    def __exit__(self, *args, **kwargs):
        set_global_attribute("tracking_on", self.original_value)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def is_tracking_enabled():
    """Return whether attribute tracking is currently enabled (default True)."""
    return get_global_attribute("tracking_on", True)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def no_automatic_dependency_tracking(fn):
    """Decorator that runs `fn` with attribute tracking disabled."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Tracking is switched off for the duration of the call only.
        with DotNotTrackScope():
            return fn(*args, **kwargs)

    return wrapper
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class Tracker:
    """Attribute tracker, used for e.g. Variable tracking.

    Monitors certain attribute types
    and put them in appropriate lists in case of a match.

    Also passively tracks certain mutable collections
    (dict, list) so that items added to them later
    still get tracked. This is done by wrapping these
    collections into an equivalent, tracking-aware object.

    Example:

    ```python
    def __init__(self):
        self.tracker = Tracker(
            # Format: `name: (test_fn, store)`
            {
                "variables":
                    (lambda x: isinstance(x, Variable), self._variables),
                "metrics": (lambda x: isinstance(x, Metric), self._metrics),
                "layers": (lambda x: isinstance(x, Layer), self._layers),
            }
        )

    def __setattr__(self, name, value):
        if hasattr(self, "_tracker"):
            value = self._tracker.track(value)
        return super().__setattr__(name, value)
    ```
    """

    def __init__(self, config, exclusions=None):
        # config: dict of `store_name: (predicate_fn, store_list)`.
        self.config = config
        # Per-store sets of `id()` values for O(1) membership checks.
        self.stored_ids = {name: set() for name in self.config.keys()}
        self.locked = False
        self._lock_violation_msg = None
        # exclusions: dict of `store_name: [other_store_names]`; a value
        # already present in one of those stores is not added again.
        self.exclusions = exclusions or {}

    def track(self, attr):
        """Track `attr` if it matches a configured type; wrap collections.

        Returns the attribute (possibly wrapped in a Tracked* collection).
        """
        if not is_tracking_enabled():
            return attr

        # First matching store wins; stores are checked in config order.
        for store_name, (is_attr_type, _) in self.config.items():
            if is_attr_type(attr):
                if store_name in self.exclusions:
                    for excl in self.exclusions[store_name]:
                        if self.is_in_store(excl, attr):
                            return attr
                if not self.is_in_store(store_name, attr):
                    self.add_to_store(store_name, attr)
                return attr
        if isinstance(attr, tuple) and hasattr(attr, "_fields"):
            # Named tuple case.
            wrapped_attr = {}
            for name, e in attr._asdict().items():
                wrapped_attr[name] = self.track(e)
            return attr.__class__(**wrapped_attr)
        if isinstance(attr, tuple):
            # Plain tuples are immutable, so track elements and rebuild.
            wrapped_attr = []
            for e in attr:
                wrapped_attr.append(self.track(e))
            return attr.__class__(wrapped_attr)
        elif isinstance(attr, list):
            # Mutable collections are wrapped so later additions get tracked.
            return TrackedList(attr, self)
        elif isinstance(attr, dict):
            # TODO: OrderedDict?
            return TrackedDict(attr, self)
        elif isinstance(attr, set):
            return TrackedSet(attr, self)
        return attr

    def untrack(self, value):
        """Remove `value` from every store it was tracked in (by identity)."""
        for store_name in self.stored_ids.keys():
            if id(value) in self.stored_ids[store_name]:
                self.stored_ids[store_name].remove(id(value))
                python_utils.remove_by_id(self.config[store_name][1], value)

    def lock(self, msg=None):
        # While locked, `add_to_store` raises with `msg` (if given).
        self.locked = True
        if msg is not None:
            self._lock_violation_msg = msg

    def unlock(self):
        self.locked = False

    def add_to_store(self, store_name, value):
        """Append `value` to the named store; raises if the tracker is locked."""
        if self.locked:
            raise ValueError(self._lock_violation_msg)
        self.config[store_name][1].append(value)
        self.stored_ids[store_name].add(id(value))

    def is_in_store(self, store_name, value):
        # Identity-based, not equality-based, membership.
        return id(value) in self.stored_ids[store_name]

    def replace_tracked_value(self, store_name, old_value, new_value):
        """Replace a tracked value in place, keeping its store position."""
        if not self.is_in_store(store_name, old_value):
            raise ValueError(f"Unknown value: {old_value}")
        store_list = self.config[store_name][1]
        index = store_list.index(old_value)
        store_list[index] = new_value
        self.stored_ids[store_name].remove(id(old_value))
        self.stored_ids[store_name].add(id(new_value))
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@tree.register_tree_node_class
class TrackedList(list):
    """A `list` subclass that notifies a `Tracker` of added/removed items."""

    def __init__(self, values=None, tracker=None):
        # tracker may be None, in which case this behaves like a plain list.
        self.tracker = tracker
        if tracker and values:
            values = [tracker.track(v) for v in values]
        super().__init__(values or [])

    def append(self, value):
        if self.tracker:
            self.tracker.track(value)
        super().append(value)

    def insert(self, index, value):
        if self.tracker:
            self.tracker.track(value)
        super().insert(index, value)

    def extend(self, values):
        if self.tracker:
            values = [self.tracker.track(v) for v in values]
        super().extend(values)

    def remove(self, value):
        if self.tracker:
            self.tracker.untrack(value)
        try:
            super().remove(value)
        except ValueError:
            # `list.remove` uses `==`; fall back to identity-based removal.
            python_utils.remove_by_id(self, value)

    def pop(self, index=-1):
        if self.tracker:
            # Look up the value first so it can be untracked before removal.
            value = self[index]
            self.tracker.untrack(value)
            return super().pop(index)
        else:
            return super().pop(index)

    def clear(self):
        if self.tracker:
            for value in self:
                self.tracker.untrack(value)
        super().clear()

    def __delitem__(self, index):
        value = self[index]  # Get value before removing
        super().__delitem__(index)
        if self.tracker:
            self.tracker.untrack(value)

    def tree_flatten(self):
        # For optree / dmtree
        return (self, None)

    @classmethod
    def tree_unflatten(cls, metadata, children):
        # For optree / dmtree
        # Note: the rebuilt list has no tracker attached.
        return cls(children)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@tree.register_tree_node_class
class TrackedDict(dict):
    """A `dict` subclass that notifies a `Tracker` of added/removed values."""

    def __init__(self, values=None, tracker=None):
        # tracker may be None, in which case this behaves like a plain dict.
        self.tracker = tracker
        if tracker and values:
            values = {k: tracker.track(v) for k, v in values.items()}
        super().__init__(values or [])

    def __setitem__(self, key, value):
        if self.tracker:
            self.tracker.track(value)
        super().__setitem__(key, value)

    def update(self, mapping):
        if self.tracker:
            mapping = {k: self.tracker.track(v) for k, v in mapping.items()}
        super().update(mapping)

    def pop(self, key, default=None):
        if self.tracker:
            value = super().pop(key, default)
            # NOTE(review): identity check against `default` — a stored value
            # that happens to *be* the default object would not be untracked.
            if value is not default:
                self.tracker.untrack(value)
            return value
        else:
            return super().pop(key, default)

    def popitem(self):
        key, value = super().popitem()
        if self.tracker:
            self.tracker.untrack(value)
        return key, value

    def clear(self):
        if self.tracker:
            for value in self.values():
                self.tracker.untrack(value)
        super().clear()

    def tree_flatten(self):
        # For optree / dmtree
        # Keys are sorted so the flatten order is deterministic; they are
        # returned twice (presumably metadata + entries for the tree lib).
        keys = sorted(list(self.keys()))
        values = [self[k] for k in keys]
        return values, keys, keys

    @classmethod
    def tree_unflatten(cls, keys, values):
        # For optree / dmtree
        # Note: the rebuilt dict has no tracker attached.
        return cls(zip(keys, values))
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
@tree.register_tree_node_class
class TrackedSet(set):
    """A `set` subclass that notifies a `Tracker` of added/removed items."""

    def __init__(self, values=None, tracker=None):
        # tracker may be None, in which case this behaves like a plain set.
        self.tracker = tracker
        if tracker and values:
            values = {tracker.track(v) for v in values}
        super().__init__(values or [])

    def add(self, value):
        if self.tracker:
            self.tracker.track(value)
        super().add(value)

    def update(self, values):
        if self.tracker:
            values = [self.tracker.track(v) for v in values]
        super().update(values)

    def remove(self, value):
        if self.tracker:
            self.tracker.untrack(value)
        super().remove(value)

    def pop(self):
        # Pop first so we know which (arbitrary) element to untrack.
        value = super().pop()
        if self.tracker:
            self.tracker.untrack(value)
        return value

    def clear(self):
        if self.tracker:
            for value in self:
                self.tracker.untrack(value)
        super().clear()

    def tree_flatten(self):
        # For optree / dmtree
        return (self, None)

    @classmethod
    def tree_unflatten(cls, metadata, children):
        # For optree / dmtree
        # Note: the rebuilt set has no tracker attached.
        return cls(children)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/version.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.api_export import keras_export
|
| 2 |
+
|
| 3 |
+
# Unique source of truth for the version number.
__version__ = "3.8.0"


@keras_export("keras.version")
def version():
    """Return the Keras version string (e.g. `"3.8.0"`)."""
    return __version__
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keras.src.visualization import draw_bounding_boxes
|
| 2 |
+
from keras.src.visualization import plot_image_gallery
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (307 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/draw_bounding_boxes.cpython-310.pyc
ADDED
|
Binary file (5.45 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/draw_segmentation_masks.cpython-310.pyc
ADDED
|
Binary file (4.38 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_bounding_box_gallery.cpython-310.pyc
ADDED
|
Binary file (5.49 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_image_gallery.cpython-310.pyc
ADDED
|
Binary file (4.99 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/__pycache__/plot_segmentation_mask_gallery.cpython-310.pyc
ADDED
|
Binary file (3.83 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/draw_bounding_boxes.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from keras.src import backend
|
| 4 |
+
from keras.src import ops
|
| 5 |
+
from keras.src.api_export import keras_export
|
| 6 |
+
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
|
| 7 |
+
convert_format,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
import cv2
|
| 12 |
+
except ImportError:
|
| 13 |
+
cv2 = None
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@keras_export("keras.visualization.draw_bounding_boxes")
def draw_bounding_boxes(
    images,
    bounding_boxes,
    bounding_box_format,
    class_mapping=None,
    color=(128, 128, 128),
    line_thickness=2,
    text_thickness=1,
    font_scale=1.0,
    data_format=None,
):
    """Draws bounding boxes on images.

    This function draws bounding boxes on a batch of images. It supports
    different bounding box formats and can optionally display class labels
    and confidences.

    Args:
        images: A batch of images as a 4D tensor or NumPy array. Shape should
            be `(batch_size, height, width, channels)`.
        bounding_boxes: A dictionary containing bounding box data. Should have
            the following keys:
            - `boxes`: A tensor or array of shape `(batch_size, num_boxes, 4)`
              containing the bounding box coordinates in the specified format.
            - `labels`: A tensor or array of shape `(batch_size, num_boxes)`
              containing the class labels for each bounding box.
            - `confidences` (Optional): A tensor or array of shape
              `(batch_size, num_boxes)` containing the confidence scores for
              each bounding box.
        bounding_box_format: A string specifying the format of the bounding
            boxes. Refer [keras-io](TODO)
        class_mapping: A dictionary mapping class IDs (integers) to class
            labels (strings). Used to display class labels next to the
            bounding boxes. Defaults to None (no labels displayed).
        color: A tuple or list representing the RGB color of the bounding
            boxes. For example, `(255, 0, 0)` for red. Defaults to
            `(128, 128, 128)`.
        line_thickness: An integer specifying the thickness of the bounding
            box lines. Defaults to `2`.
        text_thickness: An integer specifying the thickness of the text
            labels. Defaults to `1`.
        font_scale: A float specifying the scale of the font used for text
            labels. Defaults to `1.0`.
        data_format: A string, either `"channels_last"` or `"channels_first"`,
            specifying the order of dimensions in the input images. Defaults
            to the `image_data_format` value found in your Keras config file
            at `~/.keras/keras.json`. If you never set it, then it will be
            "channels_last".

    Returns:
        A NumPy array of the annotated images with the bounding boxes drawn.
        The array will have the same shape as the input `images`.

    Raises:
        ValueError: If `images` is not a 4D tensor/array, if `bounding_boxes`
            is not a dictionary, or if `bounding_boxes` does not contain
            `"boxes"` and `"labels"` keys.
        TypeError: If `bounding_boxes` is not a dictionary.
        ImportError: If `cv2` (OpenCV) is not installed.
    """

    if cv2 is None:
        raise ImportError(
            "The `draw_bounding_boxes` function requires the `cv2` package "
            " (OpenCV). Please install it with `pip install opencv-python`."
        )

    class_mapping = class_mapping or {}
    # `or` also replaces an explicit 0/None with `line_thickness`.
    text_thickness = (
        text_thickness or line_thickness
    )  # Default text_thickness if not provided.
    data_format = data_format or backend.image_data_format()
    images_shape = ops.shape(images)
    if len(images_shape) != 4:
        raise ValueError(
            "`images` must be batched 4D tensor. "
            f"Received: images.shape={images_shape}"
        )
    if not isinstance(bounding_boxes, dict):
        raise TypeError(
            "`bounding_boxes` should be a dict. "
            f"Received: bounding_boxes={bounding_boxes} of type "
            f"{type(bounding_boxes)}"
        )
    if "boxes" not in bounding_boxes or "labels" not in bounding_boxes:
        raise ValueError(
            "`bounding_boxes` should be a dict containing 'boxes' and "
            f"'labels' keys. Received: bounding_boxes={bounding_boxes}"
        )
    # Locate the spatial axes so box coordinates can be converted to
    # absolute "xyxy" pixel values regardless of data format.
    if data_format == "channels_last":
        h_axis = -3
        w_axis = -2
    else:
        h_axis = -2
        w_axis = -1
    height = images_shape[h_axis]
    width = images_shape[w_axis]
    # Shallow copy so the caller's dict is not mutated by the conversion.
    bounding_boxes = bounding_boxes.copy()
    bounding_boxes = convert_format(
        bounding_boxes, bounding_box_format, "xyxy", height, width
    )

    # To numpy array. NOTE(review): cv2 draws on `images[i]` as-is, which
    # assumes a (height, width, channels) layout — for
    # `data_format="channels_first"` inputs this drawing path looks
    # incorrect; confirm expected usage with callers.
    images = ops.convert_to_numpy(images).astype("uint8")
    boxes = ops.convert_to_numpy(bounding_boxes["boxes"])
    labels = ops.convert_to_numpy(bounding_boxes["labels"])
    if "confidences" in bounding_boxes:
        confidences = ops.convert_to_numpy(bounding_boxes["confidences"])
    else:
        confidences = None

    result = []
    batch_size = images.shape[0]
    for i in range(batch_size):
        _image = images[i]
        _box = boxes[i]
        _class = labels[i]
        for box_i in range(_box.shape[0]):
            x1, y1, x2, y2 = _box[box_i].astype("int32")
            c = _class[box_i].astype("int32")
            # Label -1 marks a padded/absent box; skip it.
            if c == -1:
                continue
            # cv2 requires plain Python ints, not NumPy scalars.
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            c = int(c)
            # Draw bounding box (in place on this image's array).
            cv2.rectangle(_image, (x1, y1), (x2, y2), color, line_thickness)

            # Only annotate classes present in the mapping.
            if c in class_mapping:
                label = class_mapping[c]
                if confidences is not None:
                    conf = confidences[i][box_i]
                    label = f"{label} | {conf:.2f}"

                font_x1, font_y1 = _find_text_location(
                    x1, y1, font_scale, text_thickness
                )
                cv2.putText(
                    img=_image,
                    text=label,
                    org=(font_x1, font_y1),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=font_scale,
                    color=color,
                    thickness=text_thickness,
                )
        result.append(_image)
    return np.stack(result, axis=0)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def _find_text_location(x, y, font_scale, thickness):
|
| 166 |
+
font_height = int(font_scale * 12)
|
| 167 |
+
target_y = y - 8
|
| 168 |
+
if target_y - (2 * font_height) > 0:
|
| 169 |
+
return x, y - 8
|
| 170 |
+
|
| 171 |
+
line_offset = thickness
|
| 172 |
+
static_offset = 3
|
| 173 |
+
|
| 174 |
+
return (
|
| 175 |
+
x + static_offset,
|
| 176 |
+
y + (2 * font_height) + line_offset + static_offset,
|
| 177 |
+
)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/draw_segmentation_masks.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from keras.src import backend
|
| 4 |
+
from keras.src import ops
|
| 5 |
+
from keras.src.api_export import keras_export
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@keras_export("keras.visualization.draw_segmentation_masks")
def draw_segmentation_masks(
    images,
    segmentation_masks,
    num_classes=None,
    color_mapping=None,
    alpha=0.8,
    blend=True,
    ignore_index=-1,
    data_format=None,
):
    """Draws segmentation masks on images.

    The function overlays segmentation masks on the input images.
    The masks are blended with the images using the specified alpha value.

    Args:
        images: A batch of images as a 4D tensor or NumPy array. Shape
            should be (batch_size, height, width, channels).
        segmentation_masks: A batch of segmentation masks as a 3D or 4D
            tensor or NumPy array. Shape should be (batch_size, height,
            width) or (batch_size, height, width, 1). The values represent
            class indices starting from 1 up to `num_classes`. Class 0 is
            reserved for the background and will be ignored if
            `ignore_index` is not 0.
        num_classes: The number of segmentation classes. If `None`, it is
            inferred from the maximum value in `segmentation_masks`.
        color_mapping: A dictionary mapping class indices to RGB colors.
            If `None`, a default color palette is generated. The keys should
            be integers starting from 1 up to `num_classes`.
        alpha: The opacity of the segmentation masks. Must be in the range
            `[0, 1]`.
        blend: Whether to blend the masks with the input image using the
            `alpha` value. If `False`, the masks are drawn directly on the
            images without blending. Defaults to `True`.
        ignore_index: The class index to ignore. Mask pixels with this value
            will not be drawn. Defaults to -1.
        data_format: Image data format, either `"channels_last"` or
            `"channels_first"`. Defaults to the `image_data_format` value
            found in your Keras config file at `~/.keras/keras.json`. If you
            never set it, then it will be `"channels_last"`.

    Returns:
        A NumPy array of the images with the segmentation masks overlaid.

    Raises:
        ValueError: If the input `images` is not a 4D tensor or NumPy array.
        TypeError: If the input `segmentation_masks` is not an integer type.
    """
    data_format = data_format or backend.image_data_format()
    images_shape = ops.shape(images)
    if len(images_shape) != 4:
        raise ValueError(
            "`images` must be batched 4D tensor. "
            f"Received: images.shape={images_shape}"
        )
    if data_format == "channels_first":
        # Normalize both inputs to channels_last for the rest of the body.
        images = ops.transpose(images, (0, 2, 3, 1))
        segmentation_masks = ops.transpose(segmentation_masks, (0, 2, 3, 1))
    images = ops.convert_to_tensor(images, dtype="float32")
    segmentation_masks = ops.convert_to_tensor(segmentation_masks)

    if not backend.is_int_dtype(segmentation_masks.dtype):
        dtype = backend.standardize_dtype(segmentation_masks.dtype)
        raise TypeError(
            "`segmentation_masks` must be in integer dtype. "
            f"Received: segmentation_masks.dtype={dtype}"
        )

    # Infer num_classes from the largest class index present.
    if num_classes is None:
        num_classes = int(ops.convert_to_numpy(ops.max(segmentation_masks)))
    if color_mapping is None:
        colors = _generate_color_palette(num_classes)
    else:
        # NOTE(review): this indexes keys 0..num_classes-1, while the
        # docstring above says keys run from 1 up to `num_classes` —
        # confirm which convention callers actually use.
        colors = [color_mapping[i] for i in range(num_classes)]
    valid_masks = ops.not_equal(segmentation_masks, ignore_index)
    # NOTE(review): the squeeze/`[..., 0, :]` below require a trailing
    # channel axis of size 1, i.e. masks of shape (B, H, W, 1) — the
    # documented 3D (B, H, W) input would not match this indexing; verify.
    valid_masks = ops.squeeze(valid_masks, axis=-1)
    segmentation_masks = ops.one_hot(segmentation_masks, num_classes)
    segmentation_masks = segmentation_masks[..., 0, :]
    segmentation_masks = ops.convert_to_numpy(segmentation_masks)

    # Replace class with color: paint every pixel of each class's boolean
    # mask with that class's RGB color.
    masks = segmentation_masks
    masks = np.transpose(masks, axes=(3, 0, 1, 2)).astype("bool")
    images_to_draw = ops.convert_to_numpy(images).copy()
    for mask, color in zip(masks, colors):
        color = np.array(color, dtype=images_to_draw.dtype)
        images_to_draw[mask, ...] = color[None, :]
    images_to_draw = ops.convert_to_tensor(images_to_draw)
    outputs = ops.cast(images_to_draw, dtype="float32")

    if blend:
        # Alpha-blend painted pixels; keep original pixels where the mask
        # equals `ignore_index`.
        outputs = images * (1 - alpha) + outputs * alpha
        outputs = ops.where(valid_masks[..., None], outputs, images)
    outputs = ops.cast(outputs, dtype="uint8")
    outputs = ops.convert_to_numpy(outputs)
    return outputs
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def _generate_color_palette(num_classes: int):
|
| 108 |
+
palette = np.array([2**25 - 1, 2**15 - 1, 2**21 - 1])
|
| 109 |
+
return [((i * palette) % 255).tolist() for i in range(num_classes)]
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_bounding_box_gallery.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from keras.src import backend
|
| 6 |
+
from keras.src import ops
|
| 7 |
+
from keras.src.api_export import keras_export
|
| 8 |
+
from keras.src.visualization.draw_bounding_boxes import draw_bounding_boxes
|
| 9 |
+
from keras.src.visualization.plot_image_gallery import plot_image_gallery
|
| 10 |
+
|
| 11 |
+
try:
|
| 12 |
+
from matplotlib import patches # For legend patches
|
| 13 |
+
except ImportError:
|
| 14 |
+
patches = None
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@keras_export("keras.visualization.plot_bounding_box_gallery")
def plot_bounding_box_gallery(
    images,
    bounding_box_format,
    y_true=None,
    y_pred=None,
    value_range=(0, 255),
    true_color=(0, 188, 212),
    pred_color=(255, 235, 59),
    line_thickness=2,
    font_scale=1.0,
    text_thickness=None,
    class_mapping=None,
    ground_truth_mapping=None,
    prediction_mapping=None,
    legend=False,
    legend_handles=None,
    rows=None,
    cols=None,
    data_format=None,
    **kwargs,
):
    """Plots a gallery of images with bounding boxes.

    This function can display both ground truth and predicted bounding boxes
    on a set of images. It supports various bounding box formats and can
    include class labels and a legend.

    Args:
        images: A 4D tensor or NumPy array of images. Shape should be
            `(batch_size, height, width, channels)`.
        bounding_box_format: The format of the bounding boxes.
            Refer [keras-io](TODO)
        y_true: A dictionary containing the ground truth bounding boxes and
            labels. Should have the same structure as the `bounding_boxes`
            argument in `keras.visualization.draw_bounding_boxes`.
            Defaults to `None`.
        y_pred: A dictionary containing the predicted bounding boxes and
            labels. Should have the same structure as `y_true`. Defaults to
            `None`.
        value_range: A tuple specifying the value range of the images
            (e.g., `(0, 255)` or `(0, 1)`). Defaults to `(0, 255)`.
        true_color: A tuple of three integers representing the RGB color for
            the ground truth bounding boxes. Defaults to `(0, 188, 212)`.
        pred_color: A tuple of three integers representing the RGB color for
            the predicted bounding boxes. Defaults to `(255, 235, 59)`.
        line_thickness: The thickness of the bounding box lines. Defaults
            to 2.
        font_scale: The scale of the font used for labels. Defaults to 1.0.
        text_thickness: The thickness of the bounding box text. Defaults to
            `line_thickness`.
        class_mapping: A dictionary mapping class IDs to class names. Used
            for both ground truth and predicted boxes if
            `ground_truth_mapping` and `prediction_mapping` are not
            provided. Defaults to `None`.
        ground_truth_mapping: A dictionary mapping class IDs to class names
            specifically for ground truth boxes. Overrides `class_mapping`
            for ground truth. Defaults to `None`.
        prediction_mapping: A dictionary mapping class IDs to class names
            specifically for predicted boxes. Overrides `class_mapping` for
            predictions. Defaults to `None`.
        legend: A boolean indicating whether to show a legend.
            Defaults to `False`.
        legend_handles: A list of matplotlib `Patch` objects to use for the
            legend. If this is provided, the `legend` argument will be
            ignored. Defaults to `None`.
        rows: The number of rows in the image gallery. Required if the
            images are not batched. Defaults to `None`.
        cols: The number of columns in the image gallery. Required if the
            images are not batched. Defaults to `None`.
        data_format: The image data format `"channels_last"` or
            `"channels_first"`. Defaults to the Keras backend data format.
        kwargs: Additional keyword arguments to be passed to
            `keras.visualization.plot_image_gallery`.

    Returns:
        The output of `keras.visualization.plot_image_gallery`.

    Raises:
        ValueError: If `images` is not a 4D tensor/array or if both `legend`
            and `legend_handles` are specified.
        ImportError: if matplotlib is not installed
    """
    if patches is None:
        raise ImportError(
            "The `plot_bounding_box_gallery` function requires the "
            " `matplotlib` package. Please install it with "
            " `pip install matplotlib`."
        )

    # Per-role mappings fall back to the shared `class_mapping`.
    prediction_mapping = prediction_mapping or class_mapping
    ground_truth_mapping = ground_truth_mapping or class_mapping
    data_format = data_format or backend.image_data_format()
    images_shape = ops.shape(images)
    if len(images_shape) != 4:
        raise ValueError(
            "`images` must be batched 4D tensor. "
            f"Received: images.shape={images_shape}"
        )
    if data_format == "channels_first":  # Ensure correct data format
        images = ops.transpose(images, (0, 2, 3, 1))
    plotted_images = ops.convert_to_numpy(images)

    # Shared drawing settings: only the boxes, color, and class mapping
    # differ between the ground-truth and prediction passes below.
    draw_fn = functools.partial(
        draw_bounding_boxes,
        bounding_box_format=bounding_box_format,
        line_thickness=line_thickness,
        text_thickness=text_thickness,
        font_scale=font_scale,
    )

    # Draw ground truth first so predictions are rendered on top.
    if y_true is not None:
        plotted_images = draw_fn(
            plotted_images,
            y_true,
            color=true_color,
            class_mapping=ground_truth_mapping,
        )

    if y_pred is not None:
        plotted_images = draw_fn(
            plotted_images,
            y_pred,
            color=pred_color,
            class_mapping=prediction_mapping,
        )

    if legend:
        if legend_handles:
            raise ValueError(
                "Only pass `legend` OR `legend_handles` to "
                "`keras.visualization.plot_bounding_box_gallery()`."
            )
        legend_handles = [
            patches.Patch(
                color=np.array(true_color) / 255.0,  # Normalize color
                label="Ground Truth",
            ),
            patches.Patch(
                color=np.array(pred_color) / 255.0,  # Normalize color
                label="Prediction",
            ),
        ]

    return plot_image_gallery(
        plotted_images,
        value_range=value_range,
        legend_handles=legend_handles,
        rows=rows,
        cols=cols,
        **kwargs,
    )
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/keras/src/visualization/plot_image_gallery.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from keras.src import backend
|
| 6 |
+
from keras.src import ops
|
| 7 |
+
from keras.src.api_export import keras_export
|
| 8 |
+
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
|
| 9 |
+
BaseImagePreprocessingLayer,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
import matplotlib.pyplot as plt
|
| 14 |
+
except ImportError:
|
| 15 |
+
plt = None
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _extract_image_batch(images, num_images, batch_size):
    """Return the leading slice of `images` to be shown in the gallery.

    Takes at most `num_images` samples from the front of the batch, capped
    at `batch_size`.

    Args:
        images: The 4D tensor or NumPy array of images.
        num_images: The number of images to extract.
        batch_size: The original batch size of the images.

    Returns:
        A 4D tensor or NumPy array containing the extracted images.

    Raises:
        ValueError: If `images` is not a 4D tensor/array.
    """
    if len(ops.shape(images)) != 4:
        raise ValueError(
            "`plot_images_gallery()` requires you to "
            "batch your `np.array` samples together."
        )
    take = min(num_images, batch_size)
    return images[:take, ...]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@keras_export("keras.visualization.plot_image_gallery")
def plot_image_gallery(
    images,
    rows=None,
    cols=None,
    value_range=(0, 255),
    scale=2,
    path=None,
    show=None,
    transparent=True,
    dpi=60,
    legend_handles=None,
    data_format=None,
):
    """Displays a gallery of images.

    Args:
        images: A 4D tensor or NumPy array of images. Shape should be
            `(batch_size, height, width, channels)`.
        rows: The number of rows in the gallery. If `None`, it's calculated
            based on the number of images and `cols`. Defaults to `None`.
        cols: The number of columns in the gallery. If `None`, it's
            calculated based on the number of images and `rows`. Defaults to
            `None`.
        value_range: A tuple specifying the value range of the images
            (e.g., `(0, 255)` or `(0, 1)`). Defaults to `(0, 255)`.
        scale: A float controlling the size of the displayed images. The
            images are scaled by this factor. Defaults to `2`.
        path: The path to save the generated gallery image. If `None`, the
            image is displayed using `plt.show()`. Defaults to `None`.
        show: Whether to display the image using `plt.show()`. If `True`,
            the image is displayed. If `False`, the image is not displayed.
            Ignored if `path` is not `None`. Defaults to `True` if `path`
            is `None`, `False` otherwise.
        transparent: A boolean, whether to save the figure with a
            transparent background. Defaults to `True`.
        dpi: The DPI (dots per inch) for saving the figure. Defaults to 60.
        legend_handles: A list of matplotlib `Patch` objects to use as
            legend handles. Defaults to `None`.
        data_format: The image data format `"channels_last"` or
            `"channels_first"`. Defaults to the Keras backend data format.

    Raises:
        ValueError: If both `path` and `show` are set to non-`None` values
            or if `images` is not a 4D tensor or array.
        ImportError: if matplotlib is not installed.
    """
    if plt is None:
        raise ImportError(
            "The `plot_image_gallery` function requires the `matplotlib` "
            "package. Please install it with `pip install matplotlib`."
        )

    if path is not None and show:
        raise ValueError(
            "plot_gallery() expects either `path` to be set, or `show` "
            "to be true."
        )

    # Default: show interactively unless the figure is being saved to disk.
    show = show if show is not None else (path is None)
    data_format = data_format or backend.image_data_format()

    batch_size = ops.shape(images)[0] if len(ops.shape(images)) == 4 else 1

    # Lay the batch out on a near-square grid. Use true division before
    # `ceil`: the previous `math.ceil(batch_size // rows)` floored first,
    # which silently dropped images whenever `batch_size` was not evenly
    # divisible by `rows`.
    rows = rows or int(math.ceil(math.sqrt(batch_size)))
    cols = cols or int(math.ceil(batch_size / rows))
    num_images = rows * cols

    images = _extract_image_batch(images, num_images, batch_size)
    if (
        data_format == "channels_first"
    ):  # Convert to channels_last once, up front, for plotting.
        images = ops.transpose(images, (0, 2, 3, 1))
    # Generate subplots
    fig, axes = plt.subplots(
        nrows=rows,
        ncols=cols,
        figsize=(cols * scale, rows * scale),
        frameon=False,
        layout="tight",
        squeeze=True,
        sharex="row",
        sharey="col",
    )
    fig.subplots_adjust(wspace=0, hspace=0)

    if isinstance(axes, np.ndarray) and len(axes.shape) == 1:
        # Normalize a 1D axes vector to 2D so `axes[row, col]` works below.
        expand_axis = 0 if rows == 1 else -1
        axes = np.expand_dims(axes, expand_axis)

    if legend_handles is not None:
        fig.legend(handles=legend_handles, loc="lower center")

    # Rescale pixel values to the [0, 255] display range.
    images = BaseImagePreprocessingLayer()._transform_value_range(
        images=images, original_range=value_range, target_range=(0, 255)
    )

    images = ops.convert_to_numpy(images)
    # NOTE: `images` was already transposed to channels_last above; the
    # second per-array transpose that used to live here scrambled the axes
    # of channels_first inputs and has been removed.

    for row in range(rows):
        for col in range(cols):
            index = row * cols + col
            current_axis = (
                axes[row, col] if isinstance(axes, np.ndarray) else axes
            )
            if index >= images.shape[0]:
                # The grid may have more cells than images (ceil above);
                # leave the surplus cells blank.
                current_axis.axis("off")
                continue
            current_axis.imshow(images[index].astype("uint8"))
            current_axis.margins(x=0, y=0)
            current_axis.axis("off")

    if path is not None:
        plt.savefig(
            fname=path,
            pad_inches=0,
            bbox_inches="tight",
            transparent=transparent,
            dpi=dpi,
        )
        plt.close()
    elif show:
        plt.show()
        plt.close()
|