# Use with PyArrow

This document is a quick introduction to using `datasets` with PyArrow, with a particular focus on how to process
datasets using Arrow compute functions, and how to convert a dataset to PyArrow or from PyArrow.

This is particularly useful as it allows fast zero-copy operations, since `datasets` uses PyArrow under the hood.

## Dataset format

By default, datasets return regular Python objects: integers, floats, strings, lists, etc.

To get PyArrow Tables or Arrays instead, you can set the format of the dataset to `arrow` using [`Dataset.with_format`]:

```py
>>> from datasets import Dataset
>>> data = {"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]}
>>> ds = Dataset.from_dict(data)
>>> ds = ds.with_format("arrow")
>>> ds[0]       # pa.Table
pyarrow.Table
col_0: string
col_1: double
----
col_0: [["a"]]
col_1: [[0]]
>>> ds[:2]      # pa.Table
pyarrow.Table
col_0: string
col_1: double
----
col_0: [["a","b"]]
col_1: [[0,0]]
>>> ds["col_0"]  # pa.ChunkedArray
<pyarrow.lib.ChunkedArray object at 0x1394312a0>
[
  [
    "a",
    "b",
    "c",
    "d"
  ]
]
```

This also works for `IterableDataset` objects obtained e.g. using `load_dataset(..., streaming=True)`:

```py
>>> ds = ds.with_format("arrow")
>>> for table in ds.iter(batch_size=2):
...     print(table)
...     break
pyarrow.Table
col_0: string
col_1: double
----
col_0: [["a","b"]]
col_1: [[0,0]]
```

## Process data

PyArrow functions are generally faster than regular hand-written Python functions, so they are a good option for
optimizing data processing. You can use Arrow compute functions to process a dataset in [`Dataset.map`] or
[`Dataset.filter`]:

```python
>>> import pyarrow.compute as pc
>>> from datasets import Dataset
>>> data = {"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]}
>>> ds = Dataset.from_dict(data)
>>> ds = ds.with_format("arrow")
>>> ds = ds.map(lambda t: t.append_column("col_2", pc.add(t["col_1"], 1)), batched=True)
>>> ds[:2]
pyarrow.Table
col_0: string
col_1: double
col_2: double
----
col_0: [["a","b"]]
col_1: [[0,0]]
col_2: [[1,1]]
>>> ds = ds.filter(lambda t: pc.equal(t["col_0"], "b"), batched=True)
>>> ds[0]
pyarrow.Table
col_0: string
col_1: double
col_2: double
----
col_0: [["b"]]
col_1: [[0]]
col_2: [[1]]
```

We use `batched=True` because it is faster to process batches of data in PyArrow rather than row by row. It's also possible
to use `batch_size=` in `map()` to set the size of each `table`.

This also works for [`IterableDataset.map`] and [`IterableDataset.filter`].

## Import or Export from PyArrow

A [`Dataset`] is a wrapper of a PyArrow Table, so you can instantiate a Dataset directly from a Table:

```python
ds = Dataset(table)
```

You can access the PyArrow Table of a dataset using [`Dataset.data`], which returns a [`MemoryMappedTable`], an
[`InMemoryTable`] or a [`ConcatenationTable`], depending on the origin of the Arrow data and the operations that were applied.

Those objects wrap the underlying PyArrow table accessible at `Dataset.data.table`. This table contains all the data of the
dataset, but there might also be an indices mapping at `Dataset._indices` which maps the dataset's row indices to the
PyArrow Table's row indices. This can happen if the dataset has been shuffled with [`Dataset.shuffle`] or if only a subset
of the rows is used (e.g. after a [`Dataset.select`]).

In the general case, you can export a dataset to a PyArrow Table using `table = ds.with_format("arrow")[:]`.
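To make the round trip concrete, here is a minimal sketch (the output value is for this toy table, and `pc.sum` is just one example of a compute function) that wraps a PyArrow Table in a [`Dataset`], exports it back to PyArrow, and runs a compute function on the result:

```python
>>> import pyarrow as pa
>>> import pyarrow.compute as pc
>>> from datasets import Dataset
>>> table = pa.table({"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]})
>>> ds = Dataset(table)                    # wrap the Arrow table (zero-copy)
>>> exported = ds.with_format("arrow")[:]  # export back to a pa.Table
>>> pc.sum(exported["col_1"]).as_py()
2.0
```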
datasets/docs/source/use_with_pyarrow.mdx/0
{ "file_path": "datasets/docs/source/use_with_pyarrow.mdx", "repo_id": "datasets", "token_count": 1257 }
100
#!/usr/bin/env python
from argparse import ArgumentParser

from datasets.commands.delete_from_hub import DeleteFromHubCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    DeleteFromHubCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
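# Illustrative usage sketch (not part of the module): with the `datasets-cli`
# entry point installed, the subcommands registered above can be invoked as e.g.
#
#     datasets-cli env
#
# Unknown `--key value` pairs are forwarded to the command factory via
# parse_unknown_args, e.g.:
#
#     parse_unknown_args(["--num_proc", "4"])  # -> {"num_proc": "4"}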
datasets/src/datasets/commands/datasets_cli.py/0
{ "file_path": "datasets/src/datasets/commands/datasets_cli.py", "repo_id": "datasets", "token_count": 411 }
101
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""This class handles feature definitions in datasets and some utilities to display the table type."""

import copy
import json
import re
import sys
from collections.abc import Iterable, Mapping
from collections.abc import Sequence as SequenceABC
from collections.abc import Sequence as Sequence_
from dataclasses import InitVar, dataclass, field, fields
from functools import reduce, wraps
from operator import mul
from typing import Any, Callable, ClassVar, Literal, Optional, Union

import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.types
from pandas.api.extensions import ExtensionArray as PandasExtensionArray
from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype

from .. import config
from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
from ..table import array_cast
from ..utils import experimental, logging
from ..utils.py_utils import asdict, first_non_null_value, zip_dict
from .audio import Audio
from .image import Image, encode_pil_image
from .pdf import Pdf, encode_pdfplumber_pdf
from .translation import Translation, TranslationVariableLanguages
from .video import Video


logger = logging.get_logger(__name__)


def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
    """
    _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.

    In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
    """
    if pyarrow.types.is_null(arrow_type):
        return "null"
    elif pyarrow.types.is_boolean(arrow_type):
        return "bool"
    elif pyarrow.types.is_int8(arrow_type):
        return "int8"
    elif pyarrow.types.is_int16(arrow_type):
        return "int16"
    elif pyarrow.types.is_int32(arrow_type):
        return "int32"
    elif pyarrow.types.is_int64(arrow_type):
        return "int64"
    elif pyarrow.types.is_uint8(arrow_type):
        return "uint8"
    elif pyarrow.types.is_uint16(arrow_type):
        return "uint16"
    elif pyarrow.types.is_uint32(arrow_type):
        return "uint32"
    elif pyarrow.types.is_uint64(arrow_type):
        return "uint64"
    elif pyarrow.types.is_float16(arrow_type):
        return "float16"  # pyarrow dtype is "halffloat"
    elif pyarrow.types.is_float32(arrow_type):
        return "float32"  # pyarrow dtype is "float"
    elif pyarrow.types.is_float64(arrow_type):
        return "float64"  # pyarrow dtype is "double"
    elif pyarrow.types.is_time32(arrow_type):
        return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
    elif pyarrow.types.is_time64(arrow_type):
        return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
    elif pyarrow.types.is_timestamp(arrow_type):
        if arrow_type.tz is None:
            return f"timestamp[{arrow_type.unit}]"
        elif arrow_type.tz:
            return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
        else:
            raise ValueError(f"Unexpected timestamp object {arrow_type}.")
    elif pyarrow.types.is_date32(arrow_type):
        return "date32"  # pyarrow dtype is "date32[day]"
    elif pyarrow.types.is_date64(arrow_type):
        return "date64"  # pyarrow dtype is "date64[ms]"
    elif pyarrow.types.is_duration(arrow_type):
        return f"duration[{arrow_type.unit}]"
    elif pyarrow.types.is_decimal128(arrow_type):
        return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
    elif pyarrow.types.is_decimal256(arrow_type):
        return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
    elif pyarrow.types.is_binary(arrow_type):
        return "binary"
    elif pyarrow.types.is_large_binary(arrow_type):
        return "large_binary"
    elif pyarrow.types.is_string(arrow_type):
        return "string"
    elif pyarrow.types.is_large_string(arrow_type):
        return "large_string"
    elif pyarrow.types.is_dictionary(arrow_type):
        return _arrow_to_datasets_dtype(arrow_type.value_type)
    else:
        raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
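# Illustrative round trip (not part of the module): `_arrow_to_datasets_dtype`
# and `string_to_arrow` (defined below) are inverses for supported types, e.g.:
#
#     _arrow_to_datasets_dtype(pa.timestamp("us", tz="UTC"))            # -> "timestamp[us, tz=UTC]"
#     string_to_arrow("timestamp[us, tz=UTC]") == pa.timestamp("us", tz="UTC")  # -> True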
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
    """
    string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.

    In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`

    This is necessary because the datasets.Value() primitive type is constructed using a string dtype

    Value(dtype=str)

    But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema,
    which means that each Value() must be able to resolve into a corresponding pyarrow.DataType,
    which is the purpose of this function.
    """

    def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
        msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
        if examples:
            examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
            msg += f"\nValid examples include: {examples}."
        if urls:
            urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
            msg += f"\nFor more information, see: {urls}."
        return msg

    if datasets_dtype in pa.__dict__:
        return pa.__dict__[datasets_dtype]()

    if (datasets_dtype + "_") in pa.__dict__:
        return pa.__dict__[datasets_dtype + "_"]()

    timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
    if timestamp_matches:
        timestamp_internals = timestamp_matches.group(1)
        internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
        if timestamp_internals in ["s", "ms", "us", "ns"]:
            return pa.timestamp(timestamp_internals)
        elif internals_matches:
            return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "timestamp",
                    examples=["timestamp[us]", "timestamp[us, tz=America/New_York]"],
                    urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
                )
            )

    duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
    if duration_matches:
        duration_internals = duration_matches.group(1)
        if duration_internals in ["s", "ms", "us", "ns"]:
            return pa.duration(duration_internals)
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "duration",
                    examples=["duration[s]", "duration[us]"],
                    urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
                )
            )

    time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
    if time_matches:
        time_internals_bits = time_matches.group(1)
        if time_internals_bits == "32":
            time_internals_unit = time_matches.group(2)
            if time_internals_unit in ["s", "ms"]:
                return pa.time32(time_internals_unit)
            else:
                raise ValueError(
                    f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
                )
        elif time_internals_bits == "64":
            time_internals_unit = time_matches.group(2)
            if time_internals_unit in ["us", "ns"]:
                return pa.time64(time_internals_unit)
            else:
                raise ValueError(
                    f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
                )
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "time",
                    examples=["time32[s]", "time64[us]"],
                    urls=[
                        "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
                        "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
                    ],
                )
            )

    decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
    if decimal_matches:
        decimal_internals_bits = decimal_matches.group(1)
        if decimal_internals_bits == "128":
            decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
            if decimal_internals_precision_and_scale:
                precision = decimal_internals_precision_and_scale.group(1)
                scale = decimal_internals_precision_and_scale.group(2)
                return pa.decimal128(int(precision), int(scale))
            else:
                raise ValueError(
                    _dtype_error_msg(
                        datasets_dtype,
                        "decimal128",
                        examples=["decimal128(10, 2)", "decimal128(4, -2)"],
                        urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
                    )
                )
        elif decimal_internals_bits == "256":
            decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
            if decimal_internals_precision_and_scale:
                precision = decimal_internals_precision_and_scale.group(1)
                scale = decimal_internals_precision_and_scale.group(2)
                return pa.decimal256(int(precision), int(scale))
            else:
                raise ValueError(
                    _dtype_error_msg(
                        datasets_dtype,
                        "decimal256",
                        examples=["decimal256(30, 2)", "decimal256(38, -4)"],
                        urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
                    )
                )
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "decimal",
                    examples=["decimal128(12, 3)", "decimal256(40, 6)"],
                    urls=[
                        "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
                        "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
                    ],
                )
            )

    raise ValueError(
        f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
        f"Please make sure to use a correct data type, see: "
        f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
    )


def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> tuple[Any, bool]:
    """
    Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
    It works recursively.

    If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
    If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
    This trick allows casting objects that contain tokenizer outputs without iterating over every single token, for example.

    Args:
        obj: the object (nested struct) to cast.
        only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
            nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only supports converting 1-dimensional array values.
        optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
            and if it doesn't, not checking the rest of the list elements.

    Returns:
        casted_obj: the casted object
        has_changed (bool): True if the object has been changed, False if it is identical
    """
    if config.TF_AVAILABLE and "tensorflow" in sys.modules:
        import tensorflow as tf

    if config.TORCH_AVAILABLE and "torch" in sys.modules:
        import torch

    if config.JAX_AVAILABLE and "jax" in sys.modules:
        import jax.numpy as jnp

    if config.PIL_AVAILABLE and "PIL" in sys.modules:
        import PIL.Image

    if config.PDFPLUMBER_AVAILABLE and "pdfplumber" in sys.modules:
        import pdfplumber

    if config.TORCHCODEC_AVAILABLE and "torchcodec" in sys.modules:
        from torchcodec.decoders import AudioDecoder, VideoDecoder

    if isinstance(obj, np.ndarray):
        if obj.ndim == 0:
            return obj[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj, False
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj
                ],
                True,
            )
    elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
        if obj.dtype == torch.bfloat16:
            return _cast_to_python_objects(
                obj.detach().to(torch.float).cpu().numpy(),
                only_1d_for_numpy=only_1d_for_numpy,
                optimize_list_casting=optimize_list_casting,
            )[0], True
        if obj.ndim == 0:
            return obj.detach().cpu().numpy()[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj.detach().cpu().numpy(), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj.detach().cpu().numpy()
                ],
                True,
            )
    elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
        if obj.ndim == 0:
            return obj.numpy()[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj.numpy(), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj.numpy()
                ],
                True,
            )
    elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
        if obj.ndim == 0:
            return np.asarray(obj)[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return np.asarray(obj), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in np.asarray(obj)
                ],
                True,
            )
    elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
        return encode_pil_image(obj), True
    elif config.PDFPLUMBER_AVAILABLE and "pdfplumber" in sys.modules and isinstance(obj, pdfplumber.pdf.PDF):
        return encode_pdfplumber_pdf(obj), True
    elif isinstance(obj, pd.Series):
        return (
            _cast_to_python_objects(
                obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )[0],
            True,
        )
    elif isinstance(obj, pd.DataFrame):
        return (
            {
                key: _cast_to_python_objects(
                    value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                )[0]
                for key, value in obj.to_dict("series").items()
            },
            True,
        )
    elif isinstance(obj, pd.Timestamp):
        return obj.to_pydatetime(), True
    elif isinstance(obj, pd.Timedelta):
        return obj.to_pytimedelta(), True
    elif isinstance(obj, Mapping):
        has_changed = not isinstance(obj, dict)
        output = {}
        for k, v in obj.items():
            casted_v, has_changed_v = _cast_to_python_objects(
                v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )
            has_changed |= has_changed_v
            output[k] = casted_v
        return output if has_changed else obj, has_changed
    elif hasattr(obj, "__array__"):
        if np.isscalar(obj):
            return obj, False
        else:
            return (
                _cast_to_python_objects(
                    obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                )[0],
                True,
            )
    elif isinstance(obj, (list, tuple)):
        if len(obj) > 0:
            for first_elmt in obj:
                if _check_non_null_non_empty_recursive(first_elmt):
                    break
            casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
                first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )
            if has_changed_first_elmt or not optimize_list_casting:
                return (
                    [
                        _cast_to_python_objects(
                            elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                        )[0]
                        for elmt in obj
                    ],
                    True,
                )
            else:
                if isinstance(obj, (list, tuple)):
                    return obj, False
                else:
                    return list(obj), True
        else:
            return obj, False
    elif config.TORCHCODEC_AVAILABLE and "torchcodec" in sys.modules and isinstance(obj, VideoDecoder):
        v = Video()
        return v.encode_example(obj), True
    elif config.TORCHCODEC_AVAILABLE and "torchcodec" in sys.modules and isinstance(obj, AudioDecoder):
        a = Audio()
        return a.encode_example(obj), True
    else:
        return obj, False


def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
    """
    Cast numpy/pytorch/tensorflow/pandas objects to python lists.
    It works recursively.

    If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
    If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
    This trick allows casting objects that contain tokenizer outputs without iterating over every single token, for example.

    Args:
        obj: the object (nested struct) to cast
        only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
            nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only supports converting 1-dimensional array values.
        optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
            and if it doesn't, not checking the rest of the list elements.

    Returns:
        casted_obj: the casted object
    """
    return _cast_to_python_objects(
        obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
    )[0]
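# Illustrative sketch (not part of the module):
#
#     cast_to_python_objects({"a": pd.Series([1, 2])})
#     # -> {"a": [1, 2]}
#     cast_to_python_objects(np.zeros((2, 2)), only_1d_for_numpy=True)
#     # -> [array([0., 0.]), array([0., 0.])]  (nested list of 1-d arrays)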
@dataclass(repr=False)
class Value:
    """
    Scalar feature value of a particular data type.

    The possible dtypes of `Value` are as follows:

    - `null`
    - `bool`
    - `int8`
    - `int16`
    - `int32`
    - `int64`
    - `uint8`
    - `uint16`
    - `uint32`
    - `uint64`
    - `float16`
    - `float32` (alias float)
    - `float64` (alias double)
    - `time32[(s|ms)]`
    - `time64[(us|ns)]`
    - `timestamp[(s|ms|us|ns)]`
    - `timestamp[(s|ms|us|ns), tz=(tzstring)]`
    - `date32`
    - `date64`
    - `duration[(s|ms|us|ns)]`
    - `decimal128(precision, scale)`
    - `decimal256(precision, scale)`
    - `binary`
    - `large_binary`
    - `string`
    - `large_string`

    Args:
        dtype (`str`):
            Name of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'stars': Value('int32')})
    >>> features
    {'stars': Value('int32')}
    ```
    """

    dtype: str
    id: Optional[str] = field(default=None, repr=False)
    # Automatically constructed
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Value", init=False, repr=False)

    def __post_init__(self):
        if self.dtype == "double":  # fix inferred type
            self.dtype = "float64"
        if self.dtype == "float":  # fix inferred type
            self.dtype = "float32"
        self.pa_type = string_to_arrow(self.dtype)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value):
        if pa.types.is_boolean(self.pa_type):
            return bool(value)
        elif pa.types.is_integer(self.pa_type):
            return int(value)
        elif pa.types.is_floating(self.pa_type):
            return float(value)
        elif pa.types.is_string(self.pa_type):
            return str(value)
        else:
            return value

    def __repr__(self):
        return f"{type(self).__name__}('{self.dtype}')"


class _ArrayXD:
    def __post_init__(self):
        self.shape = tuple(self.shape)

    def __call__(self):
        pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
        return pa_type

    def encode_example(self, value):
        return value


@dataclass
class Array2D(_ArrayXD):
    """Create a two-dimensional array.

    Args:
        shape (`tuple`):
            Size of each dimension.
        dtype (`str`):
            Name of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = field(default=None, repr=False)
    # Automatically constructed
    _type: str = field(default="Array2D", init=False, repr=False)


@dataclass
class Array3D(_ArrayXD):
    """Create a three-dimensional array.

    Args:
        shape (`tuple`):
            Size of each dimension.
        dtype (`str`):
            Name of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = field(default=None, repr=False)
    # Automatically constructed
    _type: str = field(default="Array3D", init=False, repr=False)


@dataclass
class Array4D(_ArrayXD):
    """Create a four-dimensional array.

    Args:
        shape (`tuple`):
            Size of each dimension.
        dtype (`str`):
            Name of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = field(default=None, repr=False)
    # Automatically constructed
    _type: str = field(default="Array4D", init=False, repr=False)


@dataclass
class Array5D(_ArrayXD):
    """Create a five-dimensional array.

    Args:
        shape (`tuple`):
            Size of each dimension.
        dtype (`str`):
            Name of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = field(default=None, repr=False)
    # Automatically constructed
    _type: str = field(default="Array5D", init=False, repr=False)


class _ArrayXDExtensionType(pa.ExtensionType):
    ndims: Optional[int] = None

    def __init__(self, shape: tuple, dtype: str):
        if self.ndims is None or self.ndims <= 1:
            raise ValueError("You must instantiate an array type with a value for dim that is > 1")
        if len(shape) != self.ndims:
            raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
        for dim in range(1, self.ndims):
            if shape[dim] is None:
                raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
        self.shape = tuple(shape)
        self.value_type = dtype
        self.storage_dtype = self._generate_dtype(self.value_type)
        pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")

    def __arrow_ext_serialize__(self):
        return json.dumps((self.shape, self.value_type)).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        args = json.loads(serialized)
        return cls(*args)

    # This was added to pa.ExtensionType in pyarrow >= 13.0.0
    def __reduce__(self):
        return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())

    def __hash__(self):
        return hash((self.__class__, self.shape, self.value_type))

    def __arrow_ext_class__(self):
        return ArrayExtensionArray

    def _generate_dtype(self, dtype):
        dtype = string_to_arrow(dtype)
        for d in reversed(self.shape):
            dtype = pa.list_(dtype)
            # Don't specify the size of the list, since fixed length list arrays have issues
            # being validated after slicing in pyarrow 0.17.1
        return dtype

    def to_pandas_dtype(self):
        return PandasArrayExtensionDtype(self.value_type)


class Array2DExtensionType(_ArrayXDExtensionType):
    ndims = 2


class Array3DExtensionType(_ArrayXDExtensionType):
    ndims = 3


class Array4DExtensionType(_ArrayXDExtensionType):
    ndims = 4


class Array5DExtensionType(_ArrayXDExtensionType):
    ndims = 5


# Register the extension types for deserialization
pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))


def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
    """
    When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.

    This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.

    # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
    # primitive types are types for which the physical representation in arrow and in numpy
    # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
    # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
    # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
    """

    def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
        if pa.types.is_list(pa_type):
            return _unnest_pa_type(pa_type.value_type)
        return pa_type

    if unnest:
        pa_type = _unnest_pa_type(pa_type)
    return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
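# Illustrative sketch (not part of the module): primitive numeric types can be
# converted to NumPy without copying, while booleans and temporal types cannot.
#
#     _is_zero_copy_only(pa.int32())                            # -> True
#     _is_zero_copy_only(pa.bool_())                            # -> False
#     _is_zero_copy_only(pa.list_(pa.float64()), unnest=True)   # -> True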
class ArrayExtensionArray(pa.ExtensionArray):
    def __array__(self):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        return self.to_numpy(zero_copy_only=zero_copy_only)

    def __getitem__(self, i):
        return self.storage[i]

    def to_numpy(self, zero_copy_only=True):
        storage: pa.ListArray = self.storage
        null_mask = storage.is_null().to_numpy(zero_copy_only=False)

        if self.type.shape[0] is not None:
            size = 1
            null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))

            for i in range(self.type.ndims):
                size *= self.type.shape[i]
                storage = storage.flatten()
            numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
            numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)

            if len(null_indices):
                numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)

        else:
            shape = self.type.shape
            ndims = self.type.ndims
            arrays = []
            first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
            for i, is_null in enumerate(null_mask):
                if is_null:
                    arrays.append(np.nan)
                else:
                    storage_el = storage[i : i + 1]
                    first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
                    # flatten storage
                    for _ in range(ndims):
                        storage_el = storage_el.flatten()

                    numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
                    arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))

            if len(np.unique(np.diff(first_dim_offsets))) > 1:
                # ragged
                numpy_arr = np.empty(len(arrays), dtype=object)
                numpy_arr[:] = arrays
            else:
                numpy_arr = np.array(arrays)

        return numpy_arr

    def to_pylist(self, maps_as_pydicts: Optional[Literal["lossy", "strict"]] = None):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
        if self.type.shape[0] is None and numpy_arr.dtype == object:
            return [arr.tolist() for arr in numpy_arr.tolist()]
        else:
            return numpy_arr.tolist()


class PandasArrayExtensionDtype(PandasExtensionDtype):
    _metadata = "value_type"

    def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
        self._value_type = value_type

    def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
        if isinstance(array, pa.ChunkedArray):
            array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
        zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
        numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
        return PandasArrayExtensionArray(numpy_arr)

    @classmethod
    def construct_array_type(cls):
        return PandasArrayExtensionArray

    @property
    def type(self) -> type:
        return np.ndarray

    @property
    def kind(self) -> str:
        return "O"

    @property
    def name(self) -> str:
        return f"array[{self.value_type}]"

    @property
    def value_type(self) -> np.dtype:
        return self._value_type


class PandasArrayExtensionArray(PandasExtensionArray):
    def __init__(self, data: np.ndarray, copy: bool = False):
        self._data = data if not copy else np.array(data)
        self._dtype = PandasArrayExtensionDtype(data.dtype)

    def __array__(self, dtype=None):
        """
        Convert to NumPy Array.

        Note that Pandas expects a 1D array when dtype is set to object.
        But for other dtypes, the returned shape is the same as the one of ``data``.

        More info about pandas 1D requirement for PandasExtensionArray here:
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
        """
        if dtype == np.dtype(object):
            out = np.empty(len(self._data), dtype=object)
            for i in range(len(self._data)):
                out[i] = self._data[i]
            return out
        if dtype is None:
            return self._data
        else:
            return self._data.astype(dtype)

    def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
        return PandasArrayExtensionArray(self._data, copy=True)

    @classmethod
    def _from_sequence(
        cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
    ) -> "PandasArrayExtensionArray":
        if len(scalars) > 1 and all(
            isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
        ):
            data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
        else:
            data = np.empty(len(scalars), dtype=object)
            data[:] = scalars
        return cls(data, copy=copy)

    @classmethod
    def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
        if len(to_concat) > 1 and all(
            va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
            for va in to_concat
        ):
            data = np.vstack([va._data for va in to_concat])
        else:
            data = np.empty(len(to_concat), dtype=object)
            data[:] = [va._data for va in to_concat]
        return cls(data, copy=False)

    @property
    def dtype(self) -> PandasArrayExtensionDtype:
        return self._dtype

    @property
    def nbytes(self) -> int:
        return self._data.nbytes

    def isna(self) -> np.ndarray:
        return np.array([pd.isna(arr).any() for arr in self._data])

    def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
        raise NotImplementedError()

    def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
        if isinstance(item, int):
            return self._data[item]
        return PandasArrayExtensionArray(self._data[item], copy=False)

    def take(
        self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
    ) -> "PandasArrayExtensionArray":
        indices: np.ndarray = np.asarray(indices, dtype=int)
        if allow_fill:
            fill_value = (
                self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
            )
            mask = indices == -1
            if (indices < -1).any():
                raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
            elif len(self) > 0:
                pass
            elif not np.all(mask):
                raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
            else:
                data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
                return PandasArrayExtensionArray(data, copy=False)
        took = self._data.take(indices, axis=0)
        if allow_fill and mask.any():
            took[mask] = [fill_value] * np.sum(mask)
        return PandasArrayExtensionArray(took, copy=False)

    def __len__(self) -> int:
        return len(self._data)

    def __eq__(self, other) -> np.ndarray:
        if not isinstance(other, PandasArrayExtensionArray):
            raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
        return (self._data == other._data).all()


def pandas_types_mapper(dtype):
    if isinstance(dtype, _ArrayXDExtensionType):
        return PandasArrayExtensionDtype(dtype.value_type)


@dataclass
class ClassLabel:
    """Feature type for integer class labels.

    There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:

    * `num_classes`: Create 0 to (num_classes-1) labels.
    * `names`: List of label strings.
    * `names_file`: File containing the list of labels.

    Under the hood the labels are stored as integers.
    You can use negative integers to represent unknown/missing labels.

    Args:
        num_classes (`int`, *optional*):
            Number of classes. All labels must be < `num_classes`.
        names (`list` of `str`, *optional*):
            String names for the integer classes.
            The order in which the names are provided is kept.
        names_file (`str`, *optional*):
            Path to a file with names for the integer classes, one per line.

    Example:

    ```py
    >>> from datasets import Features, ClassLabel
    >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
    >>> features
    {'label': ClassLabel(names=['bad', 'ok', 'good'])}
    ```
    """

    num_classes: InitVar[Optional[int]] = None  # Pseudo-field: ignored by asdict/fields when converting to/from dict
    names: list[str] = None
    names_file: InitVar[Optional[str]] = None  # Pseudo-field: ignored by asdict/fields when converting to/from dict
    id: Optional[str] = field(default=None, repr=False)
    # Automatically constructed
    dtype: ClassVar[str] = "int64"
    pa_type: ClassVar[Any] = pa.int64()
    _str2int: ClassVar[dict[str, int]] = None
    _int2str: ClassVar[list[str]] = None
    _type: str = field(default="ClassLabel", init=False, repr=False)

    def __post_init__(self, num_classes, names_file):
        self.num_classes = num_classes
        self.names_file = names_file
        if self.names_file is not None and self.names is not None:
            raise ValueError("Please provide either names or names_file but not both.")
        # Set self.names
        if self.names is None:
            if self.names_file is not None:
                self.names = self._load_names_from_file(self.names_file)
            elif self.num_classes is not None:
                self.names = [str(i) for i in range(self.num_classes)]
            else:
                raise ValueError("Please provide either num_classes, names or names_file.")
        elif not isinstance(self.names, SequenceABC):
            raise TypeError(f"Please provide names as a list, is {type(self.names)}")
        # Set self.num_classes
        if self.num_classes is None:
            self.num_classes = len(self.names)
        elif self.num_classes != len(self.names):
            raise ValueError(
                "ClassLabel number of names do not match the defined num_classes. "
                f"Got {len(self.names)} names VS {self.num_classes} num_classes"
            )
        # Prepare mappings
        self._int2str = [str(name) for name in self.names]
        self._str2int = {name: i for i, name in enumerate(self._int2str)}
        if len(self._int2str) != len(self._str2int):
            raise ValueError("Some label names are duplicated. Each label name should be unique.")

    def __call__(self):
        return self.pa_type

    def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
        """Conversion class name `string` => `integer`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train")
        >>> ds.features["label"].str2int('neg')
        0
        ```
        """
        if not isinstance(values, str) and not isinstance(values, Iterable):
            raise ValueError(
                f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
            )
        return_list = True
        if isinstance(values, str):
            values = [values]
            return_list = False

        output = [self._strval2int(value) for value in values]
        return output if return_list else output[0]

    def _strval2int(self, value: str) -> int:
        failed_parse = False
        value = str(value)
        # first attempt - raw string value
        int_value = self._str2int.get(value)
        if int_value is None:
            # second attempt - strip whitespace
            int_value = self._str2int.get(value.strip())
            if int_value is None:
                # third attempt - convert str to int
                try:
                    int_value = int(value)
                except ValueError:
                    failed_parse = True
                else:
                    if int_value < -1 or int_value >= self.num_classes:
                        failed_parse = True
        if failed_parse:
            raise ValueError(f"Invalid string class label {value}")
        return int_value

    def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
        """Conversion `integer` => class name `string`.

        Regarding unknown/missing labels: passing negative integers raises `ValueError`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train")
        >>> ds.features["label"].int2str(0)
        'neg'
        ```
        """
        if not isinstance(values, int) and not isinstance(values, Iterable):
            raise ValueError(
                f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
            )
        return_list = True
        if isinstance(values, int):
            values = [values]
            return_list = False

        for v in values:
            if not 0 <= v < self.num_classes:
                raise ValueError(f"Invalid integer class label {v:d}")

        output = [self._int2str[int(v)] for v in values]
        return output if return_list else output[0]

    def encode_example(self, example_data):
        if self.num_classes is None:
            raise ValueError(
                "Trying to use ClassLabel feature with undefined number of class. "
                "Please set ClassLabel.names or num_classes."
            )

        # If a string is given, convert to associated integer
        if isinstance(example_data, str):
            example_data = self.str2int(example_data)

        # Allowing -1 to mean no label.
        if not -1 <= example_data < self.num_classes:
            raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
        return example_data

    def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
        """Cast an Arrow array to the `ClassLabel` arrow storage type.
        The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:

        - `pa.string()`
        - `pa.int()`

        Args:
            storage (`Union[pa.StringArray, pa.IntegerArray]`):
                PyArrow array to cast.

        Returns:
            `pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
        """
        if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
            min_max = pc.min_max(storage).as_py()
            if min_max["max"] is not None and min_max["max"] >= self.num_classes:
                raise ValueError(
                    f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
                )
        elif isinstance(storage, pa.StringArray):
            storage = pa.array(
                [self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
            )
        return array_cast(storage, self.pa_type)

    @staticmethod
    def _load_names_from_file(names_filepath):
        with open(names_filepath, encoding="utf-8") as f:
            return [name.strip() for name in f.read().split("\n") if name.strip()]  # Filter empty names


class Sequence:
    """
    A `Sequence` is a utility that automatically converts an internal dictionary feature into a dictionary of lists.
    This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be
    un-wanted in some cases. If you don't want this behavior, you can use a [`List`] or a [`LargeList`]
    instead of the [`Sequence`].

    Args:
        feature ([`FeatureType`]):
            Child feature data type of each item within the list.
        length (optional `int`, default to -1):
            Length of the list if it is fixed. Defaults to -1 which means an arbitrary length.

    Returns:
        [`List`] of the specified feature, except `dict` of sub-features which are converted to `dict` of lists of
        sub-features for compatibility with TFDS.
    """

    def __new__(cls, feature=None, length=-1, **kwargs):
        # useful to still get isinstance(Sequence(Value("int64")), Sequence)
        if (
            cls is Sequence
            and isinstance(feature, dict)
            and any(not isinstance(subfeature, List) for subfeature in feature.values())
        ):
            out = {key: List(value, length=length, **kwargs) for key, value in feature.items()}
        else:
            out = super().__new__(List)
        return out


@dataclass(repr=False)
class List(Sequence):
    """Feature type for list data composed of a child feature data type.

    It is backed by `pyarrow.ListType`, which uses 32-bit offsets, or a fixed length.

    Args:
        feature ([`FeatureType`]):
            Child feature data type of each item within the list.
        length (optional `int`, default to -1):
            Length of the list if it is fixed. Defaults to -1 which means an arbitrary length.
    """

    feature: Any
    length: int = -1
    id: Optional[str] = field(default=None, repr=False)
    # Automatically constructed
    pa_type: ClassVar[Any] = None
    _type: str = field(default="List", init=False, repr=False)

    def __repr__(self):
        if self.length != -1:
            return f"{type(self).__name__}({self.feature}, length={self.length})"
        else:
            return f"{type(self).__name__}({self.feature})"


@dataclass(repr=False)
class LargeList:
    """Feature type for large list data composed of a child feature data type.

    It is backed by `pyarrow.LargeListType`, which is like `pyarrow.ListType` but with 64-bit rather than 32-bit offsets.

    Args:
        feature ([`FeatureType`]):
            Child feature data type of each item within the large list.
    """

    feature: Any
    id: Optional[str] = field(default=None, repr=False)
    # Automatically constructed
    pa_type: ClassVar[Any] = None
    _type: str = field(default="LargeList", init=False, repr=False)

    def __repr__(self):
        return f"{type(self).__name__}({self.feature})"
FeatureType = Union[
    dict,
    list,
    tuple,
    Value,
    ClassLabel,
    Translation,
    TranslationVariableLanguages,
    LargeList,
    List,
    Array2D,
    Array3D,
    Array4D,
    Array5D,
    Audio,
    Image,
    Video,
    Pdf,
]


def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
    """Check if the object is not None.

    If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
    """
    if obj is None:
        return False
    elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, LargeList, List))):
        if len(obj) > 0:
            if schema is None:
                pass
            elif isinstance(schema, (list, tuple)):
                schema = schema[0]
            else:
                schema = schema.feature
            return _check_non_null_non_empty_recursive(obj[0], schema)
        else:
            return False
    else:
        return True


def get_nested_type(schema: FeatureType) -> pa.DataType:
    """
    get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
    generate_from_arrow_type().

    It performs double-duty as the implementation of Features.type and handles the conversion of
    datasets.Feature->pa.struct
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, Features):
        return pa.struct(
            {key: get_nested_type(schema[key]) for key in schema}
        )  # Features is subclass of dict, and dict order is deterministic since Python 3.6
    elif isinstance(schema, dict):
        return pa.struct(
            {key: get_nested_type(schema[key]) for key in schema}
        )  # however don't sort on struct types since the order matters
    elif isinstance(schema, (list, tuple)):
        if len(schema) != 1:
            raise ValueError("When defining list feature, you should just provide one example of the inner type")
        value_type = get_nested_type(schema[0])
        return pa.list_(value_type)
    elif isinstance(schema, LargeList):
        value_type = get_nested_type(schema.feature)
        return pa.large_list(value_type)
    elif isinstance(schema, List):
        value_type = get_nested_type(schema.feature)
        return pa.list_(value_type, schema.length)

    # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
    return schema()
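# Illustrative sketch (not part of the module):
#
#     get_nested_type({"a": Value("int32"), "b": List(Value("string"))})
#     # -> struct<a: int32, b: list<item: string>>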
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): if level == 0 and obj is None: raise ValueError("Got None but expected a dictionary instead") return ( {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema} if obj is not None else None ) elif isinstance(schema, (LargeList, List)): if obj is None: return None else: if len(obj) > 0: sub_schema = schema.feature for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break try: changed = bool(encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt) except ValueError: # can happen when comparing arrays changed = False if changed: return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] return list(obj) # Object with special encoding: # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks elif hasattr(schema, "encode_example"): return schema.encode_example(obj) if obj is not None else None # Other object should be directly convertible to a native Arrow type (like Translation and Translation) return obj def decode_nested_example(schema, obj, token_per_repo_id: Optional[dict[str, Union[str, bool, None]]] = None): """Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return ( {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, (LargeList, List)): if obj is None: return None else: sub_schema = schema.feature if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) # Object with special decoding: elif hasattr(schema, "decode_example") and getattr(schema, "decode", True): # we pass the token to read and decode files from private repositories in streaming mode return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) if obj is not None else None return obj _FEATURE_TYPES: dict[str, FeatureType] = { Value.__name__: Value, ClassLabel.__name__: ClassLabel, Translation.__name__: Translation, TranslationVariableLanguages.__name__: TranslationVariableLanguages, LargeList.__name__: LargeList, List.__name__: List, Array2D.__name__: Array2D, Array3D.__name__: Array3D, Array4D.__name__: Array4D, Array5D.__name__: Array5D, Audio.__name__: Audio, Image.__name__: Image, Video.__name__: Video, Pdf.__name__: Pdf, } @experimental def register_feature( feature_cls: type, feature_type: str, ): """ Register a Feature object using a name and class. This function must be used on a Feature class. 
""" if feature_type in _FEATURE_TYPES: logger.warning( f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})" ) _FEATURE_TYPES[feature_type] = feature_cls def generate_from_dict(obj: Any): """Regenerate the nested feature object from a deserialized dict. We use the '_type' fields to get the dataclass name to load. generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that :class:`Value` automatically performs. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(obj, list): return [generate_from_dict(value) for value in obj] # Otherwise we have a dict or a dataclass if "_type" not in obj or isinstance(obj["_type"], dict): return {key: generate_from_dict(value) for key, value in obj.items()} obj = dict(obj) _type = obj.pop("_type") class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None) if class_type is None: raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}") if class_type == LargeList: feature = obj.pop("feature") return LargeList(generate_from_dict(feature), **obj) if class_type == List: feature = obj.pop("feature") return List(generate_from_dict(feature), **obj) if class_type == Sequence: # backward compatibility, this translates to a List or a dict feature = obj.pop("feature") return Sequence(feature=generate_from_dict(feature), **obj) field_names = {f.name for f in fields(class_type)} return class_type(**{k: v for k, v in obj.items() if k in field_names}) def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType: """ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for a single field. This is the high-level arrow->datasets type conversion and is inverted by get_nested_type(). 
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
    """
    generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
    a single field.

    This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().

    This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
    full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
    """
    if isinstance(pa_type, pa.StructType):
        return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
    elif isinstance(pa_type, pa.FixedSizeListType):
        return List(generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
    elif isinstance(pa_type, pa.ListType):
        return List(generate_from_arrow_type(pa_type.value_type))
    elif isinstance(pa_type, pa.LargeListType):
        return LargeList(generate_from_arrow_type(pa_type.value_type))
    elif isinstance(pa_type, _ArrayXDExtensionType):
        array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
        return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
    elif isinstance(pa_type, pa.DataType):
        return Value(dtype=_arrow_to_datasets_dtype(pa_type))
    else:
        raise ValueError(f"Cannot convert {pa_type} to a Feature type.")


def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
    """Build a PyArrow ListArray from a multidimensional NumPy array"""
    arr = np.array(arr)
    values = pa.array(arr.flatten(), type=type)
    for i in range(arr.ndim - 1):
        n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
        step_offsets = arr.shape[arr.ndim - i - 1]
        offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
        values = pa.ListArray.from_arrays(offsets, values)
    return values


def list_of_pa_arrays_to_pyarrow_listarray(l_arr: list[Optional[pa.Array]]) -> pa.ListArray:
    null_mask = np.array([arr is None for arr in l_arr])
    null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
    l_arr = [arr for arr in l_arr if arr is not None]
    offsets = np.cumsum(
        [0] + [len(arr) for arr in l_arr], dtype=object
    )  # convert to dtype object to allow None insertion
    offsets = np.insert(offsets, null_indices, None)
    offsets = pa.array(offsets, type=pa.int32())
    values = pa.concat_arrays(l_arr)
    return pa.ListArray.from_arrays(offsets, values)


def list_of_np_array_to_pyarrow_listarray(l_arr: list[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
    """Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
    if len(l_arr) > 0:
        return list_of_pa_arrays_to_pyarrow_listarray(
            [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
        )
    else:
        return pa.array([], type=type)


def contains_any_np_array(data: Any):
    """Return `True` if data is a NumPy ndarray or (recursively) if the first non-null value in a list is a NumPy ndarray.

    Args:
        data (Any): Data.

    Returns:
        bool
    """
    if isinstance(data, np.ndarray):
        return True
    elif isinstance(data, list):
        return contains_any_np_array(first_non_null_value(data)[1])
    else:
        return False


def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, list], type: pa.DataType = None) -> pa.ListArray:
    """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.

    Args:
        data (Union[np.ndarray, List]): Data.
        type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.

    Returns:
        pa.ListArray
    """
    if isinstance(data, np.ndarray):
        return numpy_to_pyarrow_listarray(data, type=type)
    elif isinstance(data, list):
        return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
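# Illustrative sketch (not part of the module): a 2x2 NumPy array becomes a
# ListArray of two length-2 lists.
#
#     numpy_to_pyarrow_listarray(np.array([[1, 2], [3, 4]]))
#     # -> <pyarrow.ListArray> [[1, 2], [3, 4]]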
def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
    """Convert to PyArrow ListArray.

    Args:
        data (Any): List, iterable, np.ndarray or pd.Series.
        pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.

    Returns:
        pyarrow.Array
    """
    if contains_any_np_array(data):
        return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
    else:
        return pa.array(data, pa_type.storage_dtype)


def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
    """Visit a (possibly nested) feature.

    Args:
        feature (FeatureType): the feature type to be checked

    Returns:
        visited feature (FeatureType)
    """
    if isinstance(feature, Features):
        out = func(Features({k: _visit(f, func) for k, f in feature.items()}))
    elif isinstance(feature, dict):
        out = func({k: _visit(f, func) for k, f in feature.items()})
    elif isinstance(feature, LargeList):
        out = func(LargeList(_visit(feature.feature, func)))
    elif isinstance(feature, List):
        out = func(List(_visit(feature.feature, func), length=feature.length))
    else:
        out = func(feature)
    return feature if out is None else out


_VisitPath = list[Union[str, Literal[0]]]


def _visit_with_path(
    feature: FeatureType, func: Callable[[FeatureType, _VisitPath], Optional[FeatureType]], visit_path: _VisitPath = []
) -> FeatureType:
    """Visit a (possibly nested) feature with its path in the Feature object.

    A path in a nested feature object is the list of keys that need to be sequentially accessed to get to the sub-feature.
    For example:

    - ["foo"] corresponds to the column "foo"
    - ["foo", 0] corresponds to the sub-feature of the lists in "foo"
    - ["foo", "bar"] corresponds to the sub-feature of the dicts in "foo" with key "bar"

    Args:
        feature (`FeatureType`): the feature type to be checked.

    Returns:
        `FeatureType`: the visited feature.
    """
    if isinstance(feature, Features):
        out = func(Features({k: _visit_with_path(f, func, visit_path + [k]) for k, f in feature.items()}), visit_path)
    elif isinstance(feature, dict):
        out = func({k: _visit_with_path(f, func, visit_path + [k]) for k, f in feature.items()}, visit_path)
    elif isinstance(feature, List):
        out = func(List(_visit_with_path(feature.feature, func, visit_path + [0]), length=feature.length), visit_path)
    elif isinstance(feature, LargeList):
        out = func(LargeList(_visit_with_path(feature.feature, func, visit_path + [0])), visit_path)
    else:
        out = func(feature, visit_path)
    return feature if out is None else out


def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
    """Check if a (possibly nested) feature requires decoding.

    Args:
        feature (FeatureType): the feature type to be checked
        ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
            of the `decode` attribute of the decodable feature types.

    Returns:
        :obj:`bool`
    """
    if isinstance(feature, dict):
        return any(require_decoding(f) for f in feature.values())
    elif isinstance(feature, (list, tuple)):
        return require_decoding(feature[0])
    elif isinstance(feature, LargeList):
        return require_decoding(feature.feature)
    elif isinstance(feature, List):
        return require_decoding(feature.feature)
    else:
        return hasattr(feature, "decode_example") and (
            getattr(feature, "decode", True) if not ignore_decode_attribute else True
        )


def require_storage_cast(feature: FeatureType) -> bool:
    """Check if a (possibly nested) feature requires storage casting.

    Args:
        feature (FeatureType): the feature type to be checked

    Returns:
        :obj:`bool`
    """
    if isinstance(feature, dict):
        return any(require_storage_cast(f) for f in feature.values())
    elif isinstance(feature, LargeList):
        return require_storage_cast(feature.feature)
    elif isinstance(feature, List):
        return require_storage_cast(feature.feature)
    else:
        return hasattr(feature, "cast_storage")


def require_storage_embed(feature: FeatureType) -> bool:
    """Check if a (possibly nested) feature requires embedding data into storage.

    Args:
        feature (FeatureType): the feature type to be checked

    Returns:
        :obj:`bool`
    """
    if isinstance(feature, dict):
        return any(require_storage_cast(f) for f in feature.values())
    elif isinstance(feature, LargeList):
        return require_storage_cast(feature.feature)
    elif isinstance(feature, List):
        return require_storage_cast(feature.feature)
    else:
        return hasattr(feature, "embed_storage")


def keep_features_dicts_synced(func):
    """
    Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the
    :class:`datasets.Features` object in sync with the main dictionary.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        if args:
            self: "Features" = args[0]
            args = args[1:]
        else:
            self: "Features" = kwargs.pop("self")
        out = func(self, *args, **kwargs)
        assert hasattr(self, "_column_requires_decoding")
        self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
        return out

    wrapper._decorator_name_ = "_keep_dicts_synced"
    return wrapper


class Features(dict):
    """A special dictionary that defines the internal structure of a dataset.

    Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
    and values are the type of that column.

    `FieldType` can be one of the following:

    - [`Value`] feature specifies a single data type value, e.g. `int64` or `string`.
    - [`ClassLabel`] feature specifies a predefined set of classes which can have labels associated to them and
      will be stored as integers in the dataset.
    - Python `dict` specifies a composite feature containing a mapping of sub-fields to sub-features.
      It's possible to have nested fields of nested fields in an arbitrary manner.
    - [`List`] or [`LargeList`] specifies a composite feature containing a sequence of sub-features, all of the same feature type.
    - [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
    - [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
      to an audio file ("path" key) and its bytes content ("bytes" key).
      This feature loads the audio lazily with a decoder.
    - [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
      or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key).
      This feature extracts the image data.
    - [`Video`] feature to store the absolute path to a video file, a `torchcodec.decoders.VideoDecoder` object or a
      dictionary with the relative path to a video file ("path" key) and its bytes content ("bytes" key).
      This feature loads the video lazily with a decoder.
    - [`Pdf`] feature to store the absolute path to a PDF file, a `pdfplumber.pdf.PDF` object or a dictionary with
      the relative path to a PDF file ("path" key) and its bytes content ("bytes" key).
      This feature loads the PDF lazily with a PDF reader.
    - [`Translation`] or [`TranslationVariableLanguages`] feature specific to Machine Translation.
    """

    def __init__(*args, **kwargs):
        # self not in the signature to allow passing self as a kwarg
        if not args:
            raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
        self, *args = args
        super(Features, self).__init__(*args, **kwargs)
        # keep track of columns which require decoding
        self._column_requires_decoding: dict[str, bool] = {
            col: require_decoding(feature) for col, feature in self.items()
        }

        # backward compatibility with datasets<4 : [feature] -> List(feature)
        def _check_old_list(feature):
            if isinstance(feature, list):
                return List(_visit(feature[0], _check_old_list))
            return feature

        for column_name, feature in self.items():
            self[column_name] = _visit(feature, _check_old_list)

    __setitem__ = keep_features_dicts_synced(dict.__setitem__)
    __delitem__ = keep_features_dicts_synced(dict.__delitem__)
    update = keep_features_dicts_synced(dict.update)
    setdefault = keep_features_dicts_synced(dict.setdefault)
    pop = keep_features_dicts_synced(dict.pop)
    popitem = keep_features_dicts_synced(dict.popitem)
    clear = keep_features_dicts_synced(dict.clear)

    def __reduce__(self):
        return Features, (dict(self),)

    @property
    def type(self):
        """
        Features field types.

        Returns:
            :obj:`pyarrow.DataType`
        """
        return get_nested_type(self)

    @property
    def arrow_schema(self):
        """
        Features schema.

        Returns:
            :obj:`pyarrow.Schema`
        """
        hf_metadata = {"info": {"features": self.to_dict()}}
        return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})

    @classmethod
    def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
        """
        Construct [`Features`] from Arrow Schema.
        It also checks the schema metadata for Hugging Face Datasets features.
        Non-nullable fields are not supported and set to nullable.

        Also, pa.dictionary is not supported and it uses its underlying type instead.
        Therefore datasets convert DictionaryArray objects to their actual values.

        Args:
            pa_schema (`pyarrow.Schema`):
                Arrow Schema.

        Returns:
            [`Features`]
        """
        # try to load features from the arrow schema metadata
        metadata_features = Features()
        if pa_schema.metadata is not None and b"huggingface" in pa_schema.metadata:
            metadata = json.loads(pa_schema.metadata[b"huggingface"].decode())
            if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
                metadata_features = Features.from_dict(metadata["info"]["features"])
        metadata_features_schema = metadata_features.arrow_schema
        obj = {
            field.name: (
                metadata_features[field.name]
                if field.name in metadata_features and metadata_features_schema.field(field.name) == field
                else generate_from_arrow_type(field.type)
            )
            for field in pa_schema
        }
        return cls(**obj)

    @classmethod
    def from_dict(cls, dic) -> "Features":
        """
        Construct [`Features`] from dict.

        Regenerate the nested feature object from a deserialized dict.
        We use the `_type` key to infer the dataclass name of the feature `FieldType`.

        It allows for a convenient constructor syntax
        to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
        a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`]
        and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except
        for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically
        performs.

        Args:
            dic (`dict[str, Any]`):
                Python dictionary.
Returns: `Features` Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value('string')} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") for list_type in ["large_list", "list", "sequence"]: # # list_type: -> list_type: int32 # dtype: int32 -> # if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["dtype"]: feature[list_type] = feature[list_type]["dtype"] # # list_type: -> list_type: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["struct"]: feature[list_type] = feature[list_type]["struct"] # # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "LargeList": _feature = obj.pop("feature") return simplify({"large_list": to_yaml_inner(_feature), **obj}) elif _type == "List": _feature = obj.pop("feature") return simplify({"list": to_yaml_inner(_feature), **obj}) elif _type == "Value": return obj elif _type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list): return simplify({"list": simplify(to_yaml_inner(obj[0]))}) elif isinstance(obj, tuple): return to_yaml_inner(list(obj)) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") def to_yaml_types(obj: dict) -> dict: if isinstance(obj, dict): return {k: to_yaml_types(v) for k, v in obj.items()} elif isinstance(obj, list): return [to_yaml_types(v) for v in obj] elif isinstance(obj, tuple): return to_yaml_types(list(obj)) else: return obj return to_yaml_types(to_yaml_inner(yaml_data)["struct"]) @classmethod def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") for list_type in ["large_list", "list", "sequence"]: # # list_type: int32 -> list_type: # -> dtype: int32 # if isinstance(feature.get(list_type), str): feature[list_type] = {"dtype": feature[list_type]} # # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): raise ValueError( 
f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "large_list": _feature = from_yaml_inner(unsimplify(obj).pop(_type)) return {"feature": _feature, **obj, "_type": "LargeList"} if _type == "sequence": # backward compatibility if isinstance(obj[_type], list): _feature = from_yaml_inner(unsimplify(obj).pop(_type)) return { name: {"feature": _subfeature, **obj, "_type": "List"} for name, _subfeature in _feature.items() } else: _feature = from_yaml_inner(unsimplify(obj).pop(_type)) return {"feature": _feature, **obj, "_type": "List"} if _type == "list": _feature = from_yaml_inner(unsimplify(obj).pop(_type)) return {"feature": _feature, **obj, "_type": "List"} if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try: Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # e.g. Audio, Image, ArrayXD return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj, level=1) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow. Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding. Args: example (`dict[str, Any]`): Dataset row data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. 
Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column( self, column: list, column_name: str, token_per_repo_id: Optional[dict[str, Union[str, bool, None]]] = None ): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. column_name (`str`): Dataset column name. Returns: `list[Any]` """ return ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str) Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(names=['neg', 'pos']), 'text': Value('string')} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. 
Returns: [`Features`] Example:: >>> from datasets import Features, List, Value >>> # let's say we have two features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": {"a": Value("string"), "b": Value("string")}}) >>> f2 = Features({"root": {"b": Value("string"), "a": Value("string")}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here List is defined at the root level), but makes the fields order match >>> f1.reorder_fields_as(f2) {'root': List({'b': Value('string'), 'a': Value('string')})} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys() - target.keys()} are missing from target " f"and {target.keys() - source.keys()} are missing from source" + stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, List): if not isinstance(target, List): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) return List(recursive_reorder(source.feature, target.feature, stack + ".<list>"), length=source.length) elif isinstance(source, LargeList): if not isinstance(target, LargeList): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) return LargeList(recursive_reorder(source.feature, target.feature, stack + ".<list>")) else: return source return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rajpurkar/squad", split="train") >>> ds.features.flatten() {'answers.answer_start': List(Value('int32'), id=None), 'answers.text': List(Value('string'), id=None), 'context': Value('string'), 'id': Value('string'), 'question': Value('string'), 'title': Value('string')} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self def _align_features(features_list: list[Features]) -> list[Features]: """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature.""" name2feature = {} for features in features_list: for k, v in features.items(): if k in name2feature and isinstance(v, dict): # Recursively align features. 
name2feature[k] = _align_features([name2feature[k], v])[0]
            elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
                name2feature[k] = v

    return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]


def _check_if_features_can_be_aligned(features_list: list[Features]):
    """Check if the dictionaries of features can be aligned.

    Two dictionaries of features can be aligned if the keys they share have the same type, or if some of them are of
    type `Value("null")`.
    """
    name2feature = {}
    for features in features_list:
        for k, v in features.items():
            if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
                name2feature[k] = v

    for features in features_list:
        for k, v in features.items():
            if isinstance(v, dict) and isinstance(name2feature[k], dict):  # Deep checks for structure.
                _check_if_features_can_be_aligned([name2feature[k], v])
            elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
                raise ValueError(
                    f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
                )


def _fix_for_backward_compatible_features(feature: Any) -> FeatureType:
    def _fix_old_list(feature):
        if isinstance(feature, list):
            return List(_fix_for_backward_compatible_features(feature[0]))
        return feature

    return _visit(feature, _fix_old_list)
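# --- Illustrative usage (editor's sketch, not part of the module) -----------
# A minimal, hedged example of the Features <-> pyarrow.Schema bijection
# implemented above. It only assumes names defined or imported in this module
# (`Features`, `Value`, `List`, `generate_from_arrow_type`, `pa`); the column
# names are made up for illustration.
if __name__ == "__main__":
    _features = Features({"id": Value("int32"), "tokens": List(Value("string"))})
    # Features -> pa.Schema: the schema carries the features as "huggingface" metadata.
    _schema = _features.arrow_schema
    # pa.Schema -> Features: the metadata (when present) restores the exact feature types.
    assert Features.from_arrow_schema(_schema) == _features
    # generate_from_arrow_type() is the field-level counterpart, used as a fallback
    # when no metadata is available.
    assert generate_from_arrow_type(pa.int32()) == Value("int32")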
datasets/src/datasets/features/features.py/0
{ "file_path": "datasets/src/datasets/features/features.py", "repo_id": "datasets", "token_count": 41301 }
102
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """DatasetInfo record information we know about a dataset. This includes things that we know about the dataset statically, i.e.: - description - canonical location - does it have validation and tests splits - size - etc. This also includes the things that can and should be computed once we've processed the dataset as well: - number of examples (in each split) - etc. """ import copy import dataclasses import json import os import posixpath from dataclasses import dataclass from pathlib import Path from typing import ClassVar, Optional, Union import fsspec from fsspec.core import url_to_fs from huggingface_hub import DatasetCard, DatasetCardData from . import config from .features import Features from .splits import SplitDict from .utils import Version from .utils.logging import get_logger from .utils.py_utils import asdict, unique_values logger = get_logger(__name__) @dataclass class SupervisedKeysData: input: str = "" output: str = "" @dataclass class DownloadChecksumsEntryData: key: str = "" value: str = "" class MissingCachedSizesConfigError(Exception): """The expected cached sizes of the download file are missing.""" class NonMatchingCachedSizesError(Exception): """The prepared split doesn't have expected sizes.""" @dataclass class PostProcessedInfo: features: Optional[Features] = None resources_checksums: Optional[dict] = None def __post_init__(self): # Convert back to the correct classes when we reload from dict if self.features is not None and not isinstance(self.features, Features): self.features = Features.from_dict(self.features) @classmethod def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo": field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names}) @dataclass class DatasetInfo: """Information about a dataset. `DatasetInfo` documents datasets, including its name, version, and features. See the constructor arguments and properties for a full list. Not all fields are known on construction and may be updated later. Attributes: description (`str`): A description of the dataset. citation (`str`): A BibTeX citation of the dataset. homepage (`str`): A URL to the official homepage for the dataset. license (`str`): The dataset's license. It can be the name of the license or a paragraph containing the terms of the license. features ([`Features`], *optional*): The features used to specify the dataset's column types. post_processed (`PostProcessedInfo`, *optional*): Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index. supervised_keys (`SupervisedKeysData`, *optional*): Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS). 
builder_name (`str`, *optional*): The name of the `GeneratorBasedBuilder` subclass used to create the dataset. It is also the snake_case version of the dataset builder class name. config_name (`str`, *optional*): The name of the configuration derived from [`BuilderConfig`]. version (`str` or [`Version`], *optional*): The version of the dataset. splits (`dict`, *optional*): The mapping between split name and metadata. download_checksums (`dict`, *optional*): The mapping between the URL to download the dataset's checksums and corresponding metadata. download_size (`int`, *optional*): The size of the files to download to generate the dataset, in bytes. post_processing_size (`int`, *optional*): Size of the dataset in bytes after post-processing, if any. dataset_size (`int`, *optional*): The combined size in bytes of the Arrow tables for all splits. size_in_bytes (`int`, *optional*): The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files). **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`]. """ # Set in the dataset builders description: str = dataclasses.field(default_factory=str) citation: str = dataclasses.field(default_factory=str) homepage: str = dataclasses.field(default_factory=str) license: str = dataclasses.field(default_factory=str) features: Optional[Features] = None post_processed: Optional[PostProcessedInfo] = None supervised_keys: Optional[SupervisedKeysData] = None # Set later by the builder builder_name: Optional[str] = None dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name config_name: Optional[str] = None version: Optional[Union[str, Version]] = None # Set later by `download_and_prepare` splits: Optional[dict] = None download_checksums: Optional[dict] = None download_size: Optional[int] = None post_processing_size: Optional[int] = None dataset_size: Optional[int] = None size_in_bytes: Optional[int] = None _INCLUDED_INFO_IN_YAML: ClassVar[list[str]] = [ "config_name", "download_size", "dataset_size", "features", "splits", ] def __post_init__(self): # Convert back to the correct classes when we reload from dict if self.features is not None and not isinstance(self.features, Features): self.features = Features.from_dict(self.features) if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo): self.post_processed = PostProcessedInfo.from_dict(self.post_processed) if self.version is not None and not isinstance(self.version, Version): if isinstance(self.version, str): self.version = Version(self.version) else: self.version = Version.from_dict(self.version) if self.splits is not None and not isinstance(self.splits, SplitDict): self.splits = SplitDict.from_split_dict(self.splits) if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData): if isinstance(self.supervised_keys, (tuple, list)): self.supervised_keys = SupervisedKeysData(*self.supervised_keys) else: self.supervised_keys = SupervisedKeysData(**self.supervised_keys) def write_to_directory(self, dataset_info_dir, pretty_print=False, storage_options: Optional[dict] = None): """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`. Args: dataset_info_dir (`str`): Destination directory. pretty_print (`bool`, defaults to `False`): If `True`, the JSON will be pretty-printed with the indent level of 4. 
storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.9.0"/> Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="validation") >>> ds.info.write_to_directory("/path/to/directory/") ``` """ fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f: self._dump_info(f, pretty_print=pretty_print) if self.license: with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f: self._dump_license(f) def _dump_info(self, file, pretty_print=False): """Dump info in `file` file-like object open in bytes mode (to support remote files)""" file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8")) def _dump_license(self, file): """Dump license in `file` file-like object open in bytes mode (to support remote files)""" file.write(self.license.encode("utf-8")) @classmethod def from_merge(cls, dataset_infos: list["DatasetInfo"]): dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None] if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos): # if all dataset_infos are equal we don't need to merge. Just return the first. return dataset_infos[0] description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip() citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip() homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip() license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip() features = None supervised_keys = None return cls( description=description, citation=citation, homepage=homepage, license=license, features=features, supervised_keys=supervised_keys, ) @classmethod def from_directory(cls, dataset_info_dir: str, storage_options: Optional[dict] = None) -> "DatasetInfo": """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`. This function updates all the dynamically generated fields (num_examples, hash, time of creation,...) of the [`DatasetInfo`]. This will overwrite all previous metadata. Args: dataset_info_dir (`str`): The directory containing the metadata file. This should be the root directory of a specific dataset version. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. 
<Added version="2.9.0"/> Example: ```py >>> from datasets import DatasetInfo >>> ds_info = DatasetInfo.from_directory("/path/to/directory/") ``` """ fs: fsspec.AbstractFileSystem fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {})) logger.debug(f"Loading Dataset info from {dataset_info_dir}") if not dataset_info_dir: raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.") with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f: dataset_info_dict = json.load(f) return cls.from_dict(dataset_info_dict) @classmethod def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo": field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names}) def update(self, other_dataset_info: "DatasetInfo", ignore_none=True): self_dict = self.__dict__ self_dict.update( **{ k: copy.deepcopy(v) for k, v in other_dataset_info.__dict__.items() if (v is not None or not ignore_none) } ) def copy(self) -> "DatasetInfo": return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) def _to_yaml_dict(self) -> dict: yaml_dict = {} dataset_info_dict = asdict(self) for key in dataset_info_dict: if key in self._INCLUDED_INFO_IN_YAML: value = getattr(self, key) if hasattr(value, "_to_yaml_list"): # Features, SplitDict yaml_dict[key] = value._to_yaml_list() elif hasattr(value, "_to_yaml_string"): # Version yaml_dict[key] = value._to_yaml_string() else: yaml_dict[key] = value return yaml_dict @classmethod def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo": yaml_data = copy.deepcopy(yaml_data) if yaml_data.get("features") is not None: yaml_data["features"] = Features._from_yaml_list(yaml_data["features"]) if yaml_data.get("splits") is not None: yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"]) field_names = {f.name for f in dataclasses.fields(cls)} return cls(**{k: v for k, v in yaml_data.items() if k in field_names}) class DatasetInfosDict(dict[str, DatasetInfo]): def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None: total_dataset_infos = {} dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME) dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME) if not overwrite: total_dataset_infos = self.from_directory(dataset_infos_dir) total_dataset_infos.update(self) if os.path.exists(dataset_infos_path): # for backward compatibility, let's update the JSON file if it exists with open(dataset_infos_path, "w", encoding="utf-8") as f: dataset_infos_dict = { config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items() } json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None) # Dump the infos in the YAML part of the README.md file if os.path.exists(dataset_readme_path): dataset_card = DatasetCard.load(dataset_readme_path) dataset_card_data = dataset_card.data else: dataset_card = None dataset_card_data = DatasetCardData() if total_dataset_infos: total_dataset_infos.to_dataset_card_data(dataset_card_data) dataset_card = ( DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card ) dataset_card.save(Path(dataset_readme_path)) @classmethod def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict": logger.debug(f"Loading Dataset Infos from {dataset_infos_dir}") # Load the info from the YAML part of README.md if 
os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)): dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data if "dataset_info" in dataset_card_data: return cls.from_dataset_card_data(dataset_card_data) if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)): # this is just to have backward compatibility with dataset_infos.json files with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f: return cls( { config_name: DatasetInfo.from_dict(dataset_info_dict) for config_name, dataset_info_dict in json.load(f).items() } ) else: return cls() @classmethod def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict": if isinstance(dataset_card_data.get("dataset_info"), (list, dict)): if isinstance(dataset_card_data["dataset_info"], list): return cls( { dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict( dataset_info_yaml_dict ) for dataset_info_yaml_dict in dataset_card_data["dataset_info"] } ) else: dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"]) dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default") return cls({dataset_info.config_name: dataset_info}) else: return cls() def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None: if self: # first get existing metadata info if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict): dataset_metadata_infos = { dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"] } elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list): dataset_metadata_infos = { config_metadata["config_name"]: config_metadata for config_metadata in dataset_card_data["dataset_info"] } else: dataset_metadata_infos = {} # update/rewrite existing metadata info with the one to dump total_dataset_infos = { **dataset_metadata_infos, **{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()}, } # the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo for config_name, dset_info_yaml_dict in total_dataset_infos.items(): dset_info_yaml_dict["config_name"] = config_name if len(total_dataset_infos) == 1: # use a struct instead of a list of configurations, since there's only one dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values())) config_name = dataset_card_data["dataset_info"].pop("config_name", None) if config_name != "default": # if config_name is not "default" preserve it and put at the first position dataset_card_data["dataset_info"] = { "config_name": config_name, **dataset_card_data["dataset_info"], } else: dataset_card_data["dataset_info"] = [] for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()): # add the config_name field in first position dataset_info_yaml_dict.pop("config_name", None) dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict} dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
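# --- Illustrative usage (editor's sketch, not part of the module) -----------
# A minimal example of the DatasetInfo serialization round trip defined above.
# It assumes only this module's public API plus a writable temporary directory.
if __name__ == "__main__":
    import tempfile

    _info = DatasetInfo(description="toy dataset", license="apache-2.0")
    with tempfile.TemporaryDirectory() as _tmp_dir:
        # write_to_directory() dumps the info JSON (plus a license file, since `license` is set)
        _info.write_to_directory(_tmp_dir, pretty_print=True)
        _reloaded = DatasetInfo.from_directory(_tmp_dir)
    assert _reloaded.description == _info.description
    assert _reloaded.license == _info.license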
datasets/src/datasets/info.py/0
{ "file_path": "datasets/src/datasets/info.py", "repo_id": "datasets", "token_count": 8405 }
103
from typing import Callable


def is_documented_by(function_with_docstring: Callable):
    """Decorator to share docstrings across common functions.

    Args:
        function_with_docstring (`Callable`):
            The function whose docstring is copied onto the decorated function.
    """

    def wrapper(target_function):
        target_function.__doc__ = function_with_docstring.__doc__
        return target_function

    return wrapper
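# --- Illustrative usage (editor's sketch, not part of the module) -----------
# A minimal example of sharing one docstring between two functions with the
# same contract; the function names below are made up for illustration.
if __name__ == "__main__":

    def load_table(path: str) -> bytes:
        """Load a table from `path` and return its raw bytes."""
        with open(path, "rb") as f:
            return f.read()

    @is_documented_by(load_table)
    def load_table_from_cache(path: str) -> bytes:
        # Same contract as `load_table`, so it reuses the same docstring.
        with open(path, "rb") as f:
            return f.read()

    assert load_table_from_cache.__doc__ == load_table.__doc__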
datasets/src/datasets/utils/doc_utils.py/0
{ "file_path": "datasets/src/datasets/utils/doc_utils.py", "repo_id": "datasets", "token_count": 137 }
104
[ "unknown", "n<1K", "1K<n<10K", "10K<n<100K", "100K<n<1M", "1M<n<10M", "10M<n<100M", "100M<n<1B", "1B<n<10B", "10B<n<100B", "100B<n<1T", "n>1T" ]
datasets/src/datasets/utils/resources/size_categories.json/0
{ "file_path": "datasets/src/datasets/utils/resources/size_categories.json", "repo_id": "datasets", "token_count": 124 }
105
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node NUM_SHARDS = 4 NUM_ITEMS_PER_SHARD = 3 class FailedTestError(RuntimeError): pass def gen(shards: List[str]): for shard in shards: for i in range(NUM_ITEMS_PER_SHARD): yield {"i": i, "shard": shard} def main(): rank = int(os.environ["RANK"]) world_size = int(os.environ["WORLD_SIZE"]) parser = ArgumentParser() parser.add_argument("--streaming", type=bool) parser.add_argument("--local_rank", type=int) parser.add_argument("--num_workers", type=int, default=0) args = parser.parse_args() streaming = args.streaming num_workers = args.num_workers gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]} ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs) if not streaming: ds = Dataset.from_list(list(ds)) ds = split_dataset_by_node(ds, rank=rank, world_size=world_size) dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers) full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD expected_local_size = full_size // world_size expected_local_size += int(rank < (full_size % world_size)) local_size = sum(1 for _ in dataloader) if local_size != expected_local_size: raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}") if __name__ == "__main__": main()
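# --- Usage note (editor's addition, not part of the script) -----------------
# RANK and WORLD_SIZE are read from the environment, which `torchrun` sets for
# each worker; a typical local launch (flags are illustrative) might be:
#
#   torchrun --nproc_per_node=2 run_torch_distributed.py --streaming True --num_workers 2
#
# Caveat: argparse's `type=bool` converts any non-empty string to True, so
# `--streaming False` would still enable streaming; omit the flag to disable it.
# Each worker then checks that it received its share of the
# NUM_SHARDS * NUM_ITEMS_PER_SHARD examples and raises FailedTestError otherwise.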
datasets/tests/distributed_scripts/run_torch_distributed.py/0
{ "file_path": "datasets/tests/distributed_scripts/run_torch_distributed.py", "repo_id": "datasets", "token_count": 617 }
106
import pytest from datasets import Column, Dataset, Features, Value, Video, load_dataset from ..utils import require_torchcodec @require_torchcodec @pytest.mark.parametrize( "build_example", [ lambda video_path: video_path, lambda video_path: open(video_path, "rb").read(), lambda video_path: {"path": video_path}, lambda video_path: {"path": video_path, "bytes": None}, lambda video_path: {"path": video_path, "bytes": open(video_path, "rb").read()}, lambda video_path: {"path": None, "bytes": open(video_path, "rb").read()}, lambda video_path: {"bytes": open(video_path, "rb").read()}, ], ) def test_video_feature_encode_example(shared_datadir, build_example): from torchcodec.decoders import VideoDecoder video_path = str(shared_datadir / "test_video_66x50.mov") video = Video() encoded_example = video.encode_example(build_example(video_path)) assert isinstance(encoded_example, dict) assert encoded_example.keys() == {"bytes", "path"} assert encoded_example["bytes"] is not None or encoded_example["path"] is not None decoded_example = video.decode_example(encoded_example) assert isinstance(decoded_example, VideoDecoder) @require_torchcodec def test_dataset_with_video_feature(shared_datadir): import torch from torchcodec.decoders import VideoDecoder video_path = str(shared_datadir / "test_video_66x50.mov") data = {"video": [video_path]} features = Features({"video": Video()}) dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"video"} assert isinstance(item["video"], VideoDecoder) assert item["video"].get_frame_at(0).data.shape == (3, 50, 66) assert isinstance(item["video"].get_frame_at(0).data, torch.Tensor) batch = dset[:1] assert len(batch) == 1 assert batch.keys() == {"video"} assert isinstance(batch["video"], list) and all(isinstance(item, VideoDecoder) for item in batch["video"]) assert batch["video"][0].get_frame_at(0).data.shape == (3, 50, 66) assert isinstance(batch["video"][0].get_frame_at(0).data, torch.Tensor) column = dset["video"] assert len(column) == 1 assert isinstance(column, Column) and all(isinstance(item, VideoDecoder) for item in column) assert next(iter(column)).get_frame_at(0).data.shape == (3, 50, 66) assert isinstance(next(iter(column)).get_frame_at(0).data, torch.Tensor) # from bytes with open(video_path, "rb") as f: data = {"video": [f.read()]} dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"video"} assert isinstance(item["video"], VideoDecoder) assert item["video"].get_frame_at(0).data.shape == (3, 50, 66) assert isinstance(item["video"].get_frame_at(0).data, torch.Tensor) @require_torchcodec def test_dataset_with_video_map_and_formatted(shared_datadir): from torchcodec.decoders import VideoDecoder video_path = str(shared_datadir / "test_video_66x50.mov") data = {"video": [video_path]} features = Features({"video": Video()}) dset = Dataset.from_dict(data, features=features) dset = dset.map(lambda x: x).with_format("numpy") example = dset[0] assert isinstance(example["video"], VideoDecoder) # assert isinstance(example["video"][0], np.ndarray) # from bytes with open(video_path, "rb") as f: data = {"video": [f.read()]} dset = Dataset.from_dict(data, features=features) dset = dset.map(lambda x: x).with_format("numpy") example = dset[0] assert isinstance(example["video"], VideoDecoder) # assert isinstance(example["video"][0], np.ndarray) # Dataset casting and mapping @require_torchcodec def test_dataset_with_video_feature_map_is_decoded(shared_datadir): video_path = str(shared_datadir / 
"test_video_66x50.mov") data = {"video": [video_path], "text": ["Hello"]} features = Features({"video": Video(), "text": Value("string")}) dset = Dataset.from_dict(data, features=features) def process_audio_sampling_rate_by_example(example): begin_stream_seconds = example["video"].metadata.begin_stream_seconds example["double_begin_stream_seconds"] = 2 * begin_stream_seconds return example decoded_dset = dset.map(process_audio_sampling_rate_by_example) for item in decoded_dset.cast_column("video", Video(decode=False)): assert item.keys() == {"video", "text", "double_begin_stream_seconds"} assert item["double_begin_stream_seconds"] == 0.0 def process_audio_sampling_rate_by_batch(batch): double_fps = [] for video in batch["video"]: double_fps.append(2 * video.metadata.begin_stream_seconds) batch["double_begin_stream_seconds"] = double_fps return batch decoded_dset = dset.map(process_audio_sampling_rate_by_batch, batched=True) for item in decoded_dset.cast_column("video", Video(decode=False)): assert item.keys() == {"video", "text", "double_begin_stream_seconds"} assert item["double_begin_stream_seconds"] == 0.0 @pytest.fixture def jsonl_video_dataset_path(shared_datadir, tmp_path_factory): import json video_path = str(shared_datadir / "test_video_66x50.mov") data = [{"video": video_path, "text": "Hello world!"}] path = str(tmp_path_factory.mktemp("data") / "video_dataset.jsonl") with open(path, "w") as f: for item in data: f.write(json.dumps(item) + "\n") return path @require_torchcodec @pytest.mark.parametrize("streaming", [False, True]) def test_load_dataset_with_video_feature(streaming, jsonl_video_dataset_path, shared_datadir): from torchcodec.decoders import VideoDecoder video_path = str(shared_datadir / "test_video_66x50.mov") data_files = jsonl_video_dataset_path features = Features({"video": Video(), "text": Value("string")}) dset = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming) item = dset[0] if not streaming else next(iter(dset)) assert item.keys() == {"video", "text"} assert isinstance(item["video"], VideoDecoder) assert item["video"].get_frame_at(0).data.shape == (3, 50, 66) assert item["video"].metadata.path == video_path
datasets/tests/features/test_video.py/0
{ "file_path": "datasets/tests/features/test_video.py", "repo_id": "datasets", "token_count": 2431 }
107
import shutil import textwrap import numpy as np import pytest from datasets import Audio, ClassLabel, Features from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesDict, DataFilesList, get_data_patterns from datasets.download.streaming_download_manager import StreamingDownloadManager from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder, AudioFolderConfig from ..utils import require_sndfile, require_torchcodec @pytest.fixture def cache_dir(tmp_path): return str(tmp_path / "audiofolder_cache_dir") @pytest.fixture def data_files_with_labels_no_metadata(tmp_path, audio_file): data_dir = tmp_path / "data_files_with_labels_no_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "fr" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "uk" subdir_class_1.mkdir(parents=True, exist_ok=True) audio_filename = subdir_class_0 / "audio_fr.wav" shutil.copyfile(audio_file, audio_filename) audio_filename2 = subdir_class_1 / "audio_uk.wav" shutil.copyfile(audio_file, audio_filename2) data_files_with_labels_no_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_labels_no_metadata @pytest.fixture def audio_file_with_metadata(tmp_path, audio_file): audio_filename = tmp_path / "audio_file.wav" shutil.copyfile(audio_file, audio_filename) audio_metadata_filename = tmp_path / "metadata.jsonl" audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) return str(audio_filename), str(audio_metadata_filename) @pytest.fixture def data_files_with_one_split_and_metadata(tmp_path, audio_file): data_dir = tmp_path / "audiofolder_data_dir_with_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) audio_filename = data_dir / "audio_file.wav" shutil.copyfile(audio_file, audio_filename) audio_filename2 = data_dir / "audio_file2.wav" shutil.copyfile(audio_file, audio_filename2) audio_filename3 = subdir / "audio_file3.wav" # in subdir shutil.copyfile(audio_file, audio_filename3) audio_metadata_filename = data_dir / "metadata.jsonl" audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "First audio transcription"} {"file_name": "audio_file2.wav", "text": "Second audio transcription"} {"file_name": "subdir/audio_file3.wav", "text": "Third audio transcription (in subdir)"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata["train"]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture(params=["jsonl", "csv"]) def data_files_with_two_splits_and_metadata(request, tmp_path, audio_file): data_dir = tmp_path / "audiofolder_data_dir_with_metadata" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) audio_filename = train_dir / "audio_file.wav" # train audio shutil.copyfile(audio_file, audio_filename) audio_filename2 = train_dir / "audio_file2.wav" # train audio shutil.copyfile(audio_file, audio_filename2) 
audio_filename3 = test_dir / "audio_file3.wav" # test audio shutil.copyfile(audio_file, audio_filename3) train_audio_metadata_filename = train_dir / f"metadata.{request.param}" audio_metadata = ( textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "First train audio transcription"} {"file_name": "audio_file2.wav", "text": "Second train audio transcription"} """ ) if request.param == "jsonl" else textwrap.dedent( """\ file_name,text audio_file.wav,First train audio transcription audio_file2.wav,Second train audio transcription """ ) ) with open(train_audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) test_audio_metadata_filename = test_dir / f"metadata.{request.param}" audio_metadata = ( textwrap.dedent( """\ {"file_name": "audio_file3.wav", "text": "Test audio transcription"} """ ) if request.param == "jsonl" else textwrap.dedent( """\ file_name,text audio_file3.wav,Test audio transcription """ ) ) with open(test_audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_two_splits_and_metadata) == 2 assert len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_metadata @pytest.fixture def data_files_with_zip_archives(tmp_path, audio_file_44100, audio_file_16000): data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives" data_dir.mkdir(parents=True, exist_ok=True) archive_dir = data_dir / "archive" archive_dir.mkdir(parents=True, exist_ok=True) subdir = archive_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) audio_filename = archive_dir / "audio_file.mp3" shutil.copyfile(audio_file_44100, audio_filename) audio_filename2 = subdir / "audio_file2.mp3" # in subdir shutil.copyfile(audio_file_16000, audio_filename2) audio_metadata_filename = archive_dir / "metadata.jsonl" audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.mp3", "text": "First audio transcription"} {"file_name": "subdir/audio_file2.mp3", "text": "Second audio transcription (in subdir)"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) shutil.make_archive(str(archive_dir), "zip", archive_dir) shutil.rmtree(str(archive_dir)) data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) assert len(data_files_with_zip_archives) == 1 assert len(data_files_with_zip_archives["train"]) == 1 return data_files_with_zip_archives def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = AudioFolderConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = AudioFolderConfig(name="name", data_files=data_files) @require_torchcodec @require_sndfile # check that labels are inferred correctly from dir names def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir): # there are no metadata.jsonl files in this test case audiofolder = AudioFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False) audiofolder.download_and_prepare() assert audiofolder.info.features == 
Features({"audio": Audio(), "label": ClassLabel(names=["fr", "uk"])}) dataset = list(audiofolder.as_dataset()["train"]) label_feature = audiofolder.info.features["label"] assert dataset[0]["label"] == label_feature._str2int["fr"] assert dataset[1]["label"] == label_feature._str2int["uk"] @require_sndfile @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels): audiofolder = AudioFolder( drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata ) gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # removing the labels explicitly requires drop_labels=True assert gen_kwargs["add_labels"] is not bool(drop_labels) assert gen_kwargs["add_metadata"] is False # metadata files is not present in this case generator = audiofolder._generate_examples(**gen_kwargs) if not drop_labels: assert all( example.keys() == {"audio", "label"} and all(val is not None for val in example.values()) for _, example in generator ) else: assert all( example.keys() == {"audio"} and all(val is not None for val in example.values()) for _, example in generator ) @require_sndfile @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_metadata(audio_file_with_metadata, drop_metadata, drop_labels): audio_file, audio_metadata_file = audio_file_with_metadata audiofolder = AudioFolder( drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [audio_file, audio_metadata_file]} ) gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True assert gen_kwargs["add_metadata"] is not bool(drop_metadata) # since the dataset has metadata, adding the labels explicitly requires drop_labels=False assert gen_kwargs["add_labels"] is False generator = audiofolder._generate_examples(**gen_kwargs) expected_columns = {"audio"} if gen_kwargs["add_metadata"]: expected_columns.add("text") if gen_kwargs["add_labels"]: expected_columns.add("label") result = [example for _, example in generator] assert len(result) == 1 example = result[0] assert example.keys() == expected_columns for column in expected_columns: assert example[column] is not None @require_torchcodec @require_sndfile @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata): data_files = data_files_with_one_split_and_metadata audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir) audiofolder.download_and_prepare() datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() for split, data_files in data_files.items(): expected_num_of_audios = len(data_files) - 1 # don't count the metadata file assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_audios # make sure each sample has its own audio and metadata assert len({example["audio"].metadata.path for example in dataset}) == expected_num_of_audios assert len({example["text"] for example in dataset}) == expected_num_of_audios assert all(example["text"] is not None for example in dataset) @require_torchcodec @require_sndfile @pytest.mark.parametrize("streaming", [False, 
True]) def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata): data_files = data_files_with_two_splits_and_metadata audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir) audiofolder.download_and_prepare() datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() for split, data_files in data_files.items(): expected_num_of_audios = len(data_files) - 1 # don't count the metadata file assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_audios # make sure each sample has its own audio and metadata assert len({example["audio"].metadata.path for example in dataset}) == expected_num_of_audios assert len({example["text"] for example in dataset}) == expected_num_of_audios assert all(example["text"] is not None for example in dataset) @require_torchcodec @require_sndfile @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): audiofolder = AudioFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) audiofolder.download_and_prepare() datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() for split, data_files in data_files_with_zip_archives.items(): num_of_archives = len(data_files) # the metadata file is inside the archive expected_num_of_audios = 2 * num_of_archives assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_audios # make sure each sample has its own audio (all arrays are different) and metadata assert ( sum( np.array_equal( dataset[0]["audio"].get_all_samples().data.numpy(), example["audio"].get_all_samples().data.numpy() ) for example in dataset[1:] ) == 0 ) assert len({example["text"] for example in dataset}) == expected_num_of_audios assert all(example["text"] is not None for example in dataset) @require_sndfile def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, audio_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(audio_file, data_dir / "audio_file.wav") audio_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) audiofolder.download_and_prepare() dataset = audiofolder.as_dataset(split="train") # check that there are no metadata, since the metadata file name doesn't have the right name assert "text" not in dataset.column_names @require_sndfile def test_data_files_with_custom_audio_file_name_column_in_metadata_file(cache_dir, tmp_path, audio_file): data_dir = tmp_path / "data_dir_with_custom_file_name_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(audio_file, data_dir / "audio_file.wav") audio_metadata_filename = data_dir / "metadata.jsonl" audio_metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" """\ {"speech_file_name": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) data_files_with_bad_metadata = 
DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) audiofolder.download_and_prepare() dataset = audiofolder.as_dataset(split="train") assert "speech" in dataset.features assert "speech_file_name" not in dataset.features @require_sndfile def test_data_files_with_with_metadata_in_different_formats(cache_dir, tmp_path, audio_file): data_dir = tmp_path / "data_dir_with_metadata_in_different_format" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(audio_file, data_dir / "audio_file.wav") audio_metadata_filename_jsonl = data_dir / "metadata.jsonl" audio_metadata_jsonl = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename_jsonl, "w", encoding="utf-8") as f: f.write(audio_metadata_jsonl) audio_metadata_filename_csv = data_dir / "metadata.csv" audio_metadata_csv = textwrap.dedent( """\ file_name,text audio_file.wav,Audio transcription """ ) with open(audio_metadata_filename_csv, "w", encoding="utf-8") as f: f.write(audio_metadata_csv) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) with pytest.raises(ValueError) as exc_info: audiofolder.download_and_prepare() assert "metadata files with different extensions" in str(exc_info.value)
datasets/tests/packaged_modules/test_audiofolder.py/0
{ "file_path": "datasets/tests/packaged_modules/test_audiofolder.py", "repo_id": "datasets", "token_count": 6716 }
108
import importlib import os import tempfile import types from contextlib import nullcontext as does_not_raise from multiprocessing import Process from pathlib import Path from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from multiprocess.pool import Pool from datasets.arrow_dataset import Dataset from datasets.arrow_writer import ArrowWriter from datasets.builder import ( ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder, InvalidConfigName, ) from datasets.data_files import DataFilesList from datasets.dataset_dict import DatasetDict, IterableDatasetDict from datasets.download.download_manager import DownloadMode from datasets.features import Features, List, Value from datasets.info import DatasetInfo, PostProcessedInfo from datasets.iterable_dataset import IterableDataset from datasets.load import configure_builder_class from datasets.splits import Split, SplitDict, SplitGenerator, SplitInfo from datasets.streaming import xjoin from datasets.utils.file_utils import is_local_path from datasets.utils.info_utils import VerificationMode from datasets.utils.logging import INFO, get_logger from .utils import ( assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_faiss, set_current_working_directory_to_temp_dir, ) class DummyBuilder(DatasetBuilder): def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _prepare_split(self, split_generator, **kwargs): fname = f"{self.dataset_name}-{split_generator.name}.arrow" with ArrowWriter(features=self.info.features, path=os.path.join(self._output_dir, fname)) as writer: writer.write_batch({"text": ["foo"] * 100}) num_examples, num_bytes = writer.finalize() split_generator.split_info.num_examples = num_examples split_generator.split_info.num_bytes = num_bytes class DummyGeneratorBasedBuilder(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _generate_examples(self): for i in range(100): yield i, {"text": "foo"} class DummyArrowBasedBuilder(ArrowBasedBuilder): def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _generate_tables(self): for i in range(10): yield i, pa.table({"text": ["foo"] * 10}) class DummyGeneratorBasedBuilderWithIntegers(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _generate_examples(self): for i in range(100): yield i, {"id": i} class DummyGeneratorBasedBuilderConfig(BuilderConfig): def __init__(self, content="foo", times=2, *args, **kwargs): super().__init__(*args, **kwargs) self.content = content self.times = times class DummyGeneratorBasedBuilderWithConfig(GeneratorBasedBuilder): BUILDER_CONFIG_CLASS = DummyGeneratorBasedBuilderConfig def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN)] def _generate_examples(self): for i in range(100): yield i, {"text": self.config.content * self.config.times} class DummyBuilderWithMultipleConfigs(DummyBuilder): BUILDER_CONFIGS = [ 
DummyGeneratorBasedBuilderConfig(name="a"), DummyGeneratorBasedBuilderConfig(name="b"), ] class DummyBuilderWithDefaultConfig(DummyBuilderWithMultipleConfigs): DEFAULT_CONFIG_NAME = "a" class DummyBuilderWithDownload(DummyBuilder): def __init__(self, *args, rel_path=None, abs_path=None, **kwargs): super().__init__(*args, **kwargs) self._rel_path = rel_path self._abs_path = abs_path def _split_generators(self, dl_manager): if self._rel_path is not None: assert os.path.exists(dl_manager.download(self._rel_path)), "dl_manager must support relative paths" if self._abs_path is not None: assert os.path.exists(dl_manager.download(self._abs_path)), "dl_manager must support absolute paths" return [SplitGenerator(name=Split.TRAIN)] class DummyBuilderWithManualDownload(DummyBuilderWithMultipleConfigs): @property def manual_download_instructions(self): return "To use the dataset you have to download some stuff manually and pass the data path to data_dir" def _split_generators(self, dl_manager): if not os.path.exists(self.config.data_dir): raise FileNotFoundError(f"data_dir {self.config.data_dir} doesn't exist.") return [SplitGenerator(name=Split.TRAIN)] class DummyArrowBasedBuilderWithShards(ArrowBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})] def _generate_tables(self, filepaths): idx = 0 for filepath in filepaths: for i in range(10): yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10}) idx += 1 class DummyGeneratorBasedBuilderWithShards(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")})) def _split_generators(self, dl_manager): return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})] def _generate_examples(self, filepaths): idx = 0 for filepath in filepaths: for i in range(100): yield idx, {"id": i, "filepath": filepath} idx += 1 class DummyArrowBasedBuilderWithAmbiguousShards(ArrowBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")})) def _split_generators(self, dl_manager): return [ SplitGenerator( name=Split.TRAIN, gen_kwargs={ "filepaths": [f"data{i}.txt" for i in range(4)], "dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)], }, ) ] def _generate_tables(self, filepaths, dummy_kwarg_with_different_length): idx = 0 for filepath in filepaths: for i in range(10): yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10}) idx += 1 class DummyGeneratorBasedBuilderWithAmbiguousShards(GeneratorBasedBuilder): def _info(self): return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")})) def _split_generators(self, dl_manager): return [ SplitGenerator( name=Split.TRAIN, gen_kwargs={ "filepaths": [f"data{i}.txt" for i in range(4)], "dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)], }, ) ] def _generate_examples(self, filepaths, dummy_kwarg_with_different_length): idx = 0 for filepath in filepaths: for i in range(100): yield idx, {"id": i, "filepath": filepath} idx += 1 def _run_concurrent_download_and_prepare(tmp_dir): builder = DummyBuilder(cache_dir=tmp_dir) builder.download_and_prepare(download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS) return builder def 
check_streaming(builder): builders_module = importlib.import_module(builder.__module__) assert builders_module._patched_for_streaming assert builders_module.os.path.join is xjoin class BuilderTest(TestCase): def test_download_and_prepare(self): with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow" ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) def test_download_and_prepare_checksum_computation(self): with tempfile.TemporaryDirectory() as tmp_dir: builder_no_verification = DummyBuilder(cache_dir=tmp_dir) builder_no_verification.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( all(v["checksum"] is not None for _, v in builder_no_verification.info.download_checksums.items()) ) builder_with_verification = DummyBuilder(cache_dir=tmp_dir) builder_with_verification.download_and_prepare( download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.ALL_CHECKS, ) self.assertTrue( all(v["checksum"] is None for _, v in builder_with_verification.info.download_checksums.items()) ) def test_concurrent_download_and_prepare(self): with tempfile.TemporaryDirectory() as tmp_dir: processes = 2 with Pool(processes=processes) as pool: jobs = [ pool.apply_async(_run_concurrent_download_and_prepare, kwds={"tmp_dir": tmp_dir}) for _ in range(processes) ] builders = [job.get() for job in jobs] for builder in builders: self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists( os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json") ) ) def test_download_and_prepare_with_base_path(self): with tempfile.TemporaryDirectory() as tmp_dir: rel_path = "dummy1.data" abs_path = os.path.join(tmp_dir, "dummy2.data") # test relative path is missing builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path) with self.assertRaises(FileNotFoundError): builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir) # test absolute path is missing builder = DummyBuilderWithDownload(cache_dir=tmp_dir, abs_path=abs_path) with self.assertRaises(FileNotFoundError): builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir) # test that they are both properly loaded when they exist open(os.path.join(tmp_dir, rel_path), "w") open(abs_path, "w") builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path, abs_path=abs_path) builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) ) def test_as_dataset_with_post_process(self): def _post_process(self, dataset, resources_paths): def char_tokenize(example): return {"tokens": list(example["text"])} return dataset.map(char_tokenize, 
cache_file_name=resources_paths["tokenized_dataset"]) def _post_processing_resources(self, split): return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"} with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder.info.post_processed = PostProcessedInfo( features=Features({"text": Value("string"), "tokens": List(Value("string"))}) ) builder._post_process = types.MethodType(_post_process, builder) builder._post_processing_resources = types.MethodType(_post_processing_resources, builder) os.makedirs(builder.cache_dir) builder.info.splits = SplitDict() builder.info.splits.add(SplitInfo("train", num_examples=10)) builder.info.splits.add(SplitInfo("test", num_examples=10)) for split in builder.info.splits: with ArrowWriter( path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 10}) writer.finalize() with ArrowWriter( path=os.path.join(builder.cache_dir, f"tokenized_dataset-{split}.arrow"), features=Features({"text": Value("string"), "tokens": List(Value("string"))}), ) as writer: writer.write_batch({"text": ["foo"] * 10, "tokens": [list("foo")] * 10}) writer.finalize() dsets = builder.as_dataset() self.assertIsInstance(dsets, DatasetDict) self.assertListEqual(list(dsets.keys()), ["train", "test"]) self.assertEqual(len(dsets["train"]), 10) self.assertEqual(len(dsets["test"]), 10) self.assertDictEqual( dsets["train"].features, Features({"text": Value("string"), "tokens": List(Value("string"))}) ) self.assertDictEqual( dsets["test"].features, Features({"text": Value("string"), "tokens": List(Value("string"))}) ) self.assertListEqual(dsets["train"].column_names, ["text", "tokens"]) self.assertListEqual(dsets["test"].column_names, ["text", "tokens"]) del dsets dset = builder.as_dataset("train") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train") self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": List(Value("string"))})) self.assertListEqual(dset.column_names, ["text", "tokens"]) self.assertGreater(builder.info.post_processing_size, 0) self.assertGreater( builder.info.post_processed.resources_checksums["train"]["tokenized_dataset"]["num_bytes"], 0 ) del dset dset = builder.as_dataset("train+test[:30%]") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train+test[:30%]") self.assertEqual(len(dset), 13) self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": List(Value("string"))})) self.assertListEqual(dset.column_names, ["text", "tokens"]) del dset dset = builder.as_dataset("all") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train+test") self.assertEqual(len(dset), 20) self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": List(Value("string"))})) self.assertListEqual(dset.column_names, ["text", "tokens"]) del dset def _post_process(self, dataset, resources_paths): return dataset.select([0, 1], keep_in_memory=True) with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._post_process = types.MethodType(_post_process, builder) os.makedirs(builder.cache_dir) builder.info.splits = SplitDict() builder.info.splits.add(SplitInfo("train", num_examples=10)) builder.info.splits.add(SplitInfo("test", num_examples=10)) for split in builder.info.splits: with ArrowWriter( path=os.path.join(builder.cache_dir, 
f"{builder.dataset_name}-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 10}) writer.finalize() with ArrowWriter( path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 2}) writer.finalize() dsets = builder.as_dataset() self.assertIsInstance(dsets, DatasetDict) self.assertListEqual(list(dsets.keys()), ["train", "test"]) self.assertEqual(len(dsets["train"]), 2) self.assertEqual(len(dsets["test"]), 2) self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")})) self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")})) self.assertListEqual(dsets["train"].column_names, ["text"]) self.assertListEqual(dsets["test"].column_names, ["text"]) del dsets dset = builder.as_dataset("train") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train") self.assertEqual(len(dset), 2) self.assertDictEqual(dset.features, Features({"text": Value("string")})) self.assertListEqual(dset.column_names, ["text"]) del dset dset = builder.as_dataset("train+test[:30%]") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train+test[:30%]") self.assertEqual(len(dset), 2) self.assertDictEqual(dset.features, Features({"text": Value("string")})) self.assertListEqual(dset.column_names, ["text"]) del dset @require_faiss def test_as_dataset_with_post_process_with_index(self): def _post_process(self, dataset, resources_paths): if os.path.exists(resources_paths["index"]): dataset.load_faiss_index("my_index", resources_paths["index"]) return dataset else: dataset.add_faiss_index_from_external_arrays( external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index" ) dataset.save_faiss_index("my_index", resources_paths["index"]) return dataset def _post_processing_resources(self, split): return {"index": f"Flat-{split}.faiss"} with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._post_process = types.MethodType(_post_process, builder) builder._post_processing_resources = types.MethodType(_post_processing_resources, builder) os.makedirs(builder.cache_dir) builder.info.splits = SplitDict() builder.info.splits.add(SplitInfo("train", num_examples=10)) builder.info.splits.add(SplitInfo("test", num_examples=10)) for split in builder.info.splits: with ArrowWriter( path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 10}) writer.finalize() with ArrowWriter( path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 2}) writer.finalize() dsets = builder.as_dataset() self.assertIsInstance(dsets, DatasetDict) self.assertListEqual(list(dsets.keys()), ["train", "test"]) self.assertEqual(len(dsets["train"]), 10) self.assertEqual(len(dsets["test"]), 10) self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")})) self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")})) self.assertListEqual(dsets["train"].column_names, ["text"]) self.assertListEqual(dsets["test"].column_names, ["text"]) self.assertListEqual(dsets["train"].list_indexes(), ["my_index"]) self.assertListEqual(dsets["test"].list_indexes(), ["my_index"]) 
self.assertGreater(builder.info.post_processing_size, 0) self.assertGreater(builder.info.post_processed.resources_checksums["train"]["index"]["num_bytes"], 0) del dsets dset = builder.as_dataset("train") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train") self.assertEqual(len(dset), 10) self.assertDictEqual(dset.features, Features({"text": Value("string")})) self.assertListEqual(dset.column_names, ["text"]) self.assertListEqual(dset.list_indexes(), ["my_index"]) del dset dset = builder.as_dataset("train+test[:30%]") self.assertIsInstance(dset, Dataset) self.assertEqual(dset.split, "train+test[:30%]") self.assertEqual(len(dset), 13) self.assertDictEqual(dset.features, Features({"text": Value("string")})) self.assertListEqual(dset.column_names, ["text"]) self.assertListEqual(dset.list_indexes(), ["my_index"]) del dset def test_download_and_prepare_with_post_process(self): def _post_process(self, dataset, resources_paths): def char_tokenize(example): return {"tokens": list(example["text"])} return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"]) def _post_processing_resources(self, split): return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"} with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder.info.post_processed = PostProcessedInfo( features=Features({"text": Value("string"), "tokens": List(Value("string"))}) ) builder._post_process = types.MethodType(_post_process, builder) builder._post_processing_resources = types.MethodType(_post_processing_resources, builder) builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow" ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertDictEqual( builder.info.post_processed.features, Features({"text": Value("string"), "tokens": List(Value("string"))}), ) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) def _post_process(self, dataset, resources_paths): return dataset.select([0, 1], keep_in_memory=True) with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._post_process = types.MethodType(_post_process, builder) builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow" ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertIsNone(builder.info.post_processed) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) def _post_process(self, dataset, resources_paths): if os.path.exists(resources_paths["index"]): dataset.load_faiss_index("my_index", resources_paths["index"]) return dataset else: dataset = dataset.add_faiss_index_from_external_arrays( external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index" ) dataset.save_faiss_index("my_index", resources_paths["index"]) return dataset def _post_processing_resources(self, split): return {"index": f"Flat-{split}.faiss"} with tempfile.TemporaryDirectory() as tmp_dir: builder = 
DummyBuilder(cache_dir=tmp_dir) builder._post_process = types.MethodType(_post_process, builder) builder._post_processing_resources = types.MethodType(_post_processing_resources, builder) builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow" ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertIsNone(builder.info.post_processed) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) def test_error_download_and_prepare(self): def _prepare_split(self, split_generator, **kwargs): raise ValueError() with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyBuilder(cache_dir=tmp_dir) builder._prepare_split = types.MethodType(_prepare_split, builder) self.assertRaises( ValueError, builder.download_and_prepare, download_mode=DownloadMode.FORCE_REDOWNLOAD, ) self.assertRaises(FileNotFoundError, builder.as_dataset) def test_generator_based_download_and_prepare(self): with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir) builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD) self.assertTrue( os.path.exists( os.path.join( tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) ) self.assertDictEqual(builder.info.features, Features({"text": Value("string")})) self.assertEqual(builder.info.splits["train"].num_examples, 100) self.assertTrue( os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) ) # Test that duplicated keys are ignored if verification_mode is "no_checks" with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir) with patch("datasets.builder.ArrowWriter", side_effect=ArrowWriter) as mock_arrow_writer: builder.download_and_prepare( download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS ) mock_arrow_writer.assert_called_once() args, kwargs = mock_arrow_writer.call_args_list[0] self.assertFalse(kwargs["check_duplicates"]) mock_arrow_writer.reset_mock() builder.download_and_prepare( download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.BASIC_CHECKS ) mock_arrow_writer.assert_called_once() args, kwargs = mock_arrow_writer.call_args_list[0] self.assertTrue(kwargs["check_duplicates"]) def test_cache_dir_no_args(self): with tempfile.TemporaryDirectory() as tmp_dir: builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_dir=None, data_files=None) relative_cache_dir_parts = Path(builder._relative_data_dir()).parts self.assertTupleEqual(relative_cache_dir_parts, (builder.dataset_name, "default", "0.0.0")) def test_cache_dir_for_data_files(self): with tempfile.TemporaryDirectory() as tmp_dir: dummy_data1 = os.path.join(tmp_dir, "dummy_data1.txt") with open(dummy_data1, "w", encoding="utf-8") as f: f.writelines("foo bar") dummy_data2 = os.path.join(tmp_dir, "dummy_data2.txt") with open(dummy_data2, "w", encoding="utf-8") as f: f.writelines("foo bar\n") builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = 
DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1]) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": dummy_data1}) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={Split.TRAIN: dummy_data1}) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": [dummy_data1]}) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"test": dummy_data1}) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2]) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2]) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2]) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2]) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2, dummy_data1]) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2} ) other_builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2} ) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": [dummy_data1], "test": dummy_data2} ) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": dummy_data1, "validation": dummy_data2} ) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilder( cache_dir=tmp_dir, data_files={"train": [dummy_data1, dummy_data2], "test": dummy_data2}, ) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_cache_dir_for_features(self): with tempfile.TemporaryDirectory() as tmp_dir: f1 = Features({"id": Value("int8")}) f2 = Features({"id": Value("int32")}) builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1) other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_cache_dir_for_config_kwargs(self): with tempfile.TemporaryDirectory() as tmp_dir: # create config on the fly builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo", times=2) other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, times=2, content="foo") self.assertEqual(builder.cache_dir, other_builder.cache_dir) self.assertIn("content=foo", builder.cache_dir) self.assertIn("times=2", builder.cache_dir) other_builder = 
DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="bar", times=2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo") self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) with tempfile.TemporaryDirectory() as tmp_dir: # overwrite an existing config builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo", times=2) other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", times=2, content="foo") self.assertEqual(builder.cache_dir, other_builder.cache_dir) self.assertIn("content=foo", builder.cache_dir) self.assertIn("times=2", builder.cache_dir) other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="bar", times=2) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo") self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_config_names(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError) as error_context: DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, data_files=None, data_dir=None) self.assertIn("Please pick one among the available configs", str(error_context.exception)) builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a") self.assertEqual(builder.config.name, "a") builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="b") self.assertEqual(builder.config.name, "b") with self.assertRaises(ValueError): DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir) builder = DummyBuilderWithDefaultConfig(cache_dir=tmp_dir) self.assertEqual(builder.config.name, "a") def test_cache_dir_for_data_dir(self): with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir: builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir) other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir) self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=tmp_dir) self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_cache_dir_for_configured_builder(self): with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir: builder_cls = configure_builder_class( DummyBuilderWithManualDownload, builder_configs=[BuilderConfig(data_dir=data_dir)], default_config_name=None, dataset_name="dummy", ) builder = builder_cls(cache_dir=tmp_dir, hash="abc") other_builder = builder_cls(cache_dir=tmp_dir, hash="abc") self.assertEqual(builder.cache_dir, other_builder.cache_dir) other_builder = builder_cls(cache_dir=tmp_dir, hash="def") self.assertNotEqual(builder.cache_dir, other_builder.cache_dir) def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = BuilderConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = BuilderConfig(name="name", data_files=data_files) def test_arrow_based_download_and_prepare(tmp_path): builder = DummyArrowBasedBuilder(cache_dir=tmp_path) 
builder.download_and_prepare() assert os.path.exists( os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow", ) ) assert builder.info.features, Features({"text": Value("string")}) assert builder.info.splits["train"].num_examples == 100 assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json")) @pytest.mark.parametrize( "split, expected_dataset_class, expected_dataset_length", [ (None, DatasetDict, 10), ("train", Dataset, 10), ("train+test[:30%]", Dataset, 13), ], ) @pytest.mark.parametrize("in_memory", [False, True]) def test_builder_as_dataset(split, expected_dataset_class, expected_dataset_length, in_memory, tmp_path): cache_dir = str(tmp_path) builder = DummyBuilder(cache_dir=cache_dir) os.makedirs(builder.cache_dir) builder.info.splits = SplitDict() builder.info.splits.add(SplitInfo("train", num_examples=10)) builder.info.splits.add(SplitInfo("test", num_examples=10)) for info_split in builder.info.splits: with ArrowWriter( path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{info_split}.arrow"), features=Features({"text": Value("string")}), ) as writer: writer.write_batch({"text": ["foo"] * 10}) writer.finalize() with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): dataset = builder.as_dataset(split=split, in_memory=in_memory) assert isinstance(dataset, expected_dataset_class) if isinstance(dataset, DatasetDict): assert list(dataset.keys()) == ["train", "test"] datasets = dataset.values() expected_splits = ["train", "test"] elif isinstance(dataset, Dataset): datasets = [dataset] expected_splits = [split] for dataset, expected_split in zip(datasets, expected_splits): assert dataset.split == expected_split assert len(dataset) == expected_dataset_length assert dataset.features == Features({"text": Value("string")}) dataset.column_names == ["text"] @pytest.mark.parametrize("in_memory", [False, True]) def test_generator_based_builder_as_dataset(in_memory, tmp_path): cache_dir = tmp_path / "data" cache_dir.mkdir() cache_dir = str(cache_dir) builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir) builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD) with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): dataset = builder.as_dataset("train", in_memory=in_memory) assert dataset.data.to_pydict() == {"text": ["foo"] * 100} @pytest.mark.parametrize( "writer_batch_size, default_writer_batch_size, expected_chunks", [(None, None, 1), (None, 5, 20), (10, None, 10)] ) def test_custom_writer_batch_size(tmp_path, writer_batch_size, default_writer_batch_size, expected_chunks): cache_dir = str(tmp_path) if default_writer_batch_size: DummyGeneratorBasedBuilder.DEFAULT_WRITER_BATCH_SIZE = default_writer_batch_size builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir, writer_batch_size=writer_batch_size) assert builder._writer_batch_size == (writer_batch_size or default_writer_batch_size) builder.download_and_prepare(download_mode=DownloadMode.FORCE_REDOWNLOAD) dataset = builder.as_dataset("train") assert len(dataset.data[0].chunks) == expected_chunks def test_builder_as_streaming_dataset(tmp_path): dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path)) check_streaming(dummy_builder) dsets = dummy_builder.as_streaming_dataset() assert isinstance(dsets, IterableDatasetDict) assert isinstance(dsets["train"], IterableDataset) assert len(list(dsets["train"])) == 100 dset = 
dummy_builder.as_streaming_dataset(split="train") assert isinstance(dset, IterableDataset) assert len(list(dset)) == 100 def _run_test_builder_streaming_works_in_subprocesses(builder): check_streaming(builder) dset = builder.as_streaming_dataset(split="train") assert isinstance(dset, IterableDataset) assert len(list(dset)) == 100 def test_builder_streaming_works_in_subprocess(tmp_path): dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path)) p = Process(target=_run_test_builder_streaming_works_in_subprocesses, args=(dummy_builder,)) p.start() p.join() class DummyBuilderWithVersion(GeneratorBasedBuilder): VERSION = "2.0.0" def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): pass def _generate_examples(self): pass class DummyBuilderWithBuilderConfigs(GeneratorBasedBuilder): BUILDER_CONFIGS = [BuilderConfig(name="custom", version="2.0.0")] def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): pass def _generate_examples(self): pass class CustomBuilderConfig(BuilderConfig): def __init__(self, date=None, language=None, version="2.0.0", **kwargs): name = f"{date}.{language}" super().__init__(name=name, version=version, **kwargs) self.date = date self.language = language class DummyBuilderWithCustomBuilderConfigs(GeneratorBasedBuilder): BUILDER_CONFIGS = [CustomBuilderConfig(date="20220501", language="en")] BUILDER_CONFIG_CLASS = CustomBuilderConfig def _info(self): return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): pass def _generate_examples(self): pass @pytest.mark.parametrize( "builder_class, kwargs", [ (DummyBuilderWithVersion, {}), (DummyBuilderWithBuilderConfigs, {"config_name": "custom"}), (DummyBuilderWithCustomBuilderConfigs, {"config_name": "20220501.en"}), (DummyBuilderWithCustomBuilderConfigs, {"date": "20220501", "language": "ca"}), ], ) def test_builder_config_version(builder_class, kwargs, tmp_path): cache_dir = str(tmp_path) builder = builder_class(cache_dir=cache_dir, **kwargs) assert builder.config.version == "2.0.0" def test_builder_download_and_prepare_with_absolute_output_dir(tmp_path): builder = DummyGeneratorBasedBuilder() output_dir = str(tmp_path) builder.download_and_prepare(output_dir) assert builder._output_dir.startswith(tmp_path.resolve().as_posix()) assert os.path.exists(os.path.join(output_dir, "dataset_info.json")) assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow")) assert not os.path.exists(os.path.join(output_dir + ".incomplete")) def test_builder_download_and_prepare_with_relative_output_dir(): with set_current_working_directory_to_temp_dir(): builder = DummyGeneratorBasedBuilder() output_dir = "test-out" builder.download_and_prepare(output_dir) assert Path(builder._output_dir).resolve().as_posix().startswith(Path(output_dir).resolve().as_posix()) assert os.path.exists(os.path.join(output_dir, "dataset_info.json")) assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow")) assert not os.path.exists(os.path.join(output_dir + ".incomplete")) def test_builder_with_filesystem_download_and_prepare(tmp_path, mockfs): builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options) assert builder._output_dir.startswith("mock://my_dataset") assert is_local_path(builder._cache_downloaded_dir) assert isinstance(builder._fs, 
type(mockfs)) assert builder._fs.storage_options == mockfs.storage_options assert mockfs.exists("my_dataset/dataset_info.json") assert mockfs.exists(f"my_dataset/{builder.dataset_name}-train.arrow") assert not mockfs.exists("my_dataset.incomplete") def test_builder_with_filesystem_download_and_prepare_reload(tmp_path, mockfs, caplog): builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path) mockfs.makedirs("my_dataset") DatasetInfo().write_to_directory("mock://my_dataset", storage_options=mockfs.storage_options) mockfs.touch(f"my_dataset/{builder.dataset_name}-train.arrow") caplog.clear() with caplog.at_level(INFO, logger=get_logger().name): builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options) assert "Found cached dataset" in caplog.text def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path): builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare(file_format="parquet") assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet" ) assert os.path.exists(parquet_path) assert pq.ParquetFile(parquet_path) is not None def test_generator_based_builder_download_and_prepare_sharded(tmp_path): writer_batch_size = 25 builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size) with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard builder.download_and_prepare(file_format="parquet") expected_num_shards = 100 // writer_batch_size assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", ) assert os.path.exists(parquet_path) parquet_files = [ pq.ParquetFile(parquet_path) for parquet_path in Path(tmp_path).rglob( f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" ) ] assert len(parquet_files) == expected_num_shards assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 def test_generator_based_builder_download_and_prepare_with_max_shard_size(tmp_path): writer_batch_size = 25 builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size) builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one batch per shard expected_num_shards = 100 // writer_batch_size assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", ) assert os.path.exists(parquet_path) parquet_files = [ pq.ParquetFile(parquet_path) for parquet_path in Path(tmp_path).rglob( f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" ) ] assert len(parquet_files) == expected_num_shards assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 def test_generator_based_builder_download_and_prepare_with_num_proc(tmp_path): builder = DummyGeneratorBasedBuilderWithShards(cache_dir=tmp_path) builder.download_and_prepare(num_proc=2) expected_num_shards = 2 assert builder.info.splits["train"].num_examples == 400 assert builder.info.splits["train"].shard_lengths == [200, 200] arrow_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow", ) assert os.path.exists(arrow_path) ds = 
builder.as_dataset("train") assert len(ds) == 400 assert ds.to_dict() == { "id": [i for _ in range(4) for i in range(100)], "filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)], } @pytest.mark.parametrize( "num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))] ) def test_generator_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path): builder = DummyGeneratorBasedBuilderWithAmbiguousShards(cache_dir=tmp_path) with expectation: builder.download_and_prepare(num_proc=num_proc) def test_arrow_based_builder_download_and_prepare_as_parquet(tmp_path): builder = DummyArrowBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare(file_format="parquet") assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet" ) assert os.path.exists(parquet_path) assert pq.ParquetFile(parquet_path) is not None def test_arrow_based_builder_download_and_prepare_sharded(tmp_path): builder = DummyArrowBasedBuilder(cache_dir=tmp_path) with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard builder.download_and_prepare(file_format="parquet") expected_num_shards = 10 assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", ) assert os.path.exists(parquet_path) parquet_files = [ pq.ParquetFile(parquet_path) for parquet_path in Path(tmp_path).rglob( f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" ) ] assert len(parquet_files) == expected_num_shards assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 def test_arrow_based_builder_download_and_prepare_with_max_shard_size(tmp_path): builder = DummyArrowBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one table per shard expected_num_shards = 10 assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", ) assert os.path.exists(parquet_path) parquet_files = [ pq.ParquetFile(parquet_path) for parquet_path in Path(tmp_path).rglob( f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" ) ] assert len(parquet_files) == expected_num_shards assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 def test_arrow_based_builder_download_and_prepare_with_num_proc(tmp_path): builder = DummyArrowBasedBuilderWithShards(cache_dir=tmp_path) builder.download_and_prepare(num_proc=2) expected_num_shards = 2 assert builder.info.splits["train"].num_examples == 400 assert builder.info.splits["train"].shard_lengths == [200, 200] arrow_path = os.path.join( tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow", ) assert os.path.exists(arrow_path) ds = builder.as_dataset("train") assert len(ds) == 400 assert ds.to_dict() == { "id": [i for _ in range(4) for i in range(100)], "filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)], } @pytest.mark.parametrize( "num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))] ) def 
test_arrow_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path): builder = DummyArrowBasedBuilderWithAmbiguousShards(cache_dir=tmp_path) with expectation: builder.download_and_prepare(num_proc=num_proc)
datasets/tests/test_builder.py/0
{ "file_path": "datasets/tests/test_builder.py", "repo_id": "datasets", "token_count": 25362 }
109
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
datasets/tests/test_info_utils.py/0
{ "file_path": "datasets/tests/test_info_utils.py", "repo_id": "datasets", "token_count": 366 }
110
import pytest

from datasets.utils.version import Version


@pytest.mark.parametrize(
    "other, expected_equality",
    [
        (Version("1.0.0"), True),
        ("1.0.0", True),
        (Version("2.0.0"), False),
        ("2.0.0", False),
        ("1", False),
        ("a", False),
        (1, False),
        (None, False),
    ],
)
def test_version_equality_and_hash(other, expected_equality):
    version = Version("1.0.0")
    assert (version == other) is expected_equality
    assert (version != other) is not expected_equality
    assert (hash(version) == hash(other)) is expected_equality
datasets/tests/test_version.py/0
{ "file_path": "datasets/tests/test_version.py", "repo_id": "datasets", "token_count": 254 }
111
from functools import partial

import torch
from benchmarking_utils import BenchmarkMixin, BenchmarkScenario, model_init_fn

from diffusers import BitsAndBytesConfig, FluxTransformer2DModel
from diffusers.utils.testing_utils import torch_device


CKPT_ID = "black-forest-labs/FLUX.1-dev"
RESULT_FILENAME = "flux.csv"


def get_input_dict(**device_dtype_kwargs):
    # resolution: 1024x1024
    # maximum sequence length: 512
    hidden_states = torch.randn(1, 4096, 64, **device_dtype_kwargs)
    encoder_hidden_states = torch.randn(1, 512, 4096, **device_dtype_kwargs)
    pooled_prompt_embeds = torch.randn(1, 768, **device_dtype_kwargs)
    # position ids match the token counts: 4096 rows for the latent tokens, 512 for the text tokens
    image_ids = torch.ones(4096, 3, **device_dtype_kwargs)
    text_ids = torch.ones(512, 3, **device_dtype_kwargs)
    timestep = torch.tensor([1.0], **device_dtype_kwargs)
    guidance = torch.tensor([1.0], **device_dtype_kwargs)

    return {
        "hidden_states": hidden_states,
        "encoder_hidden_states": encoder_hidden_states,
        "img_ids": image_ids,
        "txt_ids": text_ids,
        "pooled_projections": pooled_prompt_embeds,
        "timestep": timestep,
        "guidance": guidance,
    }


if __name__ == "__main__":
    scenarios = [
        BenchmarkScenario(
            name=f"{CKPT_ID}-bf16",
            model_cls=FluxTransformer2DModel,
            model_init_kwargs={
                "pretrained_model_name_or_path": CKPT_ID,
                "torch_dtype": torch.bfloat16,
                "subfolder": "transformer",
            },
            get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16),
            model_init_fn=model_init_fn,
            compile_kwargs={"fullgraph": True},
        ),
        BenchmarkScenario(
            name=f"{CKPT_ID}-bnb-nf4",
            model_cls=FluxTransformer2DModel,
            model_init_kwargs={
                "pretrained_model_name_or_path": CKPT_ID,
                "torch_dtype": torch.bfloat16,
                "subfolder": "transformer",
                "quantization_config": BitsAndBytesConfig(
                    load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type="nf4"
                ),
            },
            get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16),
            model_init_fn=model_init_fn,
        ),
        BenchmarkScenario(
            name=f"{CKPT_ID}-layerwise-upcasting",
            model_cls=FluxTransformer2DModel,
            model_init_kwargs={
                "pretrained_model_name_or_path": CKPT_ID,
                "torch_dtype": torch.bfloat16,
                "subfolder": "transformer",
            },
            get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16),
            model_init_fn=partial(model_init_fn, layerwise_upcasting=True),
        ),
        BenchmarkScenario(
            name=f"{CKPT_ID}-group-offload-leaf",
            model_cls=FluxTransformer2DModel,
            model_init_kwargs={
                "pretrained_model_name_or_path": CKPT_ID,
                "torch_dtype": torch.bfloat16,
                "subfolder": "transformer",
            },
            get_model_input_dict=partial(get_input_dict, device=torch_device, dtype=torch.bfloat16),
            model_init_fn=partial(
                model_init_fn,
                group_offload_kwargs={
                    "onload_device": torch_device,
                    "offload_device": torch.device("cpu"),
                    "offload_type": "leaf_level",
                    "use_stream": True,
                    "non_blocking": True,
                },
            ),
        ),
    ]

    runner = BenchmarkMixin()
    runner.run_bencmarks_and_collate(scenarios, filename=RESULT_FILENAME)
diffusers/benchmarks/benchmarking_flux.py/0
{ "file_path": "diffusers/benchmarks/benchmarking_flux.py", "repo_id": "diffusers", "token_count": 1946 }
112
FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive
ENV MINIMUM_SUPPORTED_TORCH_VERSION="2.1.0"
ENV MINIMUM_SUPPORTED_TORCHVISION_VERSION="0.16.0"
ENV MINIMUM_SUPPORTED_TORCHAUDIO_VERSION="2.1.0"

RUN apt-get -y update \
    && apt-get install -y software-properties-common \
    && add-apt-repository ppa:deadsnakes/ppa

RUN apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    libgl1 \
    python3.10 \
    python3.10-dev \
    python3-pip \
    python3.10-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3.10 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3.10 -m uv pip install --no-cache-dir \
        torch==$MINIMUM_SUPPORTED_TORCH_VERSION \
        torchvision==$MINIMUM_SUPPORTED_TORCHVISION_VERSION \
        torchaudio==$MINIMUM_SUPPORTED_TORCHAUDIO_VERSION \
        invisible_watermark && \
    python3.10 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        hf_transfer \
        Jinja2 \
        librosa \
        numpy==1.26.4 \
        scipy \
        tensorboard \
        transformers

CMD ["/bin/bash"]
diffusers/docker/diffusers-pytorch-minimum-cuda/Dockerfile/0
{ "file_path": "diffusers/docker/diffusers-pytorch-minimum-cuda/Dockerfile", "repo_id": "diffusers", "token_count": 622 }
113
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Single files

The [`~loaders.FromSingleFileMixin.from_single_file`] method allows you to load:

* a model stored in a single file, which is useful if you're working with models from the diffusion ecosystem, like Automatic1111, which commonly relies on a single-file layout to store and share models
* a model stored in its originally distributed layout, which is useful if you're working with models finetuned with other services, and want to load them directly into Diffusers model objects and pipelines

> [!TIP]
> Read the [Model files and layouts](../../using-diffusers/other-formats) guide to learn more about the Diffusers-multifolder layout versus the single-file layout, and how to load models stored in these different layouts.

## Supported pipelines

- [`StableDiffusionPipeline`]
- [`StableDiffusionImg2ImgPipeline`]
- [`StableDiffusionInpaintPipeline`]
- [`StableDiffusionControlNetPipeline`]
- [`StableDiffusionControlNetImg2ImgPipeline`]
- [`StableDiffusionControlNetInpaintPipeline`]
- [`StableDiffusionUpscalePipeline`]
- [`StableDiffusionXLPipeline`]
- [`StableDiffusionXLImg2ImgPipeline`]
- [`StableDiffusionXLInpaintPipeline`]
- [`StableDiffusionXLInstructPix2PixPipeline`]
- [`StableDiffusionXLControlNetPipeline`]
- [`StableDiffusionXLKDiffusionPipeline`]
- [`StableDiffusion3Pipeline`]
- [`LatentConsistencyModelPipeline`]
- [`LatentConsistencyModelImg2ImgPipeline`]
- [`StableDiffusionControlNetXSPipeline`]
- [`StableDiffusionXLControlNetXSPipeline`]
- [`LEditsPPPipelineStableDiffusion`]
- [`LEditsPPPipelineStableDiffusionXL`]
- [`PIAPipeline`]

## Supported models

- [`UNet2DConditionModel`]
- [`StableCascadeUNet`]
- [`AutoencoderKL`]
- [`ControlNetModel`]
- [`SD3Transformer2DModel`]
- [`FluxTransformer2DModel`]

## FromSingleFileMixin

[[autodoc]] loaders.single_file.FromSingleFileMixin

## FromOriginalModelMixin

[[autodoc]] loaders.single_file_model.FromOriginalModelMixin
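For example, a checkpoint distributed as a single `.safetensors` file can be loaded straight into a pipeline. This is a minimal sketch rather than a canonical recipe; the SDXL base checkpoint URL below is just one publicly available example of a single-file checkpoint.

```python
from diffusers import StableDiffusionXLPipeline

# a publicly available single-file checkpoint, used here for illustration
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path)
```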
diffusers/docs/source/en/api/loaders/single_file.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/single_file.md", "repo_id": "diffusers", "token_count": 804 }
114
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# AutoencoderKLCogVideoX

The 3D variational autoencoder (VAE) model with KL loss used in [CogVideoX](https://github.com/THUDM/CogVideo) was introduced in [CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer](https://github.com/THUDM/CogVideo/blob/main/resources/CogVideoX.pdf) by Tsinghua University & ZhipuAI.

The model can be loaded with the following code snippet.

```python
import torch

from diffusers import AutoencoderKLCogVideoX

vae = AutoencoderKLCogVideoX.from_pretrained("THUDM/CogVideoX-2b", subfolder="vae", torch_dtype=torch.float16).to("cuda")
```

## AutoencoderKLCogVideoX

[[autodoc]] AutoencoderKLCogVideoX
    - decode
    - encode
    - all

## AutoencoderKLOutput

[[autodoc]] models.autoencoders.autoencoder_kl.AutoencoderKLOutput

## DecoderOutput

[[autodoc]] models.autoencoders.vae.DecoderOutput
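For example, here is a minimal sketch of a round trip through the VAE loaded above. The frame count and resolution are illustrative assumptions; the VAE compresses the time and space axes, so the latents are smaller than the input along those dimensions.

```python
import torch

# illustrative input shape: (batch, channels, frames, height, width)
video = torch.randn(1, 3, 9, 480, 720, dtype=torch.float16, device="cuda")

with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample()  # AutoencoderKLOutput -> posterior sample
    reconstruction = vae.decode(latents).sample       # DecoderOutput -> reconstructed video
```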
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ControlNet with Stable Diffusion 3

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

StableDiffusion3ControlNetPipeline is an implementation of ControlNet for Stable Diffusion 3.

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.

With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.

The abstract from the paper is:

*We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.*

This ControlNet code is mainly implemented by [The InstantX Team](https://huggingface.co/InstantX). The inpainting-related code was developed by [The Alimama Creative Team](https://huggingface.co/alimama-creative).
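Before the checkpoint table and API reference, here is a hedged text-to-image sketch using the InstantX canny checkpoint listed below. The conditioning scale and the control image URL are illustrative assumptions modeled on the InstantX model card; adjust them for your own control image.

```py
import torch
from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel
from diffusers.utils import load_image

controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
pipeline = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# A precomputed canny edge map; any single-channel edge image of matching size works
control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")

image = pipeline(
    "a photo of a futuristic city at dusk",
    control_image=control_image,
    controlnet_conditioning_scale=0.7,
).images[0]
```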
You can find pre-trained checkpoints for SD3-ControlNet in the table below:

| ControlNet type | Developer | Link |
| -------- | ---------- | ---- |
| Canny | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/SD3-Controlnet-Canny) |
| Depth | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/SD3-Controlnet-Depth) |
| Pose | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/SD3-Controlnet-Pose) |
| Tile | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/SD3-Controlnet-Tile) |
| Inpainting | [The Alimama Creative Team](https://huggingface.co/alimama-creative) | [Link](https://huggingface.co/alimama-creative/SD3-Controlnet-Inpainting) |

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## StableDiffusion3ControlNetPipeline

[[autodoc]] StableDiffusion3ControlNetPipeline
    - all
    - __call__

## StableDiffusion3ControlNetInpaintingPipeline

[[autodoc]] pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet_inpainting.StableDiffusion3ControlNetInpaintingPipeline
    - all
    - __call__

## StableDiffusion3PipelineOutput

[[autodoc]] pipelines.stable_diffusion_3.pipeline_output.StableDiffusion3PipelineOutput
<!-- Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. -->

<div style="float: right;">
  <div class="flex flex-wrap space-x-1">
    <a href="https://huggingface.co/docs/diffusers/main/en/tutorials/using_peft_for_inference" target="_blank" rel="noopener">
      <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
    </a>
  </div>
</div>

# HunyuanVideo

[HunyuanVideo](https://huggingface.co/papers/2412.03603) is a 13B parameter diffusion transformer model designed to be competitive with closed-source video foundation models and enable wider community access. This model uses a "dual-stream to single-stream" architecture to separately process the video and text tokens first, before concatenating and feeding them to the transformer to fuse the multimodal information. A pretrained multimodal large language model (MLLM) is used as the encoder because it has better image-text alignment, better image detail description and reasoning, and it can be used as a zero-shot learner if system instructions are added to user prompts. Finally, HunyuanVideo uses a 3D causal variational autoencoder to more efficiently process video data at the original resolution and frame rate.

You can find all the original HunyuanVideo checkpoints under the [Tencent](https://huggingface.co/tencent) organization.

> [!TIP]
> Click on the HunyuanVideo models in the right sidebar for more examples of video generation tasks.
>
> The examples below use a checkpoint from [hunyuanvideo-community](https://huggingface.co/hunyuanvideo-community) because the weights are stored in a layout compatible with Diffusers.

The example below demonstrates how to generate a video optimized for memory or inference speed.

<hfoptions id="usage">
<hfoption id="memory">

Refer to the [Reduce memory usage](../../optimization/memory) guide for more details about the various memory saving techniques.

The quantized HunyuanVideo model below requires ~14GB of VRAM.

```py
import torch
from diffusers import AutoModel, HunyuanVideoPipeline
from diffusers.quantizers import PipelineQuantizationConfig
from diffusers.utils import export_to_video

# quantize weights to int4 with bitsandbytes
pipeline_quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",
    quant_kwargs={
        "load_in_4bit": True,
        "bnb_4bit_quant_type": "nf4",
        "bnb_4bit_compute_dtype": torch.bfloat16
    },
    components_to_quantize=["transformer"]
)

pipeline = HunyuanVideoPipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo",
    quantization_config=pipeline_quant_config,
    torch_dtype=torch.bfloat16,
)

# model-offloading and tiling
pipeline.enable_model_cpu_offload()
pipeline.vae.enable_tiling()

prompt = "A fluffy teddy bear sits on a bed of soft pillows surrounded by children's toys."
video = pipeline(prompt=prompt, num_frames=61, num_inference_steps=30).frames[0]
export_to_video(video, "output.mp4", fps=15)
```

</hfoption>
<hfoption id="inference speed">

[Compilation](../../optimization/fp16#torchcompile) is slow the first time but subsequent calls to the pipeline are faster.

```py
import torch
from diffusers import AutoModel, HunyuanVideoPipeline
from diffusers.quantizers import PipelineQuantizationConfig
from diffusers.utils import export_to_video

# quantize weights to int4 with bitsandbytes
pipeline_quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",
    quant_kwargs={
        "load_in_4bit": True,
        "bnb_4bit_quant_type": "nf4",
        "bnb_4bit_compute_dtype": torch.bfloat16
    },
    components_to_quantize=["transformer"]
)

pipeline = HunyuanVideoPipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanVideo",
    quantization_config=pipeline_quant_config,
    torch_dtype=torch.bfloat16,
)

# model-offloading and tiling
pipeline.enable_model_cpu_offload()
pipeline.vae.enable_tiling()

# torch.compile
pipeline.transformer.to(memory_format=torch.channels_last)
pipeline.transformer = torch.compile(
    pipeline.transformer, mode="max-autotune", fullgraph=True
)

prompt = "A fluffy teddy bear sits on a bed of soft pillows surrounded by children's toys."
video = pipeline(prompt=prompt, num_frames=61, num_inference_steps=30).frames[0]
export_to_video(video, "output.mp4", fps=15)
```

</hfoption>
</hfoptions>

## Notes

- HunyuanVideo supports LoRAs with [`~loaders.HunyuanVideoLoraLoaderMixin.load_lora_weights`].

  <details>
  <summary>Show example code</summary>

  ```py
  import torch
  from diffusers import AutoModel, HunyuanVideoPipeline
  from diffusers.quantizers import PipelineQuantizationConfig
  from diffusers.utils import export_to_video

  # quantize weights to int4 with bitsandbytes
  pipeline_quant_config = PipelineQuantizationConfig(
      quant_backend="bitsandbytes_4bit",
      quant_kwargs={
          "load_in_4bit": True,
          "bnb_4bit_quant_type": "nf4",
          "bnb_4bit_compute_dtype": torch.bfloat16
      },
      components_to_quantize=["transformer"]
  )

  pipeline = HunyuanVideoPipeline.from_pretrained(
      "hunyuanvideo-community/HunyuanVideo",
      quantization_config=pipeline_quant_config,
      torch_dtype=torch.bfloat16,
  )

  # load LoRA weights
  pipeline.load_lora_weights("https://huggingface.co/lucataco/hunyuan-steamboat-willie-10", adapter_name="steamboat-willie")
  pipeline.set_adapters("steamboat-willie", 0.9)

  # model-offloading and tiling
  pipeline.enable_model_cpu_offload()
  pipeline.vae.enable_tiling()

  # use "In the style of SWR" to trigger the LoRA
  prompt = """
  In the style of SWR. A black and white animated scene featuring a fluffy teddy bear sits on a bed of soft pillows surrounded by children's toys.
  """
  video = pipeline(prompt=prompt, num_frames=61, num_inference_steps=30).frames[0]
  export_to_video(video, "output.mp4", fps=15)
  ```

  </details>

- Refer to the table below for recommended inference values.

  | parameter | recommended value |
  |---|---|
  | text encoder dtype | `torch.float16` |
  | transformer dtype | `torch.bfloat16` |
  | vae dtype | `torch.float16` |
  | `num_frames (k)` | 4 * `k` + 1 |

- Try lower `shift` values (`2.0` to `5.0`) for lower resolution videos and higher `shift` values (`7.0` to `12.0`) for higher resolution videos.

## HunyuanVideoPipeline

[[autodoc]] HunyuanVideoPipeline
  - all
  - __call__

## HunyuanVideoPipelineOutput

[[autodoc]] pipelines.hunyuan_video.pipeline_output.HunyuanVideoPipelineOutput
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ConsistencyDecoderScheduler This scheduler is a part of the [`ConsistencyDecoderPipeline`] and was introduced in [DALL-E 3](https://openai.com/dall-e-3). The original codebase can be found at [openai/consistency_models](https://github.com/openai/consistency_models). ## ConsistencyDecoderScheduler [[autodoc]] schedulers.scheduling_consistency_decoder.ConsistencyDecoderScheduler
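The scheduler is rarely stepped by hand; it runs inside the consistency decoder while latents are decoded. The following is a hedged sketch of the usual entry point, swapping [`ConsistencyDecoderVAE`] into a Stable Diffusion pipeline; the checkpoint names are illustrative.

```py
import torch
from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE

# The consistency decoder VAE internally steps a ConsistencyDecoderScheduler while decoding latents
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipeline = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
).to("cuda")

image = pipeline("horse", generator=torch.manual_seed(0)).images[0]
```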
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # HeunDiscreteScheduler The Heun scheduler (Algorithm 1) is from the [Elucidating the Design Space of Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364) paper by Karras et al. The scheduler is ported from the [k-diffusion](https://github.com/crowsonkb/k-diffusion) library and created by [Katherine Crowson](https://github.com/crowsonkb/). ## HeunDiscreteScheduler [[autodoc]] HeunDiscreteScheduler ## SchedulerOutput [[autodoc]] schedulers.scheduling_utils.SchedulerOutput
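As a usage reference, the sketch below swaps the scheduler into an existing pipeline with `from_config`, which reuses the pipeline's current noise schedule settings. The Stable Diffusion checkpoint is an illustrative choice; note that Heun is a second-order method, so each scheduler step evaluates the model twice and a step takes roughly twice as long.

```py
import torch
from diffusers import DiffusionPipeline, HeunDiscreteScheduler

pipeline = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Swap in the Heun scheduler, inheriting betas and timestep settings from the current config
pipeline.scheduler = HeunDiscreteScheduler.from_config(pipeline.scheduler.config)

image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```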
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# AutoPipelineBlocks

[`~modular_pipelines.AutoPipelineBlocks`] are a multi-block type containing blocks that support different workflows. It automatically selects which sub-blocks to run based on the input provided at runtime. This is typically used to package multiple workflows - text-to-image, image-to-image, inpaint - into a single pipeline for convenience.

This guide shows how to create [`~modular_pipelines.AutoPipelineBlocks`].

Create three [`~modular_pipelines.ModularPipelineBlocks`] for text-to-image, image-to-image, and inpainting. These represent the different workflows available in the pipeline.

<hfoptions id="auto">
<hfoption id="text-to-image">

```py
import torch
from diffusers.modular_pipelines import ModularPipelineBlocks, InputParam, OutputParam

class TextToImageBlock(ModularPipelineBlocks):
    model_name = "text2img"

    @property
    def inputs(self):
        return [InputParam(name="prompt")]

    @property
    def intermediate_outputs(self):
        return []

    @property
    def description(self):
        return "I'm a text-to-image workflow!"

    def __call__(self, components, state):
        block_state = self.get_block_state(state)
        print("running the text-to-image workflow")
        # Add your text-to-image logic here
        # For example: generate image from prompt
        self.set_block_state(state, block_state)
        return components, state
```

</hfoption>
<hfoption id="image-to-image">

```py
class ImageToImageBlock(ModularPipelineBlocks):
    model_name = "img2img"

    @property
    def inputs(self):
        return [InputParam(name="prompt"), InputParam(name="image")]

    @property
    def intermediate_outputs(self):
        return []

    @property
    def description(self):
        return "I'm an image-to-image workflow!"

    def __call__(self, components, state):
        block_state = self.get_block_state(state)
        print("running the image-to-image workflow")
        # Add your image-to-image logic here
        # For example: transform input image based on prompt
        self.set_block_state(state, block_state)
        return components, state
```

</hfoption>
<hfoption id="inpaint">

```py
class InpaintBlock(ModularPipelineBlocks):
    model_name = "inpaint"

    @property
    def inputs(self):
        return [InputParam(name="prompt"), InputParam(name="image"), InputParam(name="mask")]

    @property
    def intermediate_outputs(self):
        return []

    @property
    def description(self):
        return "I'm an inpaint workflow!"

    def __call__(self, components, state):
        block_state = self.get_block_state(state)
        print("running the inpaint workflow")
        # Add your inpainting logic here
        # For example: fill masked areas based on prompt
        self.set_block_state(state, block_state)
        return components, state
```

</hfoption>
</hfoptions>

Create an [`~modular_pipelines.AutoPipelineBlocks`] class that includes a list of the sub-block classes and their corresponding block names. You also need to include `block_trigger_inputs`, a list of input names that trigger the corresponding block. If a trigger input is provided at runtime, then that block is selected to run. Use `None` to specify the default block to run if no trigger inputs are detected.
Lastly, it is important to include a `description` that clearly explains which inputs trigger which workflow. This helps users understand how to run specific workflows.

```py
from diffusers.modular_pipelines import AutoPipelineBlocks

class AutoImageBlocks(AutoPipelineBlocks):
    # List of sub-block classes to choose from (defined above)
    block_classes = [InpaintBlock, ImageToImageBlock, TextToImageBlock]
    # Names for each block in the same order
    block_names = ["inpaint", "img2img", "text2img"]
    # Trigger inputs that determine which block to run
    # - "mask" triggers inpaint workflow
    # - "image" triggers img2img workflow (but only if mask is not provided)
    # - if none of above, runs the text2img workflow (default)
    block_trigger_inputs = ["mask", "image", None]

    # Description is extremely important for AutoPipelineBlocks
    @property
    def description(self):
        return (
            "Pipeline generates images given different types of conditions!\n"
            + "This is an auto pipeline block that works for text2img, img2img and inpainting tasks.\n"
            + " - inpaint workflow is run when `mask` is provided.\n"
            + " - img2img workflow is run when `image` is provided (but only when `mask` is not provided).\n"
            + " - text2img workflow is run when neither `image` nor `mask` is provided.\n"
        )
```

It is **very** important to include a `description` to avoid any confusion over how to run a block and what inputs are required. While [`~modular_pipelines.AutoPipelineBlocks`] is convenient, its conditional logic may be difficult to figure out if it isn't properly explained.

Create an instance of `AutoImageBlocks`.

```py
auto_blocks = AutoImageBlocks()
```

For more complex compositions, such as nested [`~modular_pipelines.AutoPipelineBlocks`] blocks when they're used as sub-blocks in larger pipelines, use the [`~modular_pipelines.SequentialPipelineBlocks.get_execution_blocks`] method to extract the block that actually runs for a given input.

```py
auto_blocks.get_execution_blocks("mask")
```
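To sanity-check the routing at development time, you can inspect which sub-block each trigger input selects. This short sketch just reuses the `get_execution_blocks` method and the `description` property shown above:

```py
# Which sub-block runs for a given trigger input?
print(auto_blocks.get_execution_blocks("mask"))   # inpaint branch
print(auto_blocks.get_execution_blocks("image"))  # img2img branch

# The description property doubles as runtime documentation of the trigger rules
print(auto_blocks.description)
```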
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Metal Performance Shaders (MPS)

> [!TIP]
> Pipelines with a <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white"> badge indicate a model can take advantage of the MPS backend on Apple silicon devices for faster inference. Feel free to open a [Pull Request](https://github.com/huggingface/diffusers/compare) to add this badge to pipelines that are missing it.

🤗 Diffusers is compatible with Apple silicon (M1/M2 chips) using the PyTorch [`mps`](https://pytorch.org/docs/stable/notes/mps.html) device, which uses the Metal framework to leverage the GPU on macOS devices.

You'll need to have:

- macOS computer with Apple silicon (M1/M2) hardware
- macOS 12.6 or later (13.0 or later recommended)
- arm64 version of Python
- [PyTorch 2.0](https://pytorch.org/get-started/locally/) (recommended) or 1.13 (minimum version supported for `mps`)

The `mps` backend uses PyTorch's `.to()` interface to move the Stable Diffusion pipeline onto your M1 or M2 device:

```python
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
pipe = pipe.to("mps")

# Recommended if your computer has < 64 GB of RAM
pipe.enable_attention_slicing()

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
image
```

<Tip warning={true}>

The PyTorch [mps](https://pytorch.org/docs/stable/notes/mps.html) backend does not support NDArray sizes greater than `2**32`. Please open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) if you encounter this problem so we can investigate.

</Tip>

If you're using **PyTorch 1.13**, you need to "prime" the pipeline with an additional one-time pass through it. This is a temporary workaround for an issue where the first inference pass produces slightly different results than subsequent ones. You only need to do this pass once, and after just one inference step you can discard the result.

```diff
  from diffusers import DiffusionPipeline

  pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5").to("mps")
  pipe.enable_attention_slicing()

  prompt = "a photo of an astronaut riding a horse on mars"
  # First-time "warmup" pass if PyTorch version is 1.13
+ _ = pipe(prompt, num_inference_steps=1)

  # Results match those from the CPU device after the warmup pass.
  image = pipe(prompt).images[0]
```

## Troubleshoot

This section lists some common issues with using the `mps` backend and how to solve them.

### Attention slicing

M1/M2 performance is very sensitive to memory pressure. When memory pressure builds, the system automatically swaps, which significantly degrades performance. To prevent this from happening, we recommend *attention slicing* to reduce memory pressure during inference and prevent swapping.
This is especially relevant if your computer has less than 64GB of system RAM, or if you generate images at non-standard resolutions larger than 512×512 pixels. Call the [`~DiffusionPipeline.enable_attention_slicing`] function on your pipeline:

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("mps")
pipeline.enable_attention_slicing()
```

Attention slicing performs the costly attention operation in multiple steps instead of all at once. It usually costs ~20% in performance on computers without universal memory, but we've observed *better performance* on most Apple silicon computers unless you have 64GB of RAM or more.

### Batch inference

Generating multiple prompts in a batch can crash or fail to work reliably. If this is the case, try iterating over prompts instead of batching them, as sketched below.
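The following is a minimal sketch of the sequential fallback; the prompts are placeholders:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5").to("mps")
pipeline.enable_attention_slicing()

prompts = [
    "a photo of an astronaut riding a horse on mars",
    "a watercolor painting of a lighthouse at dawn",
]

# Generate one image per call instead of passing the whole list in a single batched call
images = [pipeline(prompt).images[0] for prompt in prompts]
```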
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Quickstart

Diffusers is a library for developers and researchers that provides an easy inference API for generating images, videos and audio, as well as the building blocks for implementing new workflows. Diffusers provides many optimizations out-of-the-box that make it possible to load and run large models on setups with limited memory or to accelerate inference.

This Quickstart will give you an overview of Diffusers and get you up and generating quickly.

> [!TIP]
> Before you begin, make sure you have a Hugging Face [account](https://huggingface.co/join) in order to use gated models like [Flux](https://huggingface.co/black-forest-labs/FLUX.1-dev).

Follow the [Installation](./installation) guide to install Diffusers if it's not already installed.

## DiffusionPipeline

A diffusion model combines multiple components to generate outputs in any modality based on an input, such as a text description, image or both.

For a standard text-to-image model:

1. A text encoder turns a prompt into embeddings that guide the denoising process. Some models have more than one text encoder.
2. A scheduler contains the algorithmic specifics for gradually denoising initial random noise into clean outputs. Different schedulers affect generation speed and quality.
3. A UNet or diffusion transformer (DiT) is the workhorse of a diffusion model. At each step, it performs the denoising predictions, such as how much noise to remove or the general direction in which to steer the noise to generate better quality outputs. The UNet or DiT repeats this loop for a set amount of steps to generate the final output.
4. A variational autoencoder (VAE) encodes and decodes pixels to a spatially compressed latent space. *Latents* are compressed representations of an image and are more efficient to work with. The UNet or DiT operates on latents, and the clean latents at the end are decoded back into images.

The [`DiffusionPipeline`] packages all these components into a single class for inference. There are several arguments in [`~DiffusionPipeline.__call__`] you can change, such as `num_inference_steps`, that affect the diffusion process. Try different values and arguments to see how they change generation quality or speed.

Load a model with [`~DiffusionPipeline.from_pretrained`] and describe what you'd like to generate. The example below uses the default argument values.

<hfoptions id="diffusionpipeline">
<hfoption id="text-to-image">

Use `.images[0]` to access the generated image output.
```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda"
)
prompt = """
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
pipeline(prompt).images[0]
```

</hfoption>
<hfoption id="text-to-video">

Use `.frames[0]` to access the generated video output and [`~utils.export_to_video`] to save the video.

```py
import torch
from diffusers import AutoencoderKLWan, DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig
from diffusers.utils import export_to_video

vae = AutoencoderKLWan.from_pretrained(
    "Wan-AI/Wan2.2-T2V-A14B-Diffusers", subfolder="vae", torch_dtype=torch.float32
)
pipeline = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.2-T2V-A14B-Diffusers",
    vae=vae,
    torch_dtype=torch.bfloat16,
    device_map="cuda"
)
prompt = """
Cinematic video of a sleek cat lounging on a colorful inflatable in a crystal-clear turquoise pool in Palm Springs, sipping a salt-rimmed margarita through a straw. Golden-hour sunlight glows over mid-century modern homes and swaying palms. Shot in rich Sony a7S III: with moody, glamorous color grading, subtle lens flares, and soft vintage film grain. Ripples shimmer as a warm desert breeze stirs the water, blending luxury and playful charm in an epic, gorgeously composed frame.
"""
video = pipeline(prompt=prompt, num_frames=81, num_inference_steps=40).frames[0]
export_to_video(video, "output.mp4", fps=16)
```

</hfoption>
</hfoptions>

## LoRA

Adapters insert a small number of trainable parameters to the original base model. Only the inserted parameters are fine-tuned while the rest of the model weights remain frozen. This makes it fast and cheap to fine-tune a model on a new style. Among adapters, [LoRAs](./tutorials/using_peft_for_inference) are the most popular.

Add a LoRA to a pipeline with the [`~loaders.QwenImageLoraLoaderMixin.load_lora_weights`] method. Some LoRAs require a special word to trigger them, such as `Realism` in the example below. Check a LoRA's model card to see if it requires a trigger word.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda"
)
pipeline.load_lora_weights(
    "flymy-ai/qwen-image-realism-lora",
)
prompt = """
super Realism cinematic film still of a cat sipping a margarita in a pool in Palm Springs in the style of umempart, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
pipeline(prompt).images[0]
```

Check out the [LoRA](./tutorials/using_peft_for_inference) docs or Adapters section to learn more.

## Quantization

[Quantization](./quantization/overview) stores data in fewer bits to reduce memory usage. It may also speed up inference because it takes less time to perform calculations with fewer bits.

Diffusers provides several quantization backends and picking one depends on your use case. For example, [bitsandbytes](./quantization/bitsandbytes) and [torchao](./quantization/torchao) are both simple and easy to use for inference, but torchao supports more [quantization types](./quantization/torchao#supported-quantization-types) like fp8.
Configure [`PipelineQuantizationConfig`] with the backend to use, the specific arguments (refer to the [API](./api/quantization) reference for available arguments) for that backend, and which components to quantize. The example below quantizes the model to 4 bits and only uses 14.93GB of memory.

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig

quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",
    quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
    components_to_quantize=["transformer", "text_encoder"],
)
pipeline = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image",
    torch_dtype=torch.bfloat16,
    quantization_config=quant_config,
    device_map="cuda"
)
prompt = """
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
pipeline(prompt).images[0]
print(f"Max memory allocated: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
```

Take a look at the [Quantization](./quantization/overview) section for more details.

## Optimizations

> [!TIP]
> Optimization is dependent on hardware specs such as memory. Use this [Space](https://huggingface.co/spaces/diffusers/optimized-diffusers-code) to generate code examples that include all of Diffusers' available memory and speed optimization techniques for any model you're using.

Modern diffusion models are very large and have billions of parameters. The iterative denoising process is also computationally intensive and slow. Diffusers provides techniques for reducing memory usage and boosting inference speed. These techniques can be combined with quantization to optimize for both memory usage and inference speed.

### Memory usage

The text encoders and UNet or DiT can use up as much as ~30GB of memory, exceeding the amount available on many free-tier or consumer GPUs.

Offloading stores weights that aren't currently used on the CPU and only moves them to the GPU when they're needed. There are a few offloading types and the example below uses [model offloading](./optimization/memory#model-offloading). This moves an entire model, like a text encoder or transformer, to the CPU when it isn't actively being used.

Call [`~DiffusionPipeline.enable_model_cpu_offload`] to activate it. By combining quantization and offloading, the following example only requires ~12.54GB of memory.

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig

quant_config = PipelineQuantizationConfig(
    quant_backend="bitsandbytes_4bit",
    quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
    components_to_quantize=["transformer", "text_encoder"],
)
pipeline = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image",
    torch_dtype=torch.bfloat16,
    quantization_config=quant_config,
    device_map="cuda"
)
pipeline.enable_model_cpu_offload()
prompt = """
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
pipeline(prompt).images[0]
print(f"Max memory allocated: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
```

Refer to the [Reduce memory usage](./optimization/memory) docs to learn more about other memory reducing techniques.
### Inference speed

The denoising loop performs a lot of computations and can be slow. Methods like [torch.compile](./optimization/fp16#torchcompile) increase inference speed by compiling the computations into an optimized kernel. Compilation is slow for the first generation but successive generations should be much faster.

The example below uses [regional compilation](./optimization/fp16#regional-compilation) to only compile small regions of a model. It reduces cold-start latency while also providing a runtime speedup.

Call [`~ModelMixin.compile_repeated_blocks`] on the model to activate it.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image", torch_dtype=torch.bfloat16, device_map="cuda"
)
pipeline.transformer.compile_repeated_blocks(
    fullgraph=True,
)
prompt = """
cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
"""
pipeline(prompt).images[0]
```

Check out the [Accelerate inference](./optimization/fp16) or [Caching](./optimization/cache) docs for more methods that speed up inference.
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# T2I-Adapter

[T2I-Adapter](https://hf.co/papers/2302.08453) is a lightweight adapter model that provides an additional conditioning input image (line art, canny, sketch, depth, pose) to better control image generation. It is similar to a ControlNet, but it is a lot smaller (~77M parameters and ~300MB file size) because it only inserts weights into the UNet instead of copying and training it.

The T2I-Adapter is only available for training with the Stable Diffusion XL (SDXL) model.

This guide will explore the [train_t2i_adapter_sdxl.py](https://github.com/huggingface/diffusers/blob/main/examples/t2i_adapter/train_t2i_adapter_sdxl.py) training script to help you become familiar with it, and how you can adapt it for your own use-case.

Before running the script, make sure you install the library from source:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then navigate to the example folder containing the training script and install the required dependencies for the script you're using:

```bash
cd examples/t2i_adapter
pip install -r requirements.txt
```

<Tip>

🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.

</Tip>

Initialize an 🤗 Accelerate environment:

```bash
accelerate config
```

To setup a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

<Tip>

The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/t2i_adapter/train_t2i_adapter_sdxl.py) and let us know if you have any questions or concerns.

</Tip>

## Script parameters

The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L233) function. It provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.
For example, to activate gradient accumulation, add the `--gradient_accumulation_steps` parameter to the training command:

```bash
accelerate launch train_t2i_adapter_sdxl.py \
  --gradient_accumulation_steps=4
```

Many of the basic and important parameters are described in the [Text-to-image](text2image#script-parameters) training guide, so this guide just focuses on the relevant T2I-Adapter parameters:

- `--pretrained_vae_model_name_or_path`: path to a pretrained VAE; the SDXL VAE is known to suffer from numerical instability, so this parameter allows you to specify a better [VAE](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)
- `--crops_coords_top_left_h` and `--crops_coords_top_left_w`: height and width coordinates to include in SDXL's crop coordinate embeddings
- `--conditioning_image_column`: the column of the conditioning images in the dataset
- `--proportion_empty_prompts`: the proportion of image prompts to replace with empty strings

## Training script

As with the script parameters, a walkthrough of the training script is provided in the [Text-to-image](text2image#training-script) training guide. Instead, this guide takes a look at the T2I-Adapter relevant parts of the script.

The training script begins by preparing the dataset. This includes [tokenizing](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L674) the prompt and [applying transforms](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L714) to the images and conditioning images.

```py
conditioning_image_transforms = transforms.Compose(
    [
        transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
        transforms.CenterCrop(args.resolution),
        transforms.ToTensor(),
    ]
)
```

Within the [`main()`](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L770) function, the T2I-Adapter is either loaded from a pretrained adapter or it is randomly initialized:

```py
if args.adapter_model_name_or_path:
    logger.info("Loading existing adapter weights.")
    t2iadapter = T2IAdapter.from_pretrained(args.adapter_model_name_or_path)
else:
    logger.info("Initializing t2iadapter weights.")
    t2iadapter = T2IAdapter(
        in_channels=3,
        channels=(320, 640, 1280, 1280),
        num_res_blocks=2,
        downscale_factor=16,
        adapter_type="full_adapter_xl",
    )
```

The [optimizer](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L952) is initialized for the T2I-Adapter parameters:

```py
params_to_optimize = t2iadapter.parameters()
optimizer = optimizer_class(
    params_to_optimize,
    lr=args.learning_rate,
    betas=(args.adam_beta1, args.adam_beta2),
    weight_decay=args.adam_weight_decay,
    eps=args.adam_epsilon,
)
```

Lastly, in the [training loop](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L1086), the adapter conditioning image and the text embeddings are passed to the UNet to predict the noise residual:

```py
t2iadapter_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype)
down_block_additional_residuals = t2iadapter(t2iadapter_image)
down_block_additional_residuals = [
    sample.to(dtype=weight_dtype) for sample in down_block_additional_residuals
]

model_pred = unet(
    inp_noisy_latents,
    timesteps,
    encoder_hidden_states=batch["prompt_ids"],
    added_cond_kwargs=batch["unet_added_conditions"],
    down_block_additional_residuals=down_block_additional_residuals,
).sample
```

If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.

## Launch the script

Now you’re ready to launch the training script! 🚀

For this example training, you'll use the [fusing/fill50k](https://huggingface.co/datasets/fusing/fill50k) dataset. You can also create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide).

Set the environment variable `MODEL_DIR` to a model id on the Hub or a path to a local model and `OUTPUT_DIR` to where you want to save the model.

Download the following images to condition your training with:

```bash
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```

<Tip>

To monitor training progress with Weights & Biases, add the `--report_to=wandb` parameter to the training command. You'll also need to add the `--validation_image`, `--validation_prompt`, and `--validation_steps` to the training command to keep track of results. This can be really useful for debugging the model and viewing intermediate results.

</Tip>

```bash
export MODEL_DIR="stabilityai/stable-diffusion-xl-base-1.0"
export OUTPUT_DIR="path to save model"

accelerate launch train_t2i_adapter_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_DIR \
  --output_dir=$OUTPUT_DIR \
  --dataset_name=fusing/fill50k \
  --mixed_precision="fp16" \
  --resolution=1024 \
  --learning_rate=1e-5 \
  --max_train_steps=15000 \
  --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
  --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
  --validation_steps=100 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --report_to="wandb" \
  --seed=42 \
  --push_to_hub
```

Once training is complete, you can use your T2I-Adapter for inference:

```py
from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, EulerAncestralDiscreteScheduler
from diffusers.utils import load_image
import torch

adapter = T2IAdapter.from_pretrained("path/to/adapter", torch_dtype=torch.float16)
pipeline = StableDiffusionXLAdapterPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", adapter=adapter, torch_dtype=torch.float16
)

pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
pipeline.enable_xformers_memory_efficient_attention()
pipeline.enable_model_cpu_offload()

control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"

generator = torch.manual_seed(0)
image = pipeline(
    prompt,
    image=control_image,
    generator=generator,
).images[0]
image.save("./output.png")
```

## Next steps

Congratulations on training a T2I-Adapter model! 🎉 To learn more:

- Read the [Efficient Controllable Generation for SDXL with T2I-Adapters](https://huggingface.co/blog/t2i-sdxl-adapters) blog post to learn more details about the experimental results from the T2I-Adapter team.
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Text-guided depth-to-image generation [[open-in-colab]] The [`StableDiffusionDepth2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images. In addition, you can also pass a `depth_map` to preserve the image structure. If no `depth_map` is provided, the pipeline automatically predicts the depth via an integrated [depth-estimation model](https://github.com/isl-org/MiDaS). Start by creating an instance of the [`StableDiffusionDepth2ImgPipeline`]: ```python import torch from diffusers import StableDiffusionDepth2ImgPipeline from diffusers.utils import load_image, make_image_grid pipeline = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16, use_safetensors=True, ).to("cuda") ``` Now pass your prompt to the pipeline. You can also pass a `negative_prompt` to prevent certain words from guiding how an image is generated: ```python url = "http://images.cocodataset.org/val2017/000000039769.jpg" init_image = load_image(url) prompt = "two tigers" negative_prompt = "bad, deformed, ugly, bad anatomy" image = pipeline(prompt=prompt, image=init_image, negative_prompt=negative_prompt, strength=0.7).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` | Input | Output | |---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------| | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> |
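To supply your own depth estimate instead of relying on the pipeline's built-in MiDaS model, pass a `depth_map` explicitly. The sketch below is hedged: the DPT checkpoint mirrors the estimator the pipeline uses internally, and the raw predicted depth of shape `(batch, height, width)` is assumed to be normalized and resized by the pipeline itself, so verify against the `depth_map` argument docs for your Diffusers version.

```python
import torch
from transformers import DPTImageProcessor, DPTForDepthEstimation

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

inputs = processor(images=init_image, return_tensors="pt")
with torch.no_grad():
    depth_map = depth_model(**inputs).predicted_depth  # raw depth, shape (1, H, W)

image = pipeline(
    prompt=prompt,
    image=init_image,
    depth_map=depth_map,
    negative_prompt=negative_prompt,
    strength=0.7,
).images[0]
```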
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Reproducible pipelines

Diffusion models are inherently random, which is what allows them to generate different outputs every time they are run. But there are certain times when you want to generate the same output every time, like when you're testing, replicating results, and even [improving image quality](#deterministic-batch-generation). While you can't expect to get identical results across platforms, you can expect reproducible results across releases and platforms within a certain tolerance range (though even this may vary).

This guide will show you how to control randomness for deterministic generation on a CPU and GPU.

> [!TIP]
> We strongly recommend reading PyTorch's [statement about reproducibility](https://pytorch.org/docs/stable/notes/randomness.html):
>
> "Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms. Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds."

## Control randomness

During inference, pipelines rely heavily on random sampling operations which include creating the Gaussian noise tensors to denoise and adding noise to the scheduling step.

Take a look at the tensor values in the [`DDIMPipeline`] after two inference steps.

```python
from diffusers import DDIMPipeline
import numpy as np

ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
image = ddim(num_inference_steps=2, output_type="np").images
print(np.abs(image).sum())
```

Running the code above prints one value, but if you run it again you get a different value.

Each time the pipeline is run, [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html) uses a different random seed to create the Gaussian noise tensors. This leads to a different result each time it is run and enables the diffusion pipeline to generate a different random image each time.

But if you need to reliably generate the same image, that depends on whether you're running the pipeline on a CPU or GPU.

> [!TIP]
> It might seem unintuitive to pass `Generator` objects to a pipeline instead of the integer value representing the seed. However, this is the recommended design when working with probabilistic models in PyTorch because a `Generator` is a *random state* that can be passed to multiple pipelines in a sequence. As soon as the `Generator` is consumed, the *state* is changed in place which means even if you passed the same `Generator` to a different pipeline, it won't produce the same result because the state is already changed.

<hfoptions id="hardware">
<hfoption id="CPU">

To generate reproducible results on a CPU, you'll need to use a PyTorch [Generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed. With the seeded `Generator` passed to all of the pipeline's random functions, the code below always prints a value of `1491.1711`.
You should get a similar, if not the same, result on whatever hardware and PyTorch version you're using.

```python
import torch
import numpy as np
from diffusers import DDIMPipeline

ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
generator = torch.Generator(device="cpu").manual_seed(0)
image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
print(np.abs(image).sum())
```

</hfoption>
<hfoption id="GPU">

Writing a reproducible pipeline on a GPU is a bit trickier, and full reproducibility across different hardware is not guaranteed because matrix multiplication - which diffusion pipelines require a lot of - is less deterministic on a GPU than a CPU. For example, if you run the CPU example above on a GPU, you'll get a different result even though the seed is identical. This is because the GPU uses a different random number generator than the CPU.

```python
import torch
import numpy as np
from diffusers import DDIMPipeline

ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
ddim.to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
print(np.abs(image).sum())
```

To avoid this issue, Diffusers has a [`~utils.torch_utils.randn_tensor`] function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The [`~utils.torch_utils.randn_tensor`] function is used everywhere inside the pipeline. Now you can call [torch.manual_seed](https://pytorch.org/docs/stable/generated/torch.manual_seed.html) which automatically creates a CPU `Generator` that can be passed to the pipeline even if it is being run on a GPU.

```python
import torch
import numpy as np
from diffusers import DDIMPipeline

ddim = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
ddim.to("cuda")
generator = torch.manual_seed(0)
image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
print(np.abs(image).sum())
```

> [!TIP]
> If reproducibility is important to your use case, we recommend always passing a CPU `Generator`. The performance loss is often negligible and you'll generate more similar values than if the pipeline had been run on a GPU.

Finally, more complex pipelines such as [`UnCLIPPipeline`] are often extremely susceptible to precision error propagation. You'll need to use exactly the same hardware and PyTorch version for full reproducibility.

</hfoption>
</hfoptions>

## Deterministic algorithms

You can also configure PyTorch to use deterministic algorithms to create a reproducible pipeline. The downside is that deterministic algorithms may be slower than non-deterministic ones and you may observe a decrease in performance.

Non-deterministic behavior occurs when operations are launched in more than one CUDA stream. To avoid this, set the environment variable [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during runtime.

PyTorch typically benchmarks multiple algorithms to select the fastest one, but if you want reproducibility, you should disable this feature because the benchmark may select different algorithms each time. Set Diffusers [enable_full_determinism](https://github.com/huggingface/diffusers/blob/142f353e1c638ff1d20bd798402b68f72c1ebbdd/src/diffusers/utils/testing_utils.py#L861) to enable deterministic algorithms.
```py
from diffusers.utils.testing_utils import enable_full_determinism

enable_full_determinism()
```

Now when you run the same pipeline twice, you'll get identical results.

```py
import torch
from diffusers import DDIMScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
g = torch.Generator(device="cuda")

prompt = "A bear is playing a guitar on Times Square"

g.manual_seed(0)
result1 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images

g.manual_seed(0)
result2 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images

print("L_inf dist =", abs(result1 - result2).max())
"L_inf dist = tensor(0., device='cuda:0')"
```
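A related pattern, sketched below under the assumption of a Stable Diffusion checkpoint, is deterministic batch generation: pass one seeded `Generator` per image so that any single image from the batch can be regenerated later from its own seed.

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True
).to("cuda")

# One Generator per image; reuse seed i later to reproduce image i on its own
generators = [torch.Generator(device="cpu").manual_seed(i) for i in range(4)]
images = pipe(
    "A bear is playing a guitar on Times Square",
    num_images_per_prompt=4,
    generator=generators,
).images
```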
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Installation

Install 🤗 Diffusers for whichever deep learning library you're working with.

🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:

- [PyTorch](https://pytorch.org/get-started/locally/) installation instructions.
- [Flax](https://flax.readthedocs.io/en/latest/) installation instructions.

## Install with pip

You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html).

If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.

Start by creating a virtual environment in your project directory:

```bash
python -m venv .env
```

Activate the virtual environment:

```bash
source .env/bin/activate
```

🤗 Diffusers also relies on the 🤗 Transformers library, and you can install both with the following command:

<frameworkcontent>
<pt>
```bash
pip install diffusers["torch"] transformers
```
</pt>
<jax>
```bash
pip install diffusers["flax"] transformers
```
</jax>
</frameworkcontent>

## Install from source

Before installing 🤗 Diffusers from source, make sure you have `torch` and 🤗 Accelerate installed.

To install `torch`, refer to the `torch` [installation](https://pytorch.org/get-started/locally/#start-locally) guide.

To install 🤗 Accelerate:

```bash
pip install accelerate
```

Install 🤗 Diffusers from source with the following command:

```bash
pip install git+https://github.com/huggingface/diffusers
```

This command installs the bleeding-edge `main` version rather than the latest `stable` version. The `main` version is useful for staying up-to-date with the latest developments, for instance, when a bug has been fixed since the last official release but a new release hasn't been rolled out yet. However, this means the `main` version may not always be stable. We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose)!

## Editable install

You will need an editable install if you:

* Use the `main` version of the source code.
* Contribute to 🤗 Diffusers and need to test changes in the code.

Clone the repository and install 🤗 Diffusers with the following commands:

```bash
git clone https://github.com/huggingface/diffusers.git
cd diffusers
```

<frameworkcontent>
<pt>
```bash
pip install -e ".[torch]"
```
</pt>
<jax>
```bash
pip install -e ".[flax]"
```
</jax>
</frameworkcontent>

These commands link the folder you cloned the repository to with your Python library paths. Python will now look inside the folder you cloned in addition to the normal library paths. For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.10/site-packages/`, Python will also search the `~/diffusers/` folder you cloned.

<Tip warning={true}>

You must keep the `diffusers` folder if you want to keep using the library.

</Tip>

Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command:

```bash
cd ~/diffusers/
git pull
```

Your Python environment will find the `main` version of 🤗 Diffusers on the next run.

## Notice on telemetry logging

Our library gathers telemetry information during `from_pretrained()` requests. The data gathered includes the version of 🤗 Diffusers and PyTorch/Flax and the requested model or pipeline class. It also includes the path to a pretrained checkpoint if it is hosted on the Hub. This usage data helps us debug issues and prioritize new features. Telemetry is only sent when loading models and pipelines from the Hugging Face Hub, and it is not collected during local usage.

We understand that not everyone wants to share additional information, and we respect your privacy. You can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal:

On Linux/MacOS:
```bash
export DISABLE_TELEMETRY=YES
```

On Windows:
```bash
set DISABLE_TELEMETRY=YES
```
diffusers/docs/source/ja/installation.md/0
{ "file_path": "diffusers/docs/source/ja/installation.md", "repo_id": "diffusers", "token_count": 2493 }
126
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# How to use Stable Diffusion on Intel Gaudi

🤗 Diffusers is compatible with Habana Gaudi through 🤗 [Optimum Habana](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion).

## Requirements

- Optimum Habana 1.4 or later; see [here](https://huggingface.co/docs/optimum/habana/installation) for installation instructions.
- SynapseAI 1.8.

## Inference pipeline

To generate images with Stable Diffusion 1 and 2 on Gaudi, you need to instantiate two objects:

- A pipeline with [`GaudiStableDiffusionPipeline`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline). This pipeline supports *text-to-image generation*.
- A scheduler with [`GaudiDDIMScheduler`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline#optimum.habana.diffusers.GaudiDDIMScheduler). This scheduler has been optimized for Habana Gaudi.

When initializing the pipeline, you have to specify `use_habana=True` to deploy it on HPUs. Additionally, to get the fastest possible generation, you should enable **HPU graphs** with `use_hpu_graphs=True`. Finally, you need to specify a [Gaudi configuration](https://huggingface.co/docs/optimum/habana/package_reference/gaudi_config), which can be downloaded from the [Hugging Face Hub](https://huggingface.co/Habana).

```python
from optimum.habana import GaudiConfig
from optimum.habana.diffusers import GaudiDDIMScheduler, GaudiStableDiffusionPipeline

model_name = "stabilityai/stable-diffusion-2-base"
scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler")
pipeline = GaudiStableDiffusionPipeline.from_pretrained(
    model_name,
    scheduler=scheduler,
    use_habana=True,
    use_hpu_graphs=True,
    gaudi_config="Habana/stable-diffusion",
)
```

You can then call the pipeline to generate images in batches from one or several prompts:

```python
outputs = pipeline(
    prompt=[
        "High quality photo of an astronaut riding a horse in space",
        "Face of a yellow cat, high resolution, sitting on a park bench",
    ],
    num_images_per_prompt=10,
    batch_size=4,
)
```

For more information, check out Optimum Habana's [documentation](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion) and the [example](https://github.com/huggingface/optimum-habana/tree/main/examples/stable-diffusion) provided in the official GitHub repository.

## Benchmark

Here are the latencies for Habana's first-generation Gaudi and Gaudi2 with the [Habana/stable-diffusion](https://huggingface.co/Habana/stable-diffusion) Gaudi configuration (mixed precision bf16/fp32):

|                        | Latency (batch size = 1) | Throughput (batch size = 8) |
| ---------------------- |:------------------------:|:---------------------------:|
| first-generation Gaudi | 4.29s                    | 0.283 images/s              |
| Gaudi2                 | 1.54s                    | 0.904 images/s              |
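If you want a rough check of these numbers on your own hardware, you can time the pipeline call directly. Below is a minimal sketch reusing the `pipeline` built above; note that the first call includes HPU graph compilation and is much slower, so it is discarded as warmup:

```python
import time

# warmup: the first call triggers HPU graph compilation and is not representative
pipeline(prompt=["warmup"], num_images_per_prompt=1, batch_size=1)

start = time.perf_counter()
outputs = pipeline(
    prompt=["High quality photo of an astronaut riding a horse in space"],
    num_images_per_prompt=1,
    batch_size=1,
)
print(f"Latency: {time.perf_counter() - start:.2f}s")
```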
diffusers/docs/source/ko/optimization/habana.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/habana.md", "repo_id": "diffusers", "token_count": 1910 }
127
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Low-Rank Adaptation of Large Language Models (LoRA)

[[open-in-colab]]

<Tip warning={true}>

LoRA is currently only supported for the attention layers of the [`UNet2DConditionModel`].

</Tip>

[LoRA (Low-Rank Adaptation of Large Language Models)](https://huggingface.co/papers/2106.09685) is a training method that accelerates the training of large models while consuming less memory. It adds pairs of rank-decomposition weight matrices (called **update matrices**) and **only** trains those newly added weights. This has a couple of advantages:

- The previously pretrained weights are kept frozen, so the model is not as prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- The rank-decomposition matrices have significantly fewer parameters than the original model, so the trained LoRA weights are easily portable.
- LoRA matrices are generally added to the attention layers of the original model. 🧨 Diffusers provides the [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method to load LoRA weights into a model's attention layers. You can control the extent to which the model is adapted toward the new training images with the `scale` parameter.
- The greater memory efficiency allows you to run fine-tuning on consumer GPUs such as a Tesla T4, an RTX 3080, or an RTX 2080 Ti! GPUs like the T4 are free and easily accessible in Kaggle or Google Colab notebooks.

<Tip>

💡 LoRA is not limited to attention layers. The authors found that amending the attention layers of a language model is sufficient to obtain good downstream performance very efficiently. This is why it's common to just add the LoRA weights to the attention layers of a model. Check out the [Using LoRA for effective Stable Diffusion fine-tuning](https://huggingface.co/blog/lora) blog for more information about how LoRA works!

</Tip>

[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository. 🧨 Diffusers supports [text-to-image generation](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image#training-with-lora) and [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#training-with-low-rank-adaptation-of-large-language-models-lora). This guide will show you how to do both.

Log in to your Hugging Face account to save your model or share it with the community (create an [account](https://huggingface.co/join) if you don't have one already):

```bash
hf auth login
```

## Text-to-image

Fine-tuning a model like Stable Diffusion, which has billions of parameters, can be slow and difficult. With LoRA, it is much easier and faster to fine-tune a diffusion model. It can run on hardware with as little as 11GB of GPU RAM without relying on tricks such as 8-bit optimizers.

### Training[[dreambooth-training]]

Let's fine-tune [`stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate your own Naruto characters.

To start, make sure the `MODEL_NAME` and `DATASET_NAME` environment variables are set. The `OUTPUT_DIR` and `HUB_MODEL_ID` variables are optional and specify where to save the model on the Hub.

```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export OUTPUT_DIR="/sddata/finetune/lora/naruto"
export HUB_MODEL_ID="naruto-lora"
export DATASET_NAME="lambdalabs/naruto-blip-captions"
```

There are some flags to be aware of before you start training:

* `--push_to_hub` stores the trained LoRA embeddings on the Hub.
* `--report_to=wandb` reports and logs the training results to your Weights & Biases dashboard (see this [report](https://wandb.ai/pcuenq/text2image-fine-tune/run/b4k1w0tn?workspace=user-pcuenq) for an example).
* `--learning_rate=1e-04`, with LoRA you can afford to use a higher learning rate than usual.
Now you're ready to launch the training (you can find the full training script [here](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)).

```bash
accelerate launch train_dreambooth_lora.py \
  --pretrained_model_name_or_path=$MODEL_NAME  \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --checkpointing_steps=100 \
  --learning_rate=1e-4 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=50 \
  --seed="0" \
  --push_to_hub
```

### Inference[[dreambooth-inference]]

Now you can use the model for inference by loading the base model in the [`StableDiffusionPipeline`]:

```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline

>>> model_base = "stable-diffusion-v1-5/stable-diffusion-v1-5"

>>> pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16)
```

Load the LoRA weights from your fine-tuned DreamBooth model *on top of the base model weights*, and then move the pipeline to a GPU for faster inference. When you merge the LoRA weights with the frozen pretrained model weights, you can optionally adjust how much of the weights to merge with the `scale` parameter:

<Tip>

💡 A `scale` value of `0` is the same as not using your LoRA weights, and you're only using the base model weights. A `scale` value of `1` means you're only using the fully fine-tuned LoRA weights. Values between 0 and 1 interpolate between the two.

</Tip>

```py
>>> pipe.unet.load_attn_procs(model_path)
>>> pipe.to("cuda")
# use half the weights from the LoRA fine-tuned model and half from the base model
>>> image = pipe(
...     "A picture of a sks dog in a bucket.",
...     num_inference_steps=25,
...     guidance_scale=7.5,
...     cross_attention_kwargs={"scale": 0.5},
... ).images[0]

# use the weights from the fully fine-tuned LoRA model
>>> image = pipe("A picture of a sks dog in a bucket.", num_inference_steps=25, guidance_scale=7.5).images[0]
>>> image.save("bucket-dog.png")
```
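To see the interpolation from the Tip above in practice, here is a small sketch that sweeps several `scale` values and saves one image per value (the output filenames are just illustrative):

```py
# sweep the LoRA merge strength to compare results side by side
for scale in [0.0, 0.25, 0.5, 0.75, 1.0]:
    image = pipe(
        "A picture of a sks dog in a bucket.",
        num_inference_steps=25,
        guidance_scale=7.5,
        cross_attention_kwargs={"scale": scale},
    ).images[0]
    image.save(f"bucket-dog-scale-{scale}.png")
```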
diffusers/docs/source/ko/training/lora.md/0
{ "file_path": "diffusers/docs/source/ko/training/lora.md", "repo_id": "diffusers", "token_count": 4754 }
128
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Load adapters

[[open-in-colab]]

There are several [training](../training/overview) techniques for personalizing diffusion models to generate images of a specific subject or images in certain styles. Each of these training methods produces a different type of adapter. Some adapters generate an entirely new model, while other adapters only modify a smaller set of embeddings or weights. This means the loading process for each adapter is also different.

This guide will show you how to load DreamBooth, textual inversion, and LoRA weights.

<Tip>

Feel free to browse the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer), [LoRA the Explorer](https://huggingface.co/spaces/multimodalart/LoraTheExplorer), and the [Diffusers Models Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) for checkpoints and embeddings to use.

</Tip>

## DreamBooth

[DreamBooth](https://dreambooth.github.io/) fine-tunes an *entire diffusion model* on just several images of a subject to generate images of that subject in new styles and settings. This method works by using a special word in the prompt that the model learns to associate with the subject images. Of all the training methods, DreamBooth produces the largest file size (usually a few GBs) because it is a full checkpoint model.

Let's load the [herge_style](https://huggingface.co/sd-dreambooth-library/herge-style) checkpoint, which is trained on just 10 images drawn by Hergé, to generate images in that style. For it to work, you need to include the special word `herge_style` in your prompt to trigger the checkpoint:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("sd-dreambooth-library/herge-style", torch_dtype=torch.float16).to("cuda")
prompt = "A cute herge_style brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration"
image = pipeline(prompt).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_dreambooth.png" />
</div>

## Textual inversion

[Textual inversion](https://textual-inversion.github.io/) is very similar to DreamBooth, and it can also personalize a diffusion model to generate certain concepts (styles, objects) from just a few images. This method works by training and finding new embeddings that represent the images you provide with a special word in the prompt. As a result, the diffusion model weights stay the same, and the training process produces a relatively tiny (a few KBs) file.

Because textual inversion creates embeddings, it cannot be used on its own like DreamBooth and requires another model.

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
```

Now you can load the textual inversion embeddings with the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] method and generate some images. Let's load the [sd-concepts-library/gta5-artwork](https://huggingface.co/sd-concepts-library/gta5-artwork) embeddings.
You'll need to include the special word `<gta5-artwork>` in your prompt to trigger it:

```py
pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")
prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration, <gta5-artwork> style"
image = pipeline(prompt).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_txt_embed.png" />
</div>

Textual inversion can also be trained on undesirable things to create *negative embeddings* that discourage a model from generating images with those undesirable things, like blurry images or extra fingers on a hand. This can be an easy way to quickly improve your prompt. You'll also load the embeddings with [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`], but this time you'll need two more parameters:

- `weight_name`: specifies the weight file to load if the file was saved in the 🤗 Diffusers format with a specific name, or if the file is stored in the A1111 format.
- `token`: specifies the special word to use in the prompt to trigger the embeddings.

Let's load the [sayakpaul/EasyNegative-test](https://huggingface.co/sayakpaul/EasyNegative-test) embeddings:

```py
pipeline.load_textual_inversion(
    "sayakpaul/EasyNegative-test", weight_name="EasyNegative.safetensors", token="EasyNegative"
)
```

Now you can use the `token` to generate an image with the negative embeddings:

```py
prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration, EasyNegative"
negative_prompt = "EasyNegative"

image = pipeline(prompt, negative_prompt=negative_prompt, num_inference_steps=50).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png" />
</div>

## LoRA

[Low-Rank Adaptation (LoRA)](https://huggingface.co/papers/2106.09685) is a popular training technique because it is fast and produces smaller file sizes (a couple hundred MBs). Like the other methods in this guide, LoRA can train a model to learn new styles from just a few images. It works by inserting new weights into the diffusion model and then only training the new weights instead of the entire model. This makes LoRAs faster to train and easier to store.

<Tip>

LoRA is a very general training technique that can be used with other training methods. For example, it is common to train a model with DreamBooth and LoRA. It is also increasingly common to load and merge multiple LoRAs to create new and unique images. Merging is outside the scope of this loading guide, so you can learn more about it in the in-depth [Merge LoRAs](merge_loras) guide.

</Tip>

LoRAs also need to be used with another model:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
```

Then use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the [ostris/super-cereal-sdxl-lora](https://huggingface.co/ostris/super-cereal-sdxl-lora) weights and specify the weights filename from the repository:

```py
pipeline.load_lora_weights("ostris/super-cereal-sdxl-lora", weight_name="cereal_box_sdxl_v1.safetensors")
prompt = "bears, pizza bites"
image = pipeline(prompt).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_lora.png" />
</div>

The [`~loaders.LoraLoaderMixin.load_lora_weights`] method loads LoRA weights into both the UNet and text encoder. It is the preferred way to load a LoRA because it can handle cases where:

- the LoRA weights don't have separate identifiers for the UNet and text encoder
- the LoRA weights have separate identifiers for the UNet and text encoder

But if you only need to load LoRA weights into the UNet, you can use the [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method.
Let's load the [jbilcke-hf/sdxl-cinematic-1](https://huggingface.co/jbilcke-hf/sdxl-cinematic-1) LoRA:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.unet.load_attn_procs("jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors")

# use cnmt in the prompt to trigger the LoRA
prompt = "A cute cnmt eating a slice of pizza, stunning color scheme, masterpiece, illustration"
image = pipeline(prompt).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_attn_proc.png" />
</div>

To unload the LoRA weights, use the [`~loaders.LoraLoaderMixin.unload_lora_weights`] method to discard the LoRA weights and restore the model to its original weights:

```py
pipeline.unload_lora_weights()
```

### Adjust the LoRA weight scale

For both [`~loaders.LoraLoaderMixin.load_lora_weights`] and [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`], you can pass the `cross_attention_kwargs={"scale": 0.5}` parameter to adjust how much of the LoRA weights to use. A value of `0` is the same as only using the base model weights, and a value of `1` is equivalent to using the fully fine-tuned LoRA.

For more granular control over the amount of LoRA weights used per layer, you can use [`~loaders.LoraLoaderMixin.set_adapters`] and pass a dictionary specifying how much to scale the weights in each layer by.

```python
pipe = ...  # create pipeline
pipe.load_lora_weights(..., adapter_name="my_adapter")
scales = {
    "text_encoder": 0.5,
    "text_encoder_2": 0.5,  # only usable if pipe has a 2nd text encoder
    "unet": {
        "down": 0.9,  # all transformers in the down-part will use scale 0.9
        # "mid"  # in this example "mid" is not given, therefore all transformers in the mid part will use the default scale 1.0
        "up": {
            "block_0": 0.6,  # all 3 transformers in the 0th block in the up-part will use scale 0.6
            "block_1": [0.4, 0.8, 1.0],  # the 3 transformers in the 1st block in the up-part will use scales 0.4, 0.8 and 1.0 respectively
        }
    }
}
pipe.set_adapters("my_adapter", scales)
```

This also works with multiple adapters; see [this guide](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference#customize-adapters-strength) for how to do it.

<Tip warning={true}>

Currently, [`~loaders.LoraLoaderMixin.set_adapters`] only supports scaling attention weights. If a LoRA has other parts (e.g., resnets or down-/upsamplers), they will keep a scale of 1.0.

</Tip>

### Kohya and TheLastBen

Other popular LoRA trainers from the community include those by [Kohya](https://github.com/kohya-ss/sd-scripts/) and [TheLastBen](https://github.com/TheLastBen/fast-stable-diffusion). These trainers create different LoRA checkpoints than those trained by 🤗 Diffusers, but they can still be loaded in the same way.
<hfoptions id="other-trainers"> <hfoption id="Kohya"> Kohya LoRA를 불러오기 위해, 예시로 [Civitai](https://civitai.com/)에서 [Blueprintify SD XL 1.0](https://civitai.com/models/150986/blueprintify-sd-xl-10) 체크포인트를 다운로드합니다: ```sh !wget https://civitai.com/api/download/models/168776 -O blueprintify-sd-xl-10.safetensors ``` LoRA 체크포인트를 [`~loaders.LoraLoaderMixin.load_lora_weights`] 메서드로 불러오고 `weight_name` 파라미터에 파일명을 지정합니다: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda") pipeline.load_lora_weights("path/to/weights", weight_name="blueprintify-sd-xl-10.safetensors") ``` 이미지를 생성합니다: ```py # LoRA를 트리거하기 위해 bl3uprint를 프롬프트에 사용 prompt = "bl3uprint, a highly detailed blueprint of the eiffel tower, explaining how to build all parts, many txt, blueprint grid backdrop" image = pipeline(prompt).images[0] image ``` <Tip warning={true}> Kohya LoRA를 🤗 Diffusers와 함께 사용할 때 몇 가지 제한 사항이 있습니다: - [여기](https://github.com/huggingface/diffusers/pull/4287/#issuecomment-1655110736)에 설명된 여러 가지 이유로 인해 이미지가 ComfyUI와 같은 UI에서 생성된 이미지와 다르게 보일 수 있습니다. - [LyCORIS 체크포인트](https://github.com/KohakuBlueleaf/LyCORIS)가 완전히 지원되지 않습니다. [`~loaders.LoraLoaderMixin.load_lora_weights`] 메서드는 LoRA 및 LoCon 모듈로 LyCORIS 체크포인트를 불러올 수 있지만, Hada 및 LoKR은 지원되지 않습니다. </Tip> </hfoption> <hfoption id="TheLastBen"> TheLastBen에서 체크포인트를 불러오는 방법은 매우 유사합니다. 예를 들어, [TheLastBen/William_Eggleston_Style_SDXL](https://huggingface.co/TheLastBen/William_Eggleston_Style_SDXL) 체크포인트를 불러오려면: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda") pipeline.load_lora_weights("TheLastBen/William_Eggleston_Style_SDXL", weight_name="wegg.safetensors") # LoRA를 트리거하기 위해 william eggleston를 프롬프트에 사용 prompt = "a house by william eggleston, sunrays, beautiful, sunlight, sunrays, beautiful" image = pipeline(prompt=prompt).images[0] image ``` </hfoption> </hfoptions> ## IP-Adapter [IP-Adapter](https://ip-adapter.github.io/)는 모든 diffusion 모델에 이미지 프롬프트를 사용할 수 있는 경량 어댑터입니다. 이 어댑터는 이미지와 텍스트 feature의 cross-attention 레이어를 분리하여 작동합니다. 다른 모든 모델 컴포넌트튼 freeze되고 UNet의 embedded 이미지 features만 학습됩니다. 따라서 IP-Adapter 파일은 일반적으로 최대 100MB에 불과합니다. 다양한 작업과 구체적인 사용 사례에 IP-Adapter를 사용하는 방법에 대한 자세한 내용은 [IP-Adapter](../using-diffusers/ip_adapter) 가이드에서 확인할 수 있습니다. > [!TIP] > Diffusers는 현재 가장 많이 사용되는 일부 파이프라인에 대해서만 IP-Adapter를 지원합니다. 멋진 사용 사례가 있는 지원되지 않는 파이프라인에 IP-Adapter를 통합하고 싶다면 언제든지 기능 요청을 여세요! > 공식 IP-Adapter 체크포인트는 [h94/IP-Adapter](https://huggingface.co/h94/IP-Adapter)에서 확인할 수 있습니다. 시작하려면 Stable Diffusion 체크포인트를 불러오세요. ```py from diffusers import AutoPipelineForText2Image import torch from diffusers.utils import load_image pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda") ``` 그런 다음 IP-Adapter 가중치를 불러와 [`~loaders.IPAdapterMixin.load_ip_adapter`] 메서드를 사용하여 파이프라인에 추가합니다. ```py pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") ``` 불러온 뒤, 이미지 및 텍스트 프롬프트가 있는 파이프라인을 사용하여 이미지 생성 프로세스를 가이드할 수 있습니다. 
```py
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png")
generator = torch.Generator(device="cpu").manual_seed(33)
images = pipeline(
    prompt='best quality, high quality, wearing sunglasses',
    ip_adapter_image=image,
    negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
    num_inference_steps=50,
    generator=generator,
).images[0]
images
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip-bear.png" />
</div>

### IP-Adapter Plus

IP-Adapter relies on an image encoder to generate image features. If the IP-Adapter repository contains an `image_encoder` subfolder, the image encoder is automatically loaded and registered to the pipeline. Otherwise, you'll need to explicitly load the image encoder with a [`~transformers.CLIPVisionModelWithProjection`] model and pass it to the pipeline.

This is the case for *IP-Adapter Plus* checkpoints, which use the ViT-H image encoder.

```py
from transformers import CLIPVisionModelWithProjection

image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter",
    subfolder="models/image_encoder",
    torch_dtype=torch.float16
)

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    image_encoder=image_encoder,
    torch_dtype=torch.float16
).to("cuda")

pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.safetensors")
```

### IP-Adapter Face ID models

The IP-Adapter FaceID models are experimental IP Adapters that use image embeddings generated by `insightface` instead of CLIP image embeddings. Some of these models also use LoRA to improve ID consistency. You need to install `insightface` and all its requirements to use these models.

<Tip warning={true}>

As InsightFace pretrained models are available for non-commercial research purposes only, IP-Adapter-FaceID models are released exclusively for research purposes and are not intended for commercial use.

</Tip>

```py
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16
).to("cuda")

pipeline.load_ip_adapter("h94/IP-Adapter-FaceID", subfolder=None, weight_name="ip-adapter-faceid_sdxl.bin", image_encoder_folder=None)
```

If you want to use one of the two IP-Adapter FaceID Plus models, you must also load the CLIP image encoder, as these models use both `insightface` and CLIP image embeddings to achieve better photorealism.

```py
from transformers import CLIPVisionModelWithProjection

image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
    torch_dtype=torch.float16,
)

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    image_encoder=image_encoder,
    torch_dtype=torch.float16
).to("cuda")

pipeline.load_ip_adapter("h94/IP-Adapter-FaceID", subfolder=None, weight_name="ip-adapter-faceid-plus_sd15.bin")
```
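As a quick follow-up, here is a minimal sketch of controlling how strongly the adapter steers generation with [`~loaders.IPAdapterMixin.set_ip_adapter_scale`], reusing the plain SD 1.5 IP-Adapter pipeline and `image` from the beginning of this section (the scale value is just illustrative, and this does not apply as-is to the FaceID variants, which expect face embeddings):

```py
# lower values favor the text prompt, higher values favor the image prompt
pipeline.set_ip_adapter_scale(0.6)

result = pipeline(
    prompt="best quality, high quality, wearing sunglasses",
    ip_adapter_image=image,
    negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
    num_inference_steps=50,
).images[0]
result.save("ip-adapter-scale-06.png")
```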
diffusers/docs/source/ko/using-diffusers/loading_adapters.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/loading_adapters.md", "repo_id": "diffusers", "token_count": 12272 }
129
- title: Get started with Diffusers
  sections:
  - local: index
    title: Diffusers
  - local: installation
    title: Installation
  - local: quicktour
    title: Quicktour
  - local: stable_diffusion
    title: Effective and efficient diffusion
- title: DiffusionPipeline
  isExpanded: false
  sections:
  - local: using-diffusers/schedulers
    title: Load schedulers and models
- title: Inference
  isExpanded: false
  sections:
  - local: training/distributed_inference
    title: Distributed inference
- title: Inference optimization
  isExpanded: false
  sections:
  - local: optimization/fp16
    title: Accelerate inference
  - local: optimization/cache
    title: Caching
  - local: optimization/memory
    title: Reduce memory usage
  - local: optimization/speed-memory-optims
    title: Compile and offloading quantized models
  - title: Community optimizations
    sections:
    - local: optimization/pruna
      title: Pruna
    - local: optimization/xformers
      title: xFormers
    - local: optimization/tome
      title: Token merging
    - local: optimization/deepcache
      title: DeepCache
    - local: optimization/tgate
      title: TGATE
    - local: optimization/xdit
      title: xDiT
    - local: optimization/para_attn
      title: ParaAttention
- title: Hybrid Inference
  isExpanded: false
  sections:
  - local: hybrid_inference/overview
    title: Overview
  - local: hybrid_inference/vae_encode
    title: VAE Encode
  - local: hybrid_inference/api_reference
    title: API Reference
- title: Modular Diffusers
  isExpanded: false
  sections:
  - local: modular_diffusers/overview
    title: Overview
  - local: modular_diffusers/quickstart
    title: Quickstart
  - local: modular_diffusers/modular_diffusers_states
    title: States
  - local: modular_diffusers/pipeline_block
    title: ModularPipelineBlocks
  - local: modular_diffusers/sequential_pipeline_blocks
    title: SequentialPipelineBlocks
  - local: modular_diffusers/loop_sequential_pipeline_blocks
    title: LoopSequentialPipelineBlocks
  - local: modular_diffusers/auto_pipeline_blocks
    title: AutoPipelineBlocks
  - local: modular_diffusers/modular_pipeline
    title: ModularPipeline
  - local: modular_diffusers/components_manager
    title: ComponentsManager
  - local: modular_diffusers/guiders
    title: Guiders
- title: Training
  isExpanded: false
  sections:
  - local: training/overview
    title: Overview
  - local: training/adapt_a_model
    title: Adapt a model to a new task
  - title: Models
    sections:
    - local: training/text2image
      title: Text-to-image
    - local: training/kandinsky
      title: Kandinsky 2.2
    - local: training/wuerstchen
      title: Wuerstchen
    - local: training/controlnet
      title: ControlNet
    - local: training/instructpix2pix
      title: InstructPix2Pix
  - title: Methods
    sections:
    - local: training/text_inversion
      title: Textual Inversion
    - local: training/dreambooth
      title: DreamBooth
    - local: training/lora
      title: LoRA
- title: Model accelerators and hardware
  isExpanded: false
  sections:
  - local: optimization/onnx
    title: ONNX
  - local: optimization/open_vino
    title: OpenVINO
  - local: optimization/coreml
    title: Core ML
  - local: optimization/mps
    title: Metal Performance Shaders (MPS)
  - local: optimization/habana
    title: Intel Gaudi
  - local: optimization/neuron
    title: AWS Neuron
- title: Specific pipeline examples
  isExpanded: false
  sections:
  - local: using-diffusers/consisid
    title: ConsisID
- title: Resources
  isExpanded: false
  sections:
  - title: Task recipes
    sections:
    - local: community_projects
      title: Projects built with Diffusers
  - local: conceptual/philosophy
    title: Philosophy
  - local: conceptual/contribution
    title: How to contribute?
  - local: conceptual/ethical_guidelines
    title: Diffusers' Ethical Guidelines
  - local: conceptual/evaluation
    title: Evaluating Diffusion Models
diffusers/docs/source/zh/_toctree.yml/0
{ "file_path": "diffusers/docs/source/zh/_toctree.yml", "repo_id": "diffusers", "token_count": 1405 }
130
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Modular pipeline

[`ModularPipeline`] converts [`~modular_pipelines.ModularPipelineBlocks`] into an executable pipeline that loads models and performs the computation steps defined in the blocks. It is the main interface for running a pipeline and is very similar to the [`DiffusionPipeline`] API.

The main difference is that the pipeline call includes an expected `output` argument.

<hfoptions id="example">
<hfoption id="text-to-image">

```py
import torch
from diffusers.modular_pipelines import SequentialPipelineBlocks
from diffusers.modular_pipelines.stable_diffusion_xl import TEXT2IMAGE_BLOCKS

blocks = SequentialPipelineBlocks.from_blocks_dict(TEXT2IMAGE_BLOCKS)

modular_repo_id = "YiYiXu/modular-loader-t2i-0704"
pipeline = blocks.init_pipeline(modular_repo_id)

pipeline.load_default_components(torch_dtype=torch.float16)
pipeline.to("cuda")

image = pipeline(prompt="Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", output="images")[0]
image.save("modular_t2i_out.png")
```

</hfoption>
<hfoption id="image-to-image">

```py
import torch
from diffusers.modular_pipelines import SequentialPipelineBlocks
from diffusers.modular_pipelines.stable_diffusion_xl import IMAGE2IMAGE_BLOCKS
from diffusers.utils import load_image

blocks = SequentialPipelineBlocks.from_blocks_dict(IMAGE2IMAGE_BLOCKS)

modular_repo_id = "YiYiXu/modular-loader-t2i-0704"
pipeline = blocks.init_pipeline(modular_repo_id)

pipeline.load_default_components(torch_dtype=torch.float16)
pipeline.to("cuda")

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png"
init_image = load_image(url)
prompt = "a dog catching a frisbee in the jungle"

image = pipeline(prompt=prompt, image=init_image, strength=0.8, output="images")[0]
image.save("modular_i2i_out.png")
```

</hfoption>
<hfoption id="inpainting">

```py
import torch
from diffusers.modular_pipelines import SequentialPipelineBlocks
from diffusers.modular_pipelines.stable_diffusion_xl import INPAINT_BLOCKS
from diffusers.utils import load_image

blocks = SequentialPipelineBlocks.from_blocks_dict(INPAINT_BLOCKS)

modular_repo_id = "YiYiXu/modular-loader-t2i-0704"
pipeline = blocks.init_pipeline(modular_repo_id)

pipeline.load_default_components(torch_dtype=torch.float16)
pipeline.to("cuda")

img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png"
mask_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-inpaint-mask.png"

init_image = load_image(img_url)
mask_image = load_image(mask_url)
prompt = "A deep sea diver floating"

image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.85, output="images")[0]
image.save("modular_inpaint_out.png")
```

</hfoption>
</hfoptions>

This guide shows you how to create a [`ModularPipeline`] and manage the components in it.

## Adding blocks

Blocks are [`InsertableDict`] objects that can be inserted at specific positions, providing a flexible way to mix-and-match blocks.

Use [`~modular_pipelines.modular_pipeline_utils.InsertableDict.insert`] on either the block class or the `sub_blocks` attribute to add a block.

```py
# BLOCKS is a dict of block classes; add the class to it
BLOCKS.insert("block_name", BlockClass, index)
# the sub_blocks attribute contains instances; add a block instance to this attribute
t2i_blocks.sub_blocks.insert("block_name", block_instance, index)
```

Use [`~modular_pipelines.modular_pipeline_utils.InsertableDict.pop`] on the block class or the `sub_blocks` attribute to remove a block.

```py
# remove a block class from the preset
BLOCKS.pop("text_encoder")
# split off a block instance
text_encoder_block = t2i_blocks.sub_blocks.pop("text_encoder")
```

Swap blocks by setting an existing block to a new one.

```py
# replace a block class in the preset
BLOCKS["prepare_latents"] = CustomPrepareLatents
# replace in the sub_blocks attribute with a block instance
t2i_blocks.sub_blocks["prepare_latents"] = CustomPrepareLatents()
```

## Creating a pipeline

There are two ways to create a [`ModularPipeline`]: assemble and create one from [`ModularPipelineBlocks`], or load an existing pipeline with [`~ModularPipeline.from_pretrained`].

You should also initialize a [`ComponentsManager`] to handle device placement, memory, and component management.

> [!TIP]
> Refer to the [ComponentsManager](./components_manager) docs for more details about how it helps manage components across different workflows.

<hfoptions id="create">
<hfoption id="ModularPipelineBlocks">

Use the [`~ModularPipelineBlocks.init_pipeline`] method to create a [`ModularPipeline`] from the component and configuration specifications. This method loads the *specifications* from a `modular_model_index.json` file, but it doesn't load the *models* yet.

```py
from diffusers import ComponentsManager
from diffusers.modular_pipelines import SequentialPipelineBlocks
from diffusers.modular_pipelines.stable_diffusion_xl import TEXT2IMAGE_BLOCKS

t2i_blocks = SequentialPipelineBlocks.from_blocks_dict(TEXT2IMAGE_BLOCKS)

modular_repo_id = "YiYiXu/modular-loader-t2i-0704"
components = ComponentsManager()
t2i_pipeline = t2i_blocks.init_pipeline(modular_repo_id, components_manager=components)
```

</hfoption>
<hfoption id="from_pretrained">

The [`~ModularPipeline.from_pretrained`] method creates a [`ModularPipeline`] from a modular repository on the Hub.

```py
from diffusers import ModularPipeline, ComponentsManager

components = ComponentsManager()
pipeline = ModularPipeline.from_pretrained("YiYiXu/modular-loader-t2i-0704", components_manager=components)
```

Add the `trust_remote_code` argument to load a custom [`ModularPipeline`].

```py
from diffusers import ModularPipeline, ComponentsManager

components = ComponentsManager()
modular_repo_id = "YiYiXu/modular-diffdiff-0704"
diffdiff_pipeline = ModularPipeline.from_pretrained(modular_repo_id, trust_remote_code=True, components_manager=components)
```

</hfoption>
</hfoptions>

## Loading components

A [`ModularPipeline`] doesn't automatically instantiate the components. It only loads the configuration and component specifications. You can load all components with [`~ModularPipeline.load_default_components`], or only load specific components with [`~ModularPipeline.load_components`].

<hfoptions id="load">
<hfoption id="load_default_components">

```py
import torch

t2i_pipeline.load_default_components(torch_dtype=torch.float16)
t2i_pipeline.to("cuda")
```

</hfoption>
<hfoption id="load_components">

The example below only loads the UNet and VAE.

```py
import torch

t2i_pipeline.load_components(names=["unet", "vae"], torch_dtype=torch.float16)
```

</hfoption>
</hfoptions>

Print the pipeline to inspect the loaded pretrained components.

```py
t2i_pipeline
```

This should match the `modular_model_index.json` file from the modular repository the pipeline was initialized from. If the pipeline doesn't need a component, it isn't included even if it exists in the modular repository.

To modify where components are loaded from, edit the `modular_model_index.json` file in the repository and change it to your desired loading path. The example below loads a UNet from a different repository.

```json
# original
"unet": [
  null, null,
  {
    "repo": "stabilityai/stable-diffusion-xl-base-1.0",
    "subfolder": "unet",
    "variant": "fp16"
  }
]

# modified
"unet": [
  null, null,
  {
    "repo": "RunDiffusion/Juggernaut-XL-v9",
    "subfolder": "unet",
    "variant": "fp16"
  }
]
```

### Component loading status

The pipeline properties below provide more information about which components are loaded.

Use `component_names` to return all expected components.

```py
t2i_pipeline.component_names
['text_encoder', 'text_encoder_2', 'tokenizer', 'tokenizer_2', 'guider', 'scheduler', 'unet', 'vae', 'image_processor']
```

Use `null_component_names` to return components that aren't loaded yet. Load them with [`~ModularPipeline.from_pretrained`].

```py
t2i_pipeline.null_component_names
['text_encoder', 'text_encoder_2', 'tokenizer', 'tokenizer_2', 'scheduler']
```

Use `pretrained_component_names` to return components that will be loaded from pretrained models.

```py
t2i_pipeline.pretrained_component_names
['text_encoder', 'text_encoder_2', 'tokenizer', 'tokenizer_2', 'scheduler', 'unet', 'vae']
```

Use `config_component_names` to return components that are created with default config values (not loaded from a modular repository). Components from configs aren't included because they are already initialized during pipeline creation. This is why they aren't listed in `null_component_names`.

```py
t2i_pipeline.config_component_names
['guider', 'image_processor']
```

## Updating components

Components may be updated depending on whether they are a *pretrained component* or a *config component*.

> [!WARNING]
> A component may switch from pretrained to config when it's updated. The component type is initially defined in a block's `expected_components` field.

A pretrained component is updated with [`ComponentSpec`], whereas a config component is updated either by directly passing the object or with [`ComponentSpec`].
The [`ComponentSpec`] shows `default_creation_method="from_pretrained"` for pretrained components and `default_creation_method="from_config"` for config components.

To update a pretrained component, create a [`ComponentSpec`] with the name of the component and where to load it from. Use the [`~ComponentSpec.load`] method to load the component.

```py
import torch
from diffusers import ComponentSpec, UNet2DConditionModel

unet_spec = ComponentSpec(name="unet", type_hint=UNet2DConditionModel, repo="stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", variant="fp16")
unet2 = unet_spec.load(torch_dtype=torch.float16)
```

The [`~ModularPipeline.update_components`] method replaces the original component with the new one.

```py
t2i_pipeline.update_components(unet=unet2)
```

When a component is updated, the loading specifications are also updated in the pipeline config.

### Component extraction and modification

When you use [`~ComponentSpec.load`], the new component keeps its loading specifications. This makes it possible to extract the spec and recreate the component.

```py
spec = ComponentSpec.from_component("unet", unet2)
spec
ComponentSpec(name='unet', type_hint=<class 'diffusers.models.unets.unet_2d_condition.UNet2DConditionModel'>, description=None, config=None, repo='stabilityai/stable-diffusion-xl-base-1.0', subfolder='unet', variant='fp16', revision=None, default_creation_method='from_pretrained')
unet2_recreated = spec.load(torch_dtype=torch.float16)
```

The [`~ModularPipeline.get_component_spec`] method gets a copy of the current component spec to modify or update.

```py
unet_spec = t2i_pipeline.get_component_spec("unet")
unet_spec
ComponentSpec(
    name='unet',
    type_hint=<class 'diffusers.models.unets.unet_2d_condition.UNet2DConditionModel'>,
    repo='RunDiffusion/Juggernaut-XL-v9',
    subfolder='unet',
    variant='fp16',
    default_creation_method='from_pretrained'
)

# modify to load from a different repository
unet_spec.repo = "stabilityai/stable-diffusion-xl-base-1.0"

# load the component with the modified spec
unet = unet_spec.load(torch_dtype=torch.float16)
```

## Modular repository

A repository is required if the pipeline blocks use *pretrained components*. The repository supplies the loading specifications and metadata.

[`ModularPipeline`] specifically requires a *modular repository* (see this [example repository](https://huggingface.co/YiYiXu/modular-diffdiff)), which is more flexible than a typical repository. It contains a `modular_model_index.json` file with the following 3 elements.

- `library` and `class` show which library a component was loaded from and its class. A `null` value means the component hasn't been loaded yet.
- `loading_specs_dict` contains the information required to load a component, such as the repository and subfolder to load it from.

Unlike a standard repository, a modular repository can fetch components from different repositories based on the `loading_specs_dict`. Components don't need to live in the same repository.

A modular repository may contain custom code for loading a [`ModularPipeline`]. This lets you use specialized blocks that aren't native to Diffusers.

```
modular-diffdiff-0704/
├── block.py                    # custom pipeline block implementation
├── config.json                 # pipeline config and auto_map
└── modular_model_index.json    # component loading specifications
```

The [config.json](https://huggingface.co/YiYiXu/modular-diffdiff-0704/blob/main/config.json) file contains an `auto_map` key that points to where the custom blocks are defined in `block.py`.

```json
{
  "_class_name": "DiffDiffBlocks",
  "auto_map": {
    "ModularPipelineBlocks": "block.DiffDiffBlocks"
  }
}
```
diffusers/docs/source/zh/modular_diffusers/modular_pipeline.md/0
{ "file_path": "diffusers/docs/source/zh/modular_diffusers/modular_pipeline.md", "repo_id": "diffusers", "token_count": 6590 }
131
# Pruna

[Pruna](https://github.com/PrunaAI/pruna) is a model optimization framework that offers a variety of optimization methods, such as quantization, pruning, caching, and compilation, for accelerating inference and reducing memory usage. An overview of the optimization methods is shown below.

| Technique    | Description                                                                                                   | Speed | Memory | Quality |
|--------------|---------------------------------------------------------------------------------------------------------------|:-----:|:------:|:-------:|
| `batcher`    | Groups multiple inputs together to be processed simultaneously, improving computational efficiency and reducing processing time. | ✅ | ❌ | ➖ |
| `cacher`     | Stores intermediate results of computations to speed up subsequent operations.                                 | ✅ | ➖ | ➖ |
| `compiler`   | Optimizes the model instructions for specific hardware.                                                        | ✅ | ➖ | ➖ |
| `distiller`  | Trains a smaller, simpler model to mimic a larger, more complex model.                                         | ✅ | ✅ | ❌ |
| `quantizer`  | Reduces the precision of weights and activations, lowering memory requirements.                                | ✅ | ✅ | ❌ |
| `pruner`     | Removes less important or redundant connections and neurons, resulting in a sparser, more efficient network.   | ✅ | ✅ | ❌ |
| `recoverer`  | Restores the performance of a model after compression.                                                         | ➖ | ➖ | ✅ |
| `factorizer` | Batches several small matrix multiplications into one large fused operation.                                   | ✅ | ➖ | ➖ |
| `enhancer`   | Enhances the model output by applying post-processing algorithms such as denoising or upscaling.               | ❌ | - | ✅ |

✅ (improves), ➖ (approximately the same), ❌ (worsens)

Explore the full range of optimization methods in the [Pruna documentation](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html#configure-algorithms).

## Installation

Install Pruna with the following command.

```bash
pip install pruna
```

## Optimize Diffusers models

A broad range of optimization algorithms are supported for Diffusers models, as shown below.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/PrunaAI/documentation-images/resolve/main/diffusers/diffusers_combinations.png" alt="Overview of the optimization algorithms supported for Diffusers models">
</div>

The example below optimizes [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) with a combination of the factorizer, compiler, and cacher algorithms. This combination accelerates inference by up to 4.2x and cuts peak GPU memory usage from 34.7GB to 28.0GB, all while maintaining virtually the same output quality.

> [!TIP]
> Refer to the [Pruna optimization](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html) docs to learn more about these techniques.

The optimization techniques used in this example are shown below.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/PrunaAI/documentation-images/resolve/main/diffusers/flux_combination.png" alt="Illustration of the optimization techniques for FLUX.1-dev, combining the factorizer, compiler, and cacher algorithms">
</div>

Start by defining a `SmashConfig` with the optimization algorithms to use. To optimize the model, wrap the pipeline and the `SmashConfig` with `smash`, and then use the pipeline as usual for inference.

```python
import torch
from diffusers import FluxPipeline

from pruna import PrunaModel, SmashConfig, smash

# load the model
# try segmind/Segmind-Vega or black-forest-labs/FLUX.1-schnell with a small GPU memory
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
).to("cuda")

# define the config
smash_config = SmashConfig()
smash_config["factorizer"] = "qkv_diffusers"
smash_config["compiler"] = "torch_compile"
smash_config["torch_compile_target"] = "module_list"
smash_config["cacher"] = "fora"
smash_config["fora_interval"] = 2

# for the best speed results, you can add these configs
# but they will increase the warmup time from 1.5 minutes to 10 minutes
# smash_config["torch_compile_mode"] = "max-autotune-no-cudagraphs"
# smash_config["quantizer"] = "torchao"
# smash_config["torchao_quant_type"] = "fp8dq"
# smash_config["torchao_excluded_modules"] = "norm+embedding"

# smash the model
smashed_pipe = smash(pipe, smash_config)

# run the model
smashed_pipe("a knitted purple prune").images[0]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/PrunaAI/documentation-images/resolve/main/diffusers/flux_smashed_comparison.png">
</div>

After optimization, we can share and load the optimized model using the Hugging Face Hub.

```python
# save the model
smashed_pipe.save_to_hub("<username>/FLUX.1-dev-smashed")

# load the model
smashed_pipe = PrunaModel.from_hub("<username>/FLUX.1-dev-smashed")
```

## Evaluate and benchmark Diffusers models

Pruna provides the [EvaluationAgent](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/evaluate.html) to evaluate the quality of your optimized models.

We can define the metrics we care about, such as total time and throughput, and the dataset to evaluate on. We can define a model and pass it to the `EvaluationAgent`.

<hfoptions id="eval">
<hfoption id="optimized model">

We can load and evaluate the optimized model with the `EvaluationAgent` by passing it to a `Task`.

```python
import torch
from diffusers import FluxPipeline

from pruna import PrunaModel
from pruna.data.pruna_datamodule import PrunaDataModule
from pruna.evaluation.evaluation_agent import EvaluationAgent
from pruna.evaluation.metrics import (
    ThroughputMetric,
    TorchMetricWrapper,
    TotalTimeMetric,
)
from pruna.evaluation.task import Task

# define the device
device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

# load the model
# try PrunaAI/Segmind-Vega-smashed or PrunaAI/FLUX.1-dev-smashed with a small GPU memory
smashed_pipe = PrunaModel.from_hub("PrunaAI/FLUX.1-dev-smashed")

# define the metrics
metrics = [
    TotalTimeMetric(n_iterations=20, n_warmup_iterations=5),
    ThroughputMetric(n_iterations=20, n_warmup_iterations=5),
    TorchMetricWrapper("clip"),
]

# define the datamodule
datamodule = PrunaDataModule.from_string("LAION256")
datamodule.limit_datasets(10)

# define the task and evaluation agent
task = Task(metrics, datamodule=datamodule, device=device)
eval_agent = EvaluationAgent(task)

# evaluate the optimized model and offload it to CPU
smashed_pipe.move_to_device(device)
smashed_pipe_results = eval_agent.evaluate(smashed_pipe)
smashed_pipe.move_to_device("cpu")
```

</hfoption>
<hfoption id="standalone model">

In addition to comparing an optimized model against its base model, you can also evaluate a standalone `diffusers` model. This is useful when you want to evaluate a model's performance without considering optimization. We can do this by wrapping the model in `PrunaModel` and running the `EvaluationAgent` on it.

```python
import torch
from diffusers import FluxPipeline

from pruna import PrunaModel

# load the model
# try PrunaAI/Segmind-Vega-smashed or PrunaAI/FLUX.1-dev-smashed with a small GPU memory
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
).to("cpu")
wrapped_pipe = PrunaModel(model=pipe)
```

</hfoption>
</hfoptions>

Now that you know how to optimize and evaluate your models, you can start using Pruna to optimize your own models. Luckily, there are many examples to help you get started.

> [!TIP]
> For more details about benchmarking Flux, check out the [Announcing FLUX-Juiced: The Fastest Image Generation Endpoint (2.6x faster)!](https://huggingface.co/blog/PrunaAI/flux-fastest-image-generation-endpoint) blog post and the [InferBench](https://huggingface.co/spaces/PrunaAI/InferBench) Space.

## Reference

- [Pruna](https://github.com/pruna-ai/pruna)
- [Pruna optimization](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html#configure-algorithms)
- [Pruna evaluation](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/evaluate.html)
- [Pruna tutorials](https://docs.pruna.ai/en/stable/docs_pruna/tutorials/index.html)
diffusers/docs/source/zh/optimization/pruna.md/0
{ "file_path": "diffusers/docs/source/zh/optimization/pruna.md", "repo_id": "diffusers", "token_count": 4516 }
132
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Text-to-image

<Tip warning={true}>

The text-to-image training script is experimental, and it's prone to issues like overfitting and catastrophic forgetting. Try exploring different hyperparameters to get the best results on your dataset.

</Tip>

Text-to-image models like Stable Diffusion generate an image from a text prompt.

Training a model is demanding on hardware, but if you enable `gradient_checkpointing` and `mixed_precision`, it is possible to train a model on a single 24GB GPU. If you're training with larger batch sizes or want to train faster, it's better to use a GPU with more than 30GB of memory. You can reduce memory usage by enabling memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing, gradient accumulation, or xFormers. A GPU with more than 30GB of memory or a TPU v3 is recommended for training with Flax.

This guide takes a close look at the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) training script to help you understand how it works and how to adapt it for your own use case.

Before running the script, make sure you install the library from source:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then navigate to the example folder containing the training script and install the required dependencies:

<hfoptions id="installation">
<hfoption id="PyTorch">

```bash
cd examples/text_to_image
pip install -r requirements.txt
```

</hfoption>
<hfoption id="Flax">

```bash
cd examples/text_to_image
pip install -r requirements_flax.txt
```

</hfoption>
</hfoptions>

<Tip>

🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed precision. It automatically configures your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.

</Tip>

Initialize an 🤗 Accelerate environment:

```bash
accelerate config
```

To set up a default configuration environment without making interactive choices:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to prepare a dataset that works with the training script.

## Script parameters

<Tip>

The following sections highlight parts of the training script that are important for understanding how to modify it, but the full parameter descriptions can be found in the [script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) itself. Feel free to reach out if you have any questions.

</Tip>

The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L193) function. This function provides default values for each parameter, such as the training batch size and learning rate, but you can also override them via the command line.

For example, to speed up training with fp16 mixed precision:

```bash
accelerate launch train_text_to_image.py \
  --mixed_precision="fp16"
```

Some basic and important parameters include:

- `--pretrained_model_name_or_path`: the name of the model on the Hub or a local path to the pretrained model
- `--dataset_name`: the name of the dataset on the Hub or a local path to the training dataset
- `--image_column`: the name of the image column in the dataset
- `--caption_column`: the name of the text column in the dataset
- `--output_dir`: where to save the trained model
- `--push_to_hub`: whether to push the trained model to the Hub
- `--checkpointing_steps`: the number of steps between saving model checkpoints; if training is interrupted, you can add `--resume_from_checkpoint` to resume training from that checkpoint

### Min-SNR weighting

The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help speed up convergence by rebalancing the loss function. The training script supports predicting either `epsilon` (noise) or `v_prediction`, and Min-SNR is compatible with both prediction types. This weighting strategy is only available in PyTorch; the Flax training script doesn't support it.

Add the `--snr_gamma` parameter and set it to the recommended value of 5.0:

```bash
accelerate launch train_text_to_image.py \
  --snr_gamma=5.0
```

You can compare the loss surfaces for different `snr_gamma` values in this [Weights and Biases](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) report. On small datasets, the effects of Min-SNR may not be as noticeable as on larger datasets.

## Training script

The dataset preprocessing code and training loop are found in the [`main()`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L490) function. If you need to adapt the training script, this is where you'll make your changes.

The `train_text_to_image` script starts by [loading a scheduler](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L543) and tokenizer. You can swap in a different scheduler here:

```py
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
tokenizer = CLIPTokenizer.from_pretrained(
    args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
)
```

Next, the script [loads the UNet model](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L619):

```py
load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
model.register_to_config(**load_model.config)

model.load_state_dict(load_model.state_dict())
```

Then the text and image columns of the dataset are preprocessed. The [`tokenize_captions`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L724) function handles tokenizing the text, and the [`train_transforms`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L742) function defines the image augmentations; both are bundled into `preprocess_train`:

```py
def preprocess_train(examples):
    images = [image.convert("RGB") for image in examples[image_column]]
    examples["pixel_values"] = [train_transforms(image) for image in images]
    examples["input_ids"] = tokenize_captions(examples)
    return examples
```

Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L878) handles everything else: encoding images into latent space, adding noise, computing the text embedding conditioning, updating the model parameters, and saving and pushing the model to the Hub. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial, which breaks down the core logic of the denoising process.

## Launch the script

Once you've made all your changes, you're ready to launch the training script! 🚀

<hfoptions id="training-inference">
<hfoption id="PyTorch">

Let's train on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset to generate Naruto characters. Set the `MODEL_NAME` and `dataset_name` environment variables to the model and the dataset (either from the Hub or a local path). If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.

<Tip>

To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment variables to the path of the dataset and where to save the model.

</Tip>

```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export dataset_name="lambdalabs/naruto-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$dataset_name \
  --use_ema \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --enable_xformers_memory_efficient_attention \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --output_dir="sd-naruto-model" \
  --push_to_hub
```

</hfoption>
<hfoption id="Flax">

Training with Flax can be more efficient on TPUs and GPUs thanks to [@duongna21](https://github.com/duongna21). TPUs are more performant, but GPUs work great as well.

Set the `MODEL_NAME` and `dataset_name` environment variables to the model and the dataset (either from the Hub or a local path).

<Tip>

To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment variables to the path of the dataset and where to save the model.

</Tip>

```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export dataset_name="lambdalabs/naruto-blip-captions"

python train_text_to_image_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$dataset_name \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --output_dir="sd-naruto-model" \
  --push_to_hub
```

</hfoption>
</hfoptions>

Once training is complete, you can use your newly trained model for inference:

<hfoptions id="training-inference">
<hfoption id="PyTorch">

```py
from diffusers import StableDiffusionPipeline
import torch

pipeline = StableDiffusionPipeline.from_pretrained("path/to/saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda")

image = pipeline(prompt="yoda").images[0]
image.save("yoda-naruto.png")
```

</hfoption>
<hfoption id="Flax">
id="Flax"> ```py import jax import numpy as np from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxStableDiffusionPipeline pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/saved_model", dtype=jax.numpy.bfloat16) prompt = "yoda naruto" prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # 分片输入和随机数 params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) image.save("yoda-naruto.png") ``` </hfoption> </hfoptions> ## 后续步骤 恭喜完成文生图模型训练!如需进一步使用模型,以下指南可能有所帮助: - 了解如何加载 [LoRA权重](../using-diffusers/loading_adapters#LoRA) 进行推理(如果训练时使用了LoRA) - 在 [文生图](../using-diffusers/conditional_image_generation) 任务指南中,了解引导尺度等参数或提示词加权等技术如何控制生成效果
diffusers/docs/source/zh/training/text2image.md/0
{ "file_path": "diffusers/docs/source/zh/training/text2image.md", "repo_id": "diffusers", "token_count": 5802 }
133
# LoRA finetuning example for CogVideoX

Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers make it possible to control the extent to which the model is adapted toward new training images via a `scale` parameter.

At the moment, LoRA finetuning has only been tested for [CogVideoX-2b](https://huggingface.co/THUDM/CogVideoX-2b).

> [!NOTE]
> The scripts for CogVideoX come with limited support and may not be fully compatible with different training techniques. They are not feature-rich either and simply serve as minimal examples of finetuning to take inspiration from and improve.
>
> A repository containing memory-optimized finetuning scripts with support for multiple resolutions, dataset preparation, captioning, etc. is available [here](https://github.com/a-r-r-o-w/cogvideox-factory), which will be maintained jointly by the CogVideoX and Diffusers team.

## Data Preparation

The training scripts accept data in two formats.

**First data format**

Two files, where one file contains line-separated prompts and another file contains line-separated paths to video data (the paths must be relative to the path you pass when specifying `--instance_data_root`). Let's take a look at an example to understand this better!

Assume you've specified `--instance_data_root` as `/dataset`, and that this directory contains the files: `prompts.txt` and `videos.txt`.

The `prompts.txt` file should contain line-separated prompts:

```
A black and white animated sequence featuring a rabbit, named Rabbity Ribfried, and an anthropomorphic goat in a musical, playful environment, showcasing their evolving interaction.
A black and white animated sequence on a ship's deck features a bulldog character, named Bully Bulldoger, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures.
...
```

The `videos.txt` file should contain line-separated paths to video files. Note that the paths should be _relative_ to the `--instance_data_root` directory.

```
videos/00000.mp4
videos/00001.mp4
...
```

Overall, this is what your dataset would look like if you ran the `tree` command on the dataset root directory:

```
/dataset
├── prompts.txt
├── videos.txt
├── videos
    ├── videos/00000.mp4
    ├── videos/00001.mp4
    ├── ...
```

When using this format, the `--caption_column` must be `prompts.txt` and `--video_column` must be `videos.txt`. A quick way to verify this layout before launching a run is sketched below.
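A mismatched prompt/video pair or a missing file can waste a long training launch, so it can be worth sanity-checking the two files first. Here is a minimal sketch using only the standard library; the `/dataset` root path is just the example above, so adjust it to your `--instance_data_root`:

```python
from pathlib import Path

root = Path("/dataset")  # your --instance_data_root
prompts = root.joinpath("prompts.txt").read_text().splitlines()
videos = root.joinpath("videos.txt").read_text().splitlines()

# every prompt must pair with exactly one video path, and every path must exist
assert len(prompts) == len(videos), f"{len(prompts)} prompts vs {len(videos)} video paths"
for rel_path in videos:
    assert (root / rel_path).is_file(), f"missing video file: {rel_path}"
print(f"OK: {len(videos)} prompt/video pairs")
```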
**Second data format**

You could use a single CSV file. For the sake of this example, assume you have a `metadata.csv` file. The expected format is:

```
<CAPTION_COLUMN>,<PATH_TO_VIDEO_COLUMN>
"""A black and white animated sequence featuring a rabbit, named Rabbity Ribfried, and an anthropomorphic goat in a musical, playful environment, showcasing their evolving interaction.""","""00000.mp4"""
"""A black and white animated sequence on a ship's deck features a bulldog character, named Bully Bulldoger, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures.""","""00001.mp4"""
...
```

In this case, the `--instance_data_root` should be the location where the videos are stored, and `--dataset_name` should be either a path to a local folder or a `load_dataset`-compatible hosted HF Dataset repository or URL. Assuming you have videos of your Minecraft gameplay at `https://huggingface.co/datasets/my-awesome-username/minecraft-videos`, you would have to specify `my-awesome-username/minecraft-videos`.

When using this format, the `--caption_column` must be `<CAPTION_COLUMN>` and `--video_column` must be `<PATH_TO_VIDEO_COLUMN>`.

You are not strictly restricted to the CSV format. Any format works as long as the `load_dataset` method supports the file format to load a basic `<PATH_TO_VIDEO_COLUMN>` and `<CAPTION_COLUMN>`. The reason for going through these dataset organization gymnastics for loading video data is that we found `load_dataset` from the datasets library to not fully support all kinds of video formats. This will undoubtedly be improved in the future.

> [!NOTE]
> CogVideoX works best with long and descriptive LLM-augmented prompts for video generation. We recommend pre-processing your videos by first generating a summary using a VLM and then augmenting the prompts with an LLM. To generate the above captions, we use [MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6) and [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct). A very barebones and no-frills example for this is available [here](https://gist.github.com/a-r-r-o-w/4dee20250e82f4e44690a02351324a4a). The official recommendation for augmenting prompts is [ChatGLM](https://huggingface.co/THUDM?search_models=chatglm), and a length of 50-100 words is considered good.

> [!NOTE]
> It is expected that your dataset is already pre-processed. If not, some basic pre-processing can be done by playing with the following parameters:
> `--height`, `--width`, `--fps`, `--max_num_frames`, `--skip_frames_start` and `--skip_frames_end`.
> Presently, all videos in your dataset should contain the same number of video frames when using a training batch size > 1.

<!-- TODO: Implement frame packing in future to address above issue. -->

## Training

You need to set up your development environment by installing the necessary requirements.
## Training

You need to set up your development environment by installing the necessary requirements. The following packages are required:
- Torch 2.0 or above, based on the training features you are utilizing (might require the latest or nightly versions for quantized/deepspeed training)
- `pip install diffusers transformers accelerate peft huggingface_hub` for all things modeling and training related
- `pip install datasets decord` for loading video training data
- `pip install bitsandbytes` for using 8-bit Adam or AdamW optimizers for memory-optimized training
- `pip install wandb` optionally for monitoring training logs
- `pip install deepspeed` optionally for [DeepSpeed](https://github.com/microsoft/DeepSpeed) training
- `pip install prodigyopt` optionally if you would like to use the Prodigy optimizer for training

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell (e.g., a notebook):

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

When running `accelerate config`, specifying the torch compile mode as True can yield dramatic speedups. Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment.

If you would like to push your model to the HF Hub after training is completed with a neat model card, make sure you're logged in:

```
hf auth login

# Alternatively, you could upload your model manually using:
# hf upload my-cool-account-name/my-cool-lora-name /path/to/awesome/lora
```

Make sure your data is prepared as described in [Data Preparation](#data-preparation). When ready, you can begin training!

Assuming you are training on 50 videos of a similar concept, we have found 1500-2000 steps to work well. The official recommendation, however, is 100 videos with a total of 4000 steps. Assuming you are training on a single GPU with a `--train_batch_size` of `1` (the arithmetic is spelled out after this list):
- 1500 steps on 50 videos would correspond to `30` training epochs
- 4000 steps on 100 videos would correspond to `40` training epochs
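To sanity-check the relationship between the step budget and epochs for your own dataset, here is the calculation spelled out as a minimal sketch (the numbers are the illustrative ones from above):

```python
# One epoch = num_videos / train_batch_size optimizer steps
# (ignoring gradient accumulation, which further divides the step count).
def implied_epochs(max_train_steps: int, num_videos: int, train_batch_size: int = 1) -> float:
    steps_per_epoch = num_videos / train_batch_size
    return max_train_steps / steps_per_epoch

print(implied_epochs(1500, 50))   # 30.0
print(implied_epochs(4000, 100))  # 40.0
```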
The following bash script launches training for text-to-video LoRA:

```bash
#!/bin/bash

GPU_IDS="0"

accelerate launch --gpu_ids $GPU_IDS examples/cogvideo/train_cogvideox_lora.py \
  --pretrained_model_name_or_path THUDM/CogVideoX-2b \
  --cache_dir <CACHE_DIR> \
  --instance_data_root <PATH_TO_WHERE_VIDEO_FILES_ARE_STORED> \
  --dataset_name my-awesome-name/my-awesome-dataset \
  --caption_column <CAPTION_COLUMN> \
  --video_column <PATH_TO_VIDEO_COLUMN> \
  --id_token <ID_TOKEN> \
  --validation_prompt "<ID_TOKEN> Spiderman swinging over buildings:::A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical atmosphere of this unique musical performance" \
  --validation_prompt_separator ::: \
  --num_validation_videos 1 \
  --validation_epochs 10 \
  --seed 42 \
  --rank 64 \
  --lora_alpha 64 \
  --mixed_precision fp16 \
  --output_dir /raid/aryan/cogvideox-lora \
  --height 480 --width 720 --fps 8 --max_num_frames 49 --skip_frames_start 0 --skip_frames_end 0 \
  --train_batch_size 1 \
  --num_train_epochs 30 \
  --checkpointing_steps 1000 \
  --gradient_accumulation_steps 1 \
  --learning_rate 1e-3 \
  --lr_scheduler cosine_with_restarts \
  --lr_warmup_steps 200 \
  --lr_num_cycles 1 \
  --enable_slicing \
  --enable_tiling \
  --optimizer Adam \
  --adam_beta1 0.9 \
  --adam_beta2 0.95 \
  --max_grad_norm 1.0 \
  --report_to wandb
```

For launching image-to-video finetuning, run the `train_cogvideox_image_to_video_lora.py` file instead. Additionally, you will have to pass `--validation_images` with paths to the initial images corresponding to `--validation_prompt` for I2V validation to work.

To better track our training experiments, we're using the following flags in the command above:
* `--report_to wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
* `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.

Note that setting the `<ID_TOKEN>` is not necessary. From some limited experimentation, we found it to work better (as it resembles [Dreambooth](https://huggingface.co/docs/diffusers/en/training/dreambooth)-like training) than without. When provided, the ID_TOKEN is prepended to the beginning of each prompt. So, if your ID_TOKEN was `"DISNEY"` and your prompt was `"Spiderman swinging over buildings"`, the effective prompt used in training would be `"DISNEY Spiderman swinging over buildings"`. When not provided, you would either be training without any such additional token or could augment your dataset to apply the token where you wish before starting the training.

> [!TIP]
> You can pass `--use_8bit_adam` to reduce the memory requirements of training.
> You can pass `--video_reshape_mode` to enable video cropping functionality, with supported options: ['center', 'random', 'none']. See [this](https://gist.github.com/glide-the/7658dbfd5f555be0a1a687a4139dba40) notebook for examples.

> [!IMPORTANT]
> The following settings have been tested at the time of adding CogVideoX LoRA training support:
> - Our testing was primarily done on CogVideoX-2b. We will work on CogVideoX-5b and CogVideoX-5b-I2V soon.
> - One dataset consisted of 70 training videos of resolution `200 x 480 x 720` (F x H x W). From this, by using frame skipping in data preprocessing, we created two smaller 49-frame and 16-frame datasets for faster experimentation, and because the maximum limit recommended by the CogVideoX team is 49 frames. Out of the 70 videos, we created three groups of 10, 25 and 50 videos. All videos were similar in nature with respect to the concept being trained.
> - 25+ videos worked best for training new concepts and styles.
> - We found that it is better to train with an identifier token that can be specified as `--id_token`. This is similar to Dreambooth-like training, but normal finetuning without such a token works too.
> - The trained concept seemed to work decently well when combined with completely unrelated prompts. We expect even better results if CogVideoX-5B is finetuned.
> - The original repository uses a `lora_alpha` of `1`. We found this unsuitable in many runs, possibly due to differences in modeling backends and training settings. Our recommendation is to set the `lora_alpha` to either `rank` or `rank // 2`.
> - If you're training on data whose captions generate bad results with the original model, a `rank` of 64 and above is good and is also the recommendation by the team behind CogVideoX. If the generations are already moderately good on your training captions, a `rank` of 16/32 should work. We found that setting the rank too low, say `4`, is not ideal and doesn't produce promising results.
> - The authors of CogVideoX recommend 4000 training steps and 100 training videos overall to achieve the best results. While that might yield the best results, we found from our limited experimentation that 2000 steps and 25 videos could also be sufficient.
> - When using the Prodigy optimizer for training, one can follow the recommendations from [this](https://huggingface.co/blog/sdxl_lora_advanced_script) blog. Prodigy tends to overfit quickly. From our very limited testing, we found a learning rate of `0.5` to be suitable in addition to `--prodigy_use_bias_correction`, `--prodigy_safeguard_warmup` and `--prodigy_decouple`.
> - The recommended learning rate by the CogVideoX authors and from our experimentation with Adam/AdamW is between `1e-3` and `1e-4` for a dataset of 25+ videos.
>
> Note that our testing is not exhaustive due to limited time for exploration. Our recommendation would be to play around with the different knobs and dials to find the best settings for your data.

## Inference

Once you have trained a LoRA model, inference can be done by simply loading the LoRA weights into the `CogVideoXPipeline`.

```python
import torch
from diffusers import CogVideoXPipeline
from diffusers.utils import export_to_video

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16)
# pipe.load_lora_weights("/path/to/lora/weights", adapter_name="cogvideox-lora")  # Or,
pipe.load_lora_weights("my-awesome-hf-username/my-awesome-lora-name", adapter_name="cogvideox-lora")  # If loading from the HF Hub
pipe.to("cuda")

# Assuming lora_alpha=32 and rank=64 for training. If different, set accordingly
pipe.set_adapters(["cogvideox-lora"], [32 / 64])

prompt = (
    "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The "
    "panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
    "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
    "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
    "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
    "atmosphere of this unique musical performance"
)
frames = pipe(prompt, guidance_scale=6, use_dynamic_cfg=True).frames[0]
export_to_video(frames, "output.mp4", fps=8)
```

If you've trained a LoRA for `CogVideoXImageToVideoPipeline` instead, everything in the above example remains the same except you must also pass an image as the initial condition for generation, as sketched below.
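A minimal sketch of what that might look like follows. The checkpoint id (`THUDM/CogVideoX-5b-I2V`), the LoRA path, and the image path are placeholder assumptions; adapt them to whatever checkpoint your LoRA was actually trained against:

```python
import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

# Checkpoint id, LoRA path and image path below are placeholder assumptions
pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16)
pipe.load_lora_weights("/path/to/lora/weights", adapter_name="cogvideox-lora")
pipe.to("cuda")

pipe.set_adapters(["cogvideox-lora"], [32 / 64])  # assuming lora_alpha=32 and rank=64

image = load_image("/path/to/first_frame.png")  # the initial condition for generation
prompt = "A panda strumming a miniature acoustic guitar in a serene bamboo forest"
frames = pipe(image=image, prompt=prompt, guidance_scale=6, use_dynamic_cfg=True).frames[0]
export_to_video(frames, "output.mp4", fps=8)
```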
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Union import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = ( unet is not None and hasattr(unet.config, "_diffusers_version") and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") ) is_unet_sample_size_less_64 = ( unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 ) if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. 
If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list(int)`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). """ batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." 
            )

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if latents is None:
            if device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
            else:
                latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        weights: Optional[str] = "",
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages generating images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image.
                Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        device = self._execution_device
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        if "|" in prompt:
            prompt = [x.strip() for x in prompt.split("|")]
            print(f"composing {prompt}...")

            if not weights:
                # specify weights for prompts (excluding the unconditional score)
                print("using equal positive weights (conjunction) for all prompts...")
                weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1)
            else:
                # set prompt weight for each
                num_prompts = len(prompt) if isinstance(prompt, list) else 1
                weights = [float(w.strip()) for w in weights.split("|")]
                # guidance scale as the default
                if len(weights) < num_prompts:
                    weights.append(guidance_scale)
                else:
                    weights = weights[:num_prompts]

                assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts"
                weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1)
        else:
            weights = guidance_scale

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            text_embeddings.dtype,
            device,
            generator,
            latents,
        )

        # composable diffusion
        if isinstance(prompt, list) and batch_size == 1:
            # remove extra unconditional embedding
            # N = one unconditional embed + conditional embeds
            text_embeddings = text_embeddings[len(prompt) - 1 :]

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7.
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = [] for j in range(text_embeddings.shape[0]): noise_pred.append( self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample ) noise_pred = torch.cat(noise_pred, dim=0) # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:] noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum( dim=0, keepdims=True ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) # 10. Convert to PIL if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
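# Minimal usage sketch (illustrative, not part of the pipeline definition). The model id
# below is an assumption; prompts are composed with "|" and weighted per-prompt via the
# `weights` argument (equal weights derived from `guidance_scale` are used when it is empty).
if __name__ == "__main__":
    pipe = ComposableStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
    ).to("cuda")
    result = pipe(
        "a red sports car | a snowy mountain road",  # "|" separates the composed prompts
        weights="7.5 | 7.5",  # one guidance weight per prompt
        num_inference_steps=50,
    )
    result.images[0].save("composed.png")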
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPVisionModelWithProjection,
)

from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    deprecate,
    is_invisible_watermark_available,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import KolorsInpaintPipeline
        >>> from diffusers.utils import load_image

        >>> pipe = KolorsInpaintPipeline.from_pretrained(
        ...     "Kwai-Kolors/Kolors-diffusers",
        ...     torch_dtype=torch.float16,
        ...     variant="fp16",
        ...     use_safetensors=True
        ... )
        >>> pipe.enable_model_cpu_offload()

        >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
        >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

        >>> init_image = load_image(img_url).convert("RGB")
        >>> mask_image = load_image(mask_url).convert("RGB")

        >>> prompt = "A majestic tiger sitting on a bench"
        >>> image = pipe(
        ...     prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80
        ... ).images[0]
        ```
"""


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://huggingface.co/papers/2305.08891).
    See Section 3.4.
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg


def mask_pil_to_torch(mask, height, width):
    # preprocess mask
    if isinstance(mask, (PIL.Image.Image, np.ndarray)):
        mask = [mask]

    if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
        mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
        mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
        mask = mask.astype(np.float32) / 255.0
    elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
        mask = np.concatenate([m[None, None, :] for m in mask], axis=0)

    mask = torch.from_numpy(mask)
    return mask


def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
    """
    Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will
    be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for
    the ``image`` and ``1`` for the ``mask``.

    The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
    binarized (``mask > 0.5``) and cast to ``torch.float32`` too.

    Args:
        image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
            It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
            ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
        mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
            It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
            ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.

    Raises:
        ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range.
        ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range.
        ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
        TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not (or the other way around).

    Returns:
        tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 dimensions: ``batch x channels x
        height x width``.
    """
    # TODO(Yiyi): need to clean this up later
    deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version.
Please use VaeImageProcessor.preprocess instead"
    deprecate(
        "prepare_mask_and_masked_image",
        "0.30.0",
        deprecation_message,
    )
    if image is None:
        raise ValueError("`image` input cannot be undefined.")

    if mask is None:
        raise ValueError("`mask_image` input cannot be undefined.")

    if isinstance(image, torch.Tensor):
        if not isinstance(mask, torch.Tensor):
            mask = mask_pil_to_torch(mask, height, width)

        if image.ndim == 3:
            image = image.unsqueeze(0)

        # Batch and add channel dim for single mask
        if mask.ndim == 2:
            mask = mask.unsqueeze(0).unsqueeze(0)

        # Batch single mask or add channel dim
        if mask.ndim == 3:
            # Single batched mask, no channel dim or single mask not batched but channel dim
            if mask.shape[0] == 1:
                mask = mask.unsqueeze(0)

            # Batched masks no channel dim
            else:
                mask = mask.unsqueeze(1)

        assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
        # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
        assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"

        # Check image is in [-1, 1]
        # if image.min() < -1 or image.max() > 1:
        #     raise ValueError("Image should be in [-1, 1] range")

        # Check mask is in [0, 1]
        if mask.min() < 0 or mask.max() > 1:
            raise ValueError("Mask should be in [0, 1] range")

        # Binarize mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        # Image as float32
        image = image.to(dtype=torch.float32)
    elif isinstance(mask, torch.Tensor):
        raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
    else:
        # preprocess image
        if isinstance(image, (PIL.Image.Image, np.ndarray)):
            image = [image]
        if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            # resize all images w.r.t. the passed height and width
            image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
            image = [np.array(i.convert("RGB"))[None, :] for i in image]
            image = np.concatenate(image, axis=0)
        elif isinstance(image, list) and isinstance(image[0], np.ndarray):
            image = np.concatenate([i[None, :] for i in image], axis=0)

        image = image.transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0

        mask = mask_pil_to_torch(mask, height, width)
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        if image.shape[1] == 4:
            # images are in latent space and thus can't
            # be masked, set masked_image to None
            # we assume that the checkpoint is not an inpainting
            # checkpoint. TODO(Yiyi): need to clean this up later
            masked_image = None
        else:
            masked_image = image * (mask < 0.5)

    # n.b.
ensure backwards compatibility as old function does not return image if return_image: return mask, masked_image, image return mask, masked_image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
        )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class KolorsInpaintPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    StableDiffusionXLLoraLoaderMixin,
    FromSingleFileMixin,
    IPAdapterMixin,
):
    r"""
    Pipeline for text-to-image generation using Kolors.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    The pipeline also inherits the following loading methods:
        - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.safetensors` files
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`ChatGLMModel`]):
            Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
        tokenizer (`ChatGLMTokenizer`):
            Tokenizer of class
            [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
            Whether the `unet` requires an aesthetic_score condition to be passed during inference.
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
            Whether the negative prompt embeddings should always be forced to 0. Also see the config of
            `Kwai-Kolors/Kolors-diffusers`.
        add_watermarker (`bool`, *optional*):
            Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
            watermark output images. If not defined, it will default to True if the package is installed, otherwise no
            watermarker will be used.
""" model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _optional_components = [ "tokenizer", "text_encoder", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "add_neg_time_ids", "mask", "masked_image_latents", ] def __init__( self, vae: AutoencoderKL, text_encoder: ChatGLMModel, tokenizer: ChatGLMTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, requires_aesthetics_score: bool = False, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance ): if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
) image_embeds = [] for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack( [single_negative_image_embeds] * num_images_per_prompt, dim=0 ) if do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) else: repeat_dims = [1] image_embeds = [] for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) single_negative_image_embeds = single_negative_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) ) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) else: single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) image_embeds.append(single_image_embeds) return image_embeds def encode_prompt( self, prompt, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
                If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer]
        text_encoders = [self.text_encoder]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            for tokenizer, text_encoder in zip(tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)

                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=256,
                    truncation=True,
                    return_tensors="pt",
                ).to(self._execution_device)

                output = text_encoder(
                    input_ids=text_inputs["input_ids"],
                    attention_mask=text_inputs["attention_mask"],
                    position_ids=text_inputs["position_ids"],
                    output_hidden_states=True,
                )

                prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
                pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone()  # [batch_size, 4096]
                bs_embed, seq_len, _ = prompt_embeds.shape
                prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
                prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

                prompt_embeds_list.append(prompt_embeds)

            # prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
            prompt_embeds = prompt_embeds_list[0]

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt

        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            # negative_prompt = negative_prompt or ""
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            negative_prompt_embeds_list = []
            for tokenizer, text_encoder in zip(tokenizers, text_encoders):
                # textual inversion: process multi-vector tokens if necessary
                if isinstance(self, TextualInversionLoaderMixin):
                    uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)

                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                ).to(self._execution_device)
                output = text_encoder(
                    input_ids=uncond_input["input_ids"],
                    attention_mask=uncond_input["attention_mask"],
                    position_ids=uncond_input["position_ids"],
                    output_hidden_states=True,
                )
                negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
                negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone()  # [batch_size, 4096]

                if do_classifier_free_guidance:
                    # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                    seq_len = negative_prompt_embeds.shape[1]

                    negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)

                    negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                    negative_prompt_embeds = negative_prompt_embeds.view(
                        batch_size * num_images_per_prompt, seq_len, -1
                    )

                    # For classifier free guidance, we need to do two forward passes.
                    # Here we concatenate the unconditional and text embeddings into a single batch
                    # to avoid doing two forward passes

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            # negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
            negative_prompt_embeds = negative_prompt_embeds_list[0]

        bs_embed = pooled_prompt_embeds.shape[0]
        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        image,
        mask_image,
        height,
        width,
        strength,
        callback_steps,
        output_type,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        ip_adapter_image=None,
        ip_adapter_image_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        padding_mask_crop=None,
    ):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if padding_mask_crop is not None:
            if not isinstance(image, PIL.Image.Image):
                raise ValueError(
                    f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
                )
            if not isinstance(mask_image, PIL.Image.Image):
                raise ValueError(
                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
                    f" {type(mask_image)}."
                )
            if output_type != "pil":
                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")

        if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
            raise ValueError(
                "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
            )

        if ip_adapter_image_embeds is not None:
            if not isinstance(ip_adapter_image_embeds, list):
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
                )
            elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
                )

    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
        image=None,
        timestep=None,
        is_strength_max=True,
        add_noise=True,
        return_noise=False,
        return_image_latents=False,
    ):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if (image is None or timestep is None) and not is_strength_max:
            raise ValueError(
                "Since strength < 1.0, initial latents are to be initialised as a combination of Image + Noise. "
                "However, either the image or the noise timestep has not been provided."
            )

        if image.shape[1] == 4:
            image_latents = image.to(device=device, dtype=dtype)
            image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
        elif return_image_latents or (latents is None and not is_strength_max):
            image = image.to(device=device, dtype=dtype)
            image_latents = self._encode_vae_image(image=image, generator=generator)
            image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)

        if latents is None and add_noise:
            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            # if strength is 1.
then initialise the latents to noise, else initial to image + noise latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) # if pure noise then scale the initial latents by the Scheduler's init sigma latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." 
) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. if denoising_start is not None: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1 # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end timesteps = timesteps[-num_inference_steps:] return timesteps, num_inference_steps return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None, ): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list( negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) ) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + 4096 expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if ( expected_add_embed_dim > passed_add_embed_dim and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." ) elif ( expected_add_embed_dim < passed_add_embed_dim and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." ) elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return add_time_ids, add_neg_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, LoRAXFormersAttnProcessor, LoRAAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def denoising_end(self):
        return self._denoising_end

    @property
    def denoising_start(self):
        return self._denoising_start

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        mask_image: PipelineImageInput = None,
        masked_image_latents: torch.Tensor = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        padding_mask_crop: Optional[int] = None,
        strength: float = 0.9999,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        denoising_start: Optional[float] = None,
        denoising_end: Optional[float] = None,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        original_size: Tuple[int, int] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Tuple[int, int] = None,
        negative_original_size: Optional[Tuple[int, int]] = None,
        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
        negative_target_size: Optional[Tuple[int, int]] = None,
        aesthetic_score: float = 6.0,
        negative_aesthetic_score: float = 2.5,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
                be masked out with `mask_image` and repainted according to `prompt`.
            mask_image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
                to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
                instead of 3, so the expected shape would be `(B, H, W, 1)`.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            padding_mask_crop (`int`, *optional*, defaults to `None`):
                The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied
                to image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region
                with the same aspect ratio as the image that contains all masked areas, and then expand that area
                based on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area
                before resizing to the original image size for inpainting. This is useful when the masked area is
                small while the image is large and contains information irrelevant for inpainting, such as background.
            strength (`float`, *optional*, defaults to 0.9999):
                Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
                between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
                `strength`. The number of denoising steps depends on the amount of noise initially added. When
                `strength` is 1, added noise will be maximum and the denoising process will run for the full number of
                iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
                portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
                integer, the value of `strength` will be ignored.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            denoising_start (`float`, *optional*):
                When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
                bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped
                and it is assumed that the passed `image` is a partly denoised image. Note that when this is
                specified, strength will be ignored. The `denoising_start` parameter is particularly beneficial when
                this pipeline is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in
                [**Refining the Image
                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
            denoising_end (`float`, *optional*):
                When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
                completed before it is intentionally prematurely terminated. As a result, the returned sample will
                still retain a substantial amount of noise (ca.
 final 20% of timesteps still needed) and should be
                denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
                final 20% of the scheduler. The `denoising_end` parameter should ideally be utilized when this
                pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining
                the Image
                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            ip_adapter_image: (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
                provided, embeddings are computed from the `ip_adapter_image` input argument.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts.
 If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the
                position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by
                setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section
                2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                For most cases, `target_size` should be set to the desired height and width of the generated image. If
                not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a target image resolution. It should be the
                same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section
                2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            aesthetic_score (`float`, *optional*, defaults to 6.0):
                Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
 Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
                Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
                simulate an aesthetic score of the generated image by influencing the negative text condition.
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end
                of each denoising step during inference, with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """

        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs
        self.check_inputs(
            prompt,
            image,
            mask_image,
            height,
            width,
            strength,
            callback_steps,
            output_type,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            ip_adapter_image,
            ip_adapter_image_embeds,
            callback_on_step_end_tensor_inputs,
            padding_mask_crop,
        )

        self._guidance_scale = guidance_scale
        self._guidance_rescale = guidance_rescale
        self._cross_attention_kwargs = cross_attention_kwargs
        self._denoising_end = denoising_end
        self._denoising_start = denoising_start
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        text_encoder_lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = self.encode_prompt(
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
        )

        # 4. set timesteps
        def denoising_value_valid(dnv):
            return isinstance(dnv, float) and 0 < dnv < 1

        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )
        timesteps, num_inference_steps = self.get_timesteps(
            num_inference_steps,
            strength,
            device,
            denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
        )
        # check that number of inference steps is not < 1 - as this doesn't make sense
        if num_inference_steps < 1:
            raise ValueError(
                f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
                f" steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
            )
        # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
        is_strength_max = strength == 1.0

        # 5. Preprocess mask and image
        if padding_mask_crop is not None:
            crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
            resize_mode = "fill"
        else:
            crops_coords = None
            resize_mode = "default"

        original_image = image
        init_image = self.image_processor.preprocess(
            image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
        )
        init_image = init_image.to(dtype=torch.float32)

        mask = self.mask_processor.preprocess(
            mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
        )

        if masked_image_latents is not None:
            masked_image = masked_image_latents
        elif init_image.shape[1] == 4:
            # if images are in latent space, we can't mask it
            masked_image = None
        else:
            masked_image = init_image * (mask < 0.5)

        # 6. Prepare latent variables
        num_channels_latents = self.vae.config.latent_channels
        num_channels_unet = self.unet.config.in_channels
        return_image_latents = num_channels_unet == 4

        add_noise = True if self.denoising_start is None else False
        latents_outputs = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
            image=init_image,
            timestep=latent_timestep,
            is_strength_max=is_strength_max,
            add_noise=add_noise,
            return_noise=True,
            return_image_latents=return_image_latents,
        )

        if return_image_latents:
            latents, noise, image_latents = latents_outputs
        else:
            latents, noise = latents_outputs

        # 7. Prepare mask latent variables
        mask, masked_image_latents = self.prepare_mask_latents(
            mask,
            masked_image,
            batch_size * num_images_per_prompt,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            self.do_classifier_free_guidance,
        )

        # 8.
Check that sizes of mask, masked image and latents match if num_channels_unet == 9: # default case for runwayml/stable-diffusion-inpainting num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) elif num_channels_unet != 4: raise ValueError( f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." ) # 8.1 Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 10. Prepare added time ids & embeddings if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) add_time_ids, add_neg_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 11. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if ( self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and self.denoising_start >= self.denoising_end ): raise ValueError( f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + f" {self.denoising_end} when using type float." 
) elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 11.1 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents # concat latents, mask, masked_image_latents in the channel dimension latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs["image_embeds"] = image_embeds noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: # Based on 3.4. in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) if num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: init_mask, _ = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) mask = callback_outputs.pop("mask", mask) masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 self.vae = self.vae.to(latents.dtype) # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: return StableDiffusionXLPipelineOutput(images=latents) # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
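
# ---------------------------------------------------------------------------
# Minimal usage sketch for the pipeline above (illustrative, not part of the
# original file). The "Kwai-Kolors/Kolors-diffusers" checkpoint id, the
# `custom_pipeline` module name, and the asset paths are all assumptions --
# swap in whatever checkpoint and images you actually have.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    from diffusers import DiffusionPipeline
    from diffusers.utils import load_image

    pipe = DiffusionPipeline.from_pretrained(
        "Kwai-Kolors/Kolors-diffusers",  # assumed checkpoint id
        custom_pipeline="pipeline_kolors_inpainting",  # assumed name for this module
        torch_dtype=torch.float16,
    ).to("cuda")

    init_image = load_image("input.png")  # placeholder path
    mask_image = load_image("mask.png")  # placeholder path; white pixels are repainted
    result = pipe(
        prompt="a red velvet sofa in a sunlit living room",
        image=init_image,
        mask_image=mask_image,
        strength=0.8,
        num_inference_steps=30,
    ).images[0]
    result.save("inpainted.png")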
# Source: diffusers/examples/community/pipeline_kolors_inpainting.py
# Copyright Philip Brown, ppbrown@github # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ########################################################################### # This pipeline attempts to use a model that has SDXL vae, T5 text encoder, # and SDXL unet. # At the present time, there are no pretrained models that give pleasing # output. So as yet, (2025/06/10) this pipeline is somewhat of a tech # demo proving that the pieces can at least be put together. # Hopefully, it will encourage someone with the hardware available to # throw enough resources into training one up. from typing import Optional import torch.nn as nn from transformers import ( CLIPImageProcessor, CLIPTokenizer, CLIPVisionModelWithProjection, T5EncoderModel, ) from diffusers import DiffusionPipeline, StableDiffusionXLPipeline from diffusers.image_processor import VaeImageProcessor from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.schedulers import KarrasDiffusionSchedulers # Note: At this time, the intent is to use the T5 encoder mentioned # below, with zero changes. # Therefore, the model deliberately does not store the T5 encoder model bytes, # (Since they are not unique!) # but instead takes advantage of huggingface hub cache loading T5_NAME = "mcmonkey/google_t5-v1_1-xxl_encoderonly" # Caller is expected to load this, or equivalent, as model name for now # eg: pipe = StableDiffusionXL_T5Pipeline(SDXL_NAME) SDXL_NAME = "stabilityai/stable-diffusion-xl-base-1.0" class LinearWithDtype(nn.Linear): @property def dtype(self): return self.weight.dtype class StableDiffusionXL_T5Pipeline(StableDiffusionXLPipeline): _expected_modules = [ "vae", "unet", "scheduler", "tokenizer", "image_encoder", "feature_extractor", "t5_encoder", "t5_projection", "t5_pooled_projection", ] _optional_components = [ "image_encoder", "feature_extractor", "t5_encoder", "t5_projection", "t5_pooled_projection", ] def __init__( self, vae: AutoencoderKL, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, tokenizer: CLIPTokenizer, t5_encoder=None, t5_projection=None, t5_pooled_projection=None, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): DiffusionPipeline.__init__(self) if t5_encoder is None: self.t5_encoder = T5EncoderModel.from_pretrained(T5_NAME, torch_dtype=unet.dtype) else: self.t5_encoder = t5_encoder # ----- build T5 4096 => 2048 dim projection ----- if t5_projection is None: self.t5_projection = LinearWithDtype(4096, 2048) # trainable else: self.t5_projection = t5_projection self.t5_projection.to(dtype=unet.dtype) # ----- build T5 4096 => 1280 dim projection ----- if t5_pooled_projection is None: self.t5_pooled_projection = LinearWithDtype(4096, 1280) # trainable else: self.t5_pooled_projection = t5_pooled_projection self.t5_pooled_projection.to(dtype=unet.dtype) print("dtype of Linear is ", self.t5_projection.dtype) self.register_modules( vae=vae, unet=unet, scheduler=scheduler, 
tokenizer=tokenizer, t5_encoder=self.t5_encoder, t5_projection=self.t5_projection, t5_pooled_projection=self.t5_pooled_projection, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) self.watermark = None # Parts of original SDXL class complain if these attributes are not # at least PRESENT self.text_encoder = self.text_encoder_2 = None # ------------------------------------------------------------------ # Encode a text prompt (T5-XXL + 4096→2048 projection) # Returns exactly four tensors in the order SDXL’s __call__ expects. # ------------------------------------------------------------------ def encode_prompt( self, prompt, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: str | None = None, **_, ): """ Returns ------- prompt_embeds : Tensor [B, T, 2048] negative_prompt_embeds : Tensor [B, T, 2048] | None pooled_prompt_embeds : Tensor [B, 1280] negative_pooled_prompt_embeds: Tensor [B, 1280] | None where B = batch * num_images_per_prompt """ # --- helper to tokenize on the pipeline’s device ---------------- def _tok(text: str): tok_out = self.tokenizer( text, return_tensors="pt", padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, ).to(self.device) return tok_out.input_ids, tok_out.attention_mask # ---------- positive stream ------------------------------------- ids, mask = _tok(prompt) h_pos = self.t5_encoder(ids, attention_mask=mask).last_hidden_state # [b, T, 4096] tok_pos = self.t5_projection(h_pos) # [b, T, 2048] pool_pos = self.t5_pooled_projection(h_pos.mean(dim=1)) # [b, 1280] # expand for multiple images per prompt tok_pos = tok_pos.repeat_interleave(num_images_per_prompt, 0) pool_pos = pool_pos.repeat_interleave(num_images_per_prompt, 0) # ---------- negative / CFG stream -------------------------------- if do_classifier_free_guidance: neg_text = "" if negative_prompt is None else negative_prompt ids_n, mask_n = _tok(neg_text) h_neg = self.t5_encoder(ids_n, attention_mask=mask_n).last_hidden_state tok_neg = self.t5_projection(h_neg) pool_neg = self.t5_pooled_projection(h_neg.mean(dim=1)) tok_neg = tok_neg.repeat_interleave(num_images_per_prompt, 0) pool_neg = pool_neg.repeat_interleave(num_images_per_prompt, 0) else: tok_neg = pool_neg = None # ----------------- final ordered return -------------------------- # 1) positive token embeddings # 2) negative token embeddings (or None) # 3) positive pooled embeddings # 4) negative pooled embeddings (or None) return tok_pos, tok_neg, pool_pos, pool_neg
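
# ---------------------------------------------------------------------------
# Standalone sanity check for the two projection heads defined above (not part
# of the original file). It exercises only `LinearWithDtype`, so no checkpoint
# download is needed; the batch and sequence sizes are arbitrary stand-ins.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    proj = LinearWithDtype(4096, 2048)  # token stream: T5 hidden size -> SDXL cross-attention dim
    pooled_proj = LinearWithDtype(4096, 1280)  # pooled stream: T5 hidden size -> SDXL pooled dim

    hidden = torch.randn(2, 77, 4096)  # stand-in for a T5-XXL last_hidden_state
    tokens = proj(hidden)  # [2, 77, 2048], what the unet cross-attention consumes
    pooled = pooled_proj(hidden.mean(dim=1))  # [2, 1280], what the add_embedding consumes

    assert tokens.shape == (2, 77, 2048)
    assert pooled.shape == (2, 1280)
    print(proj.dtype)  # LinearWithDtype exposes the weight dtype, like other diffusers modules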
# Source: diffusers/examples/community/pipeline_stable_diffusion_xl_t5.py
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline, StableDiffusionMixin):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
) # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(num_inference_steps) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return image return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
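

# Usage sketch (an illustration, not part of the original example): load this
# community pipeline via `custom_pipeline` and drive it with a Whisper model.
# The model ids and the dummy audio dataset below are assumptions.
if __name__ == "__main__":
    from datasets import load_dataset

    device = "cuda" if torch.cuda.is_available() else "cpu"

    ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    audio = ds[3]["audio"]["array"]
    sampling_rate = ds[3]["audio"]["sampling_rate"]

    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        custom_pipeline="speech_to_image_diffusion",
        speech_model=speech_model,
        speech_processor=speech_processor,
    ).to(device)
    image = pipe(audio, sampling_rate=sampling_rate).images[0]
    image.save("speech_to_image.png")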
diffusers/examples/community/speech_to_image_diffusion.py/0
{ "file_path": "diffusers/examples/community/speech_to_image_diffusion.py", "repo_id": "diffusers", "token_count": 5031 }
138
from typing import Callable, List, Optional, Union import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class TextInpainting(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text based inpainting using Stable Diffusion. Uses CLIPSeg to get a mask from the given text, then calls the Inpainting pipeline with the generated mask This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: segmentation_model ([`CLIPSegForImageSegmentation`]): CLIPSeg Model to generate mask from the given text. Please refer to the [model card]() for details. segmentation_processor ([`CLIPSegProcessor`]): CLIPSeg processor to get image, text features to translate prompt to English, if necessary. Please refer to the [model card](https://huggingface.co/docs/transformers/model_doc/clipseg) for details. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ def __init__( self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ): super().__init__() if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. 
`steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if scheduler is not None and getattr(scheduler.config, "skip_prk_steps", True) is False: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["skip_prk_steps"] = True scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. text (`str``): The text to use to generate the mask. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. 
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
""" # We use the input text to generate the mask inputs = self.segmentation_processor( text=[text], images=[image], padding="max_length", return_tensors="pt" ).to(self.device) outputs = self.segmentation_model(**inputs) mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy() mask_pil = self.numpy_to_pil(mask)[0].resize(image.size) # Run inpainting pipeline with the generated mask inpainting_pipeline = StableDiffusionInpaintPipeline( vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, ) return inpainting_pipeline( prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
diffusers/examples/community/text_inpainting.py/0
{ "file_path": "diffusers/examples/community/text_inpainting.py", "repo_id": "diffusers", "token_count": 5460 }
139
# ControlNet training example for FLUX

The `train_controlnet_flux.py` script shows how to implement the ControlNet training procedure and adapt it for [FLUX](https://github.com/black-forest-labs/flux).

Training script provided by LibAI, an institution dedicated to the progress and achievement of artificial general intelligence. LibAI is the developer of [cutout.pro](https://www.cutout.pro/) and [promeai.pro](https://www.promeai.pro/).

> [!NOTE]
> **Memory consumption**
>
> FLUX can be quite expensive to run on consumer hardware devices, and as a result ControlNet training of it comes with higher memory requirements than usual. Here is GPU memory consumption for reference, tested on a single 80GB A100.

| period | GPU |
| - | - |
| load as float32 | ~70G |
| move transformer and vae to bf16 | ~48G |
| precompute text embeddings | ~62G |
| **offload text encoder to cpu** | ~30G |
| training | ~58G |
| validation | ~71G |

> **Gated access**
>
> As the model is gated, before using it with diffusers you first need to go to the [FLUX.1 [dev] Hugging Face page](https://huggingface.co/black-forest-labs/FLUX.1-dev), fill in the form, and accept the gate. Once you are in, you need to log in so that your system knows you've accepted the gate. Use the command below to log in: `hf auth login`

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the `examples/controlnet` folder and run

```bash
pip install -r requirements_flux.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell (e.g., a notebook)

```python
from accelerate.utils import write_basic_config
write_basic_config()
```

When running `accelerate config`, specifying torch compile mode as True can give dramatic speedups.

## Custom Datasets

We support the following dataset formats:

The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script. To use our example, add `--dataset_name=fusing/fill50k \` to the script and remove the `--jsonl_for_train` line mentioned below.

We also support importing data from JSONL files (`xxx.jsonl`); use `--jsonl_for_train` to enable it. Here is a brief example of a JSONL file, followed by a sketch for generating one:

```sh
{"image": "xxx", "text": "xxx", "conditioning_image": "xxx"}
{"image": "xxx", "text": "xxx", "conditioning_image": "xxx"}
```
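A minimal sketch for generating such a file; the paths and captions below are placeholders:

```python
import json

rows = [
    {"image": "images/0001.png", "text": "red circle with blue background", "conditioning_image": "conditions/0001.png"},
    {"image": "images/0002.png", "text": "cyan circle with brown floral background", "conditioning_image": "conditions/0002.png"},
]
# One JSON object per line, as expected by --jsonl_for_train.
with open("train.jsonl", "w") as f:
    for row in rows:
        f.write(json.dumps(row) + "\n")
```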
## Training

Our training examples use two test conditioning images. They can be downloaded by running

```sh
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```

Then run `hf auth login` to log into your Hugging Face account. This is needed to be able to push the trained ControlNet parameters to the Hugging Face Hub.

You can define `num_layers` and `num_single_layers`, which determine the size of the ControlNet (default values are `num_layers=4`, `num_single_layers=10`).

```bash
accelerate launch train_controlnet_flux.py \
 --pretrained_model_name_or_path="black-forest-labs/FLUX.1-dev" \
 --dataset_name=fusing/fill50k \
 --conditioning_image_column=conditioning_image \
 --image_column=image \
 --caption_column=text \
 --output_dir="path to save model" \
 --mixed_precision="bf16" \
 --resolution=512 \
 --learning_rate=1e-5 \
 --max_train_steps=15000 \
 --validation_steps=100 \
 --checkpointing_steps=200 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=1 \
 --gradient_accumulation_steps=16 \
 --report_to="wandb" \
 --lr_scheduler="cosine" \
 --num_double_layers=4 \
 --num_single_layers=0 \
 --seed=42 \
 --push_to_hub \
```

To better track our training experiments, we're using the following flags in the command above:

* `report_to="wandb"` will ensure the training runs are tracked on Weights and Biases.
* `validation_image`, `validation_prompt`, and `validation_steps` allow the script to do a few validation inference runs. This lets us qualitatively check whether the training is progressing as expected.

Our experiments were conducted on a single 80GB A100 GPU.

### Inference

Once training is done, we can perform inference like so:

```python
import torch
from diffusers.utils import load_image
from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline
from diffusers.models.controlnet_flux import FluxControlNetModel

base_model = 'black-forest-labs/FLUX.1-dev'
controlnet_model = 'promeai/FLUX.1-controlnet-lineart-promeai'
controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
pipe = FluxControlNetPipeline.from_pretrained(
    base_model, controlnet=controlnet, torch_dtype=torch.bfloat16
)
# enable memory optimizations
pipe.enable_model_cpu_offload()

control_image = load_image("https://huggingface.co/promeai/FLUX.1-controlnet-lineart-promeai/resolve/main/images/example-control.jpg").resize((1024, 1024))
prompt = "cute anime girl with massive fluffy fennec ears and a big fluffy tail blonde messy long hair blue eyes wearing a maid outfit with a long black gold leaf pattern dress and a white apron mouth open holding a fancy black forest cake with candles on top in the kitchen of an old dark Victorian mansion lit by candlelight with a bright window to the foggy forest and very expensive stuff everywhere"

image = pipe(
    prompt,
    control_image=control_image,
    controlnet_conditioning_scale=0.6,
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("./output.png")
```
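If you trained your own ControlNet, you can load it from the `--output_dir` passed to the training command instead of the Hub checkpoint above. A minimal sketch, assuming the placeholder output directory from the command earlier:

```python
import torch
from diffusers.models.controlnet_flux import FluxControlNetModel

# "path to save model" is the placeholder --output_dir used in the training command above.
controlnet = FluxControlNetModel.from_pretrained("path to save model", torch_dtype=torch.bfloat16)
```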
## Apply DeepSpeed ZeRO-3

This is an experimental process; I am not sure if it is suitable for everyone. We used this process to successfully train at 512 resolution on 8 x A100 (40GB). Please modify some of the code in the script as follows.

### 1. Customize ZeRO-3 settings

Copy **accelerate_config_zero3.yaml** and modify `num_processes` according to the number of GPUs you want to use:

```bash
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
  gradient_accumulation_steps: 8
  offload_optimizer_device: cpu
  offload_param_device: cpu
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
enable_cpu_affinity: false
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 8
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```

### 2. Precompute all inputs (latents, embeddings)

In train_controlnet_flux.py, we need to pre-calculate all parameters and put them into batches, so we first need to rewrite the `compute_embeddings` function.

```python
def compute_embeddings(batch, proportion_empty_prompts, vae, flux_controlnet_pipeline, weight_dtype, is_train=True):
    ### compute text embeddings
    prompt_batch = batch[args.caption_column]
    captions = []
    for caption in prompt_batch:
        if random.random() < proportion_empty_prompts:
            captions.append("")
        elif isinstance(caption, str):
            captions.append(caption)
        elif isinstance(caption, (list, np.ndarray)):
            # take a random caption if there are multiple
            captions.append(random.choice(caption) if is_train else caption[0])
    prompt_batch = captions

    prompt_embeds, pooled_prompt_embeds, text_ids = flux_controlnet_pipeline.encode_prompt(
        prompt_batch, prompt_2=prompt_batch
    )
    prompt_embeds = prompt_embeds.to(dtype=weight_dtype)
    pooled_prompt_embeds = pooled_prompt_embeds.to(dtype=weight_dtype)
    text_ids = text_ids.to(dtype=weight_dtype)

    # text_ids [512, 3] to [bs, 512, 3]
    text_ids = text_ids.unsqueeze(0).expand(prompt_embeds.shape[0], -1, -1)

    ### compute latents
    def _pack_latents(latents, batch_size, num_channels_latents, height, width):
        latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
        latents = latents.permute(0, 2, 4, 1, 3, 5)
        latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
        return latents

    # vae encode
    pixel_values = batch["pixel_values"]
    pixel_values = torch.stack([image for image in pixel_values]).to(dtype=weight_dtype).to(vae.device)
    pixel_latents_tmp = vae.encode(pixel_values).latent_dist.sample()
    pixel_latents_tmp = (pixel_latents_tmp - vae.config.shift_factor) * vae.config.scaling_factor
    pixel_latents = _pack_latents(
        pixel_latents_tmp,
        pixel_values.shape[0],
        pixel_latents_tmp.shape[1],
        pixel_latents_tmp.shape[2],
        pixel_latents_tmp.shape[3],
    )

    control_values = batch["conditioning_pixel_values"]
    control_values = torch.stack([image for image in control_values]).to(dtype=weight_dtype).to(vae.device)
    control_latents = vae.encode(control_values).latent_dist.sample()
    control_latents = (control_latents - vae.config.shift_factor) * vae.config.scaling_factor
    control_latents = _pack_latents(
        control_latents,
        control_values.shape[0],
        control_latents.shape[1],
        control_latents.shape[2],
        control_latents.shape[3],
    )

    # copied from pipeline_flux_controlnet
    def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
        latent_image_ids = torch.zeros(height // 2, width // 2, 3)
        latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None]
        latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :]

        latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape

        latent_image_ids = latent_image_ids[None, :].repeat(batch_size, 1, 1, 1)
        latent_image_ids = latent_image_ids.reshape(
            batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels
        )

        return latent_image_ids.to(device=device, dtype=dtype)

    latent_image_ids = _prepare_latent_image_ids(
        batch_size=pixel_latents_tmp.shape[0],
        height=pixel_latents_tmp.shape[2],
        width=pixel_latents_tmp.shape[3],
        device=pixel_values.device,
        dtype=pixel_values.dtype,
    )

    # unet_added_cond_kwargs = {"pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids}
    return {
        "prompt_embeds": prompt_embeds,
        "pooled_prompt_embeds": pooled_prompt_embeds,
        "text_ids": text_ids,
        "pixel_latents": pixel_latents,
        "control_latents": control_latents,
        "latent_image_ids": latent_image_ids,
    }
```

Because we need the images to pass through the VAE, we need to preprocess the images in the dataset first. At the same time, the VAE requires more GPU memory, so you may need to reduce the `batch_size` below:

```diff
+train_dataset = prepare_train_dataset(train_dataset, accelerator)
 with accelerator.main_process_first():
     from datasets.fingerprint import Hasher

     # fingerprint used by the cache for the other processes to load the result
     # details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401
     new_fingerprint = Hasher.hash(args)
     train_dataset = train_dataset.map(
-        compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint, batch_size=100
+        compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint, batch_size=10
     )
     del text_encoders, tokenizers
     gc.collect()
     torch.cuda.empty_cache()

 # Then get the training dataset ready to be passed to the dataloader.
-train_dataset = prepare_train_dataset(train_dataset, accelerator)
```

### 3. Redefine the behavior of getting batches

Now that we have all the preprocessing done, we need to modify the `collate_fn` function.

```python
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()

    conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples])
    conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float()

    pixel_latents = torch.stack([torch.tensor(example["pixel_latents"]) for example in examples])
    pixel_latents = pixel_latents.to(memory_format=torch.contiguous_format).float()

    control_latents = torch.stack([torch.tensor(example["control_latents"]) for example in examples])
    control_latents = control_latents.to(memory_format=torch.contiguous_format).float()

    latent_image_ids = torch.stack([torch.tensor(example["latent_image_ids"]) for example in examples])

    prompt_ids = torch.stack([torch.tensor(example["prompt_embeds"]) for example in examples])

    pooled_prompt_embeds = torch.stack([torch.tensor(example["pooled_prompt_embeds"]) for example in examples])

    text_ids = torch.stack([torch.tensor(example["text_ids"]) for example in examples])

    return {
        "pixel_values": pixel_values,
        "conditioning_pixel_values": conditioning_pixel_values,
        "pixel_latents": pixel_latents,
        "control_latents": control_latents,
        "latent_image_ids": latent_image_ids,
        "prompt_ids": prompt_ids,
        "unet_added_conditions": {"pooled_prompt_embeds": pooled_prompt_embeds, "time_ids": text_ids},
    }
```
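The precomputed columns are stored by `datasets`, so `collate_fn` converts them back to tensors before stacking. A minimal sketch of wiring it into the dataloader (argument names such as `dataloader_num_workers` are assumptions about the script):

```python
import torch

# Assumes train_dataset, collate_fn, and args are the objects defined above.
train_dataloader = torch.utils.data.DataLoader(
    train_dataset,
    shuffle=True,
    collate_fn=collate_fn,
    batch_size=args.train_batch_size,
    num_workers=args.dataloader_num_workers,  # assumed flag name
)
```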
Finally, we just need to modify the way the various inputs are obtained during training.

```python
for epoch in range(first_epoch, args.num_train_epochs):
    for step, batch in enumerate(train_dataloader):
        with accelerator.accumulate(flux_controlnet):
            # Convert images to latent space
            pixel_latents = batch["pixel_latents"].to(dtype=weight_dtype)
            control_image = batch["control_latents"].to(dtype=weight_dtype)
            latent_image_ids = batch["latent_image_ids"].to(dtype=weight_dtype)

            # Sample noise that we'll add to the latents
            noise = torch.randn_like(pixel_latents).to(accelerator.device).to(dtype=weight_dtype)
            bsz = pixel_latents.shape[0]

            # Sample a random timestep for each image
            t = torch.sigmoid(torch.randn((bsz,), device=accelerator.device, dtype=weight_dtype))

            # apply flow matching
            noisy_latents = (
                1 - t.unsqueeze(1).unsqueeze(2).repeat(1, pixel_latents.shape[1], pixel_latents.shape[2])
            ) * pixel_latents + t.unsqueeze(1).unsqueeze(2).repeat(
                1, pixel_latents.shape[1], pixel_latents.shape[2]
            ) * noise

            guidance_vec = torch.full(
                (noisy_latents.shape[0],), 3.5, device=noisy_latents.device, dtype=weight_dtype
            )

            controlnet_block_samples, controlnet_single_block_samples = flux_controlnet(
                hidden_states=noisy_latents,
                controlnet_cond=control_image,
                timestep=t,
                guidance=guidance_vec,
                pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype),
                encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype),
                txt_ids=batch["unet_added_conditions"]["time_ids"][0].to(dtype=weight_dtype),
                img_ids=latent_image_ids[0],
                return_dict=False,
            )

            noise_pred = flux_transformer(
                hidden_states=noisy_latents,
                timestep=t,
                guidance=guidance_vec,
                pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype),
                encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype),
                controlnet_block_samples=[sample.to(dtype=weight_dtype) for sample in controlnet_block_samples]
                if controlnet_block_samples is not None
                else None,
                controlnet_single_block_samples=[
                    sample.to(dtype=weight_dtype) for sample in controlnet_single_block_samples
                ]
                if controlnet_single_block_samples is not None
                else None,
                txt_ids=batch["unet_added_conditions"]["time_ids"][0].to(dtype=weight_dtype),
                img_ids=latent_image_ids[0],
                return_dict=False,
            )[0]
```

Congratulations! You have completed all the code modifications required for DeepSpeed ZeRO-3.

### 4. Training with DeepSpeed ZeRO-3

Start!!!

```bash
export pretrained_model_name_or_path='flux-dev-model-path'
export MODEL_TYPE='train_model_type'
export TRAIN_JSON_FILE="your_json_file"
export CONTROL_TYPE='control_preprocessor_type'
export CAPTION_COLUMN='caption_column'

export CACHE_DIR="/data/train_csr/.cache/huggingface/"
export OUTPUT_DIR='/data/train_csr/FLUX/MODEL_OUT/'$MODEL_TYPE

# The first step is to use Python to precompute all caches. Replace the first line below with this line.
# (I am not sure why using accelerate would cause problems.)
CUDA_VISIBLE_DEVICES=0 python3 train_controlnet_flux.py \

# The second step is to use the above accelerate config to train
accelerate launch --config_file "./accelerate_config_zero3.yaml" train_controlnet_flux.py \
    --pretrained_model_name_or_path=$pretrained_model_name_or_path \
    --jsonl_for_train=$TRAIN_JSON_FILE \
    --conditioning_image_column=$CONTROL_TYPE \
    --image_column=image \
    --caption_column=$CAPTION_COLUMN \
    --cache_dir=$CACHE_DIR \
    --tracker_project_name=$MODEL_TYPE \
    --output_dir=$OUTPUT_DIR \
    --max_train_steps=500000 \
    --mixed_precision bf16 \
    --checkpointing_steps=1000 \
    --gradient_accumulation_steps=8 \
    --resolution=512 \
    --train_batch_size=1 \
    --learning_rate=1e-5 \
    --num_double_layers=4 \
    --num_single_layers=0 \
    --gradient_checkpointing \
    --resume_from_checkpoint="latest" \
    # --use_adafactor \ don't use
    # --validation_steps=3 \ not supported
    # --validation_image $VALIDATION_IMAGE \ not supported
    # --validation_prompt "xxx" \ not supported
```
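Before launching the multi-GPU run, it can be worth checking that the single-GPU precompute step actually produced the cached columns. A minimal sketch, assuming the `train_dataset` returned by the `map()` call above:

```python
# Column names come from the compute_embeddings function shown earlier.
expected = {
    "prompt_embeds",
    "pooled_prompt_embeds",
    "text_ids",
    "pixel_latents",
    "control_latents",
    "latent_image_ids",
}
missing = expected - set(train_dataset.column_names)
if missing:
    raise ValueError(f"Precompute step did not produce: {sorted(missing)}")
```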
diffusers/examples/controlnet/README_flux.md/0
{ "file_path": "diffusers/examples/controlnet/README_flux.md", "repo_id": "diffusers", "token_count": 7208 }
140
# Copyright 2025 Custom Diffusion authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def retrieve(class_prompt, class_data_dir, num_class_images): factor = 1.5 num_images = int(factor * num_class_images) client = ClipClient( url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1 ) os.makedirs(f"{class_data_dir}/images", exist_ok=True) if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images: return while True: class_images = client.query(text=class_prompt) if len(class_images) >= factor * num_class_images or num_images > 1e4: break else: num_images = int(factor * num_images) client = ClipClient( url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, ) count = 0 total = 0 pbar = tqdm(desc="downloading real regularization images", total=num_class_images) with ( open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(f"{class_data_dir}/images.txt", "w") as f3, ): while total < num_class_images: images = class_images[count] count += 1 try: img = requests.get(images["url"], timeout=30) if img.status_code == 200: _ = Image.open(BytesIO(img.content)) with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f: f.write(img.content) f1.write(images["caption"] + "\n") f2.write(images["url"] + "\n") f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n") total += 1 pbar.update(1) else: continue except Exception: continue return def parse_args(): parser = argparse.ArgumentParser("", add_help=False) parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str) parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str) parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int) return parser.parse_args() if __name__ == "__main__": args = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
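

# Example invocation (a sketch; the prompt and paths are placeholders):
#
#   python retrieve.py --class_prompt "cat" --class_data_dir ./real_reg/samples_cat --num_class_images 200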
diffusers/examples/custom_diffusion/retrieve.py/0
{ "file_path": "diffusers/examples/custom_diffusion/retrieve.py", "repo_id": "diffusers", "token_count": 1446 }
141
import argparse import logging import math import os from pathlib import Path import jax import jax.numpy as jnp import numpy as np import optax import torch import torch.utils.checkpoint import transformers from flax import jax_utils from flax.training import train_state from flax.training.common_utils import shard from huggingface_hub import create_repo, upload_folder from huggingface_hub.utils import insecure_hashlib from jax.experimental.compilation_cache import compilation_cache as cc from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed from diffusers import ( FlaxAutoencoderKL, FlaxDDPMScheduler, FlaxPNDMScheduler, FlaxStableDiffusionPipeline, FlaxUNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker from diffusers.utils import check_min_version # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.36.0.dev0") # Cache compiled models across invocations of this script. cc.initialize_cache(os.path.expanduser("~/.cache/jax/compilation_cache")) logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_name_or_path", type=str, default=None, help="Path to pretrained vae or vae identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." 
), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--save_steps", type=int, default=None, help="Save a checkpoint every X steps.") parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.instance_data_dir is None: raise ValueError("You must specify a train data directory.") if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, class_num=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) if class_num is not None: self.num_class_images = min(len(self.class_images_path), class_num) else: self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_prompt_ids"] = self.tokenizer( self.class_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids return example class PromptDataset(Dataset): """A simple dataset to prepare the prompts to generate class images on multiple GPUs.""" def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def get_params_to_save(params): return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) def main(): args = 
parse_args() logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() if args.seed is not None: set_seed(args.seed) rng = jax.random.PRNGKey(args.seed) if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) total_sample_batch_size = args.sample_batch_size * jax.local_device_count() sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=total_sample_batch_size) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not jax.process_index() == 0 ): prompt_ids = pipeline.prepare_inputs(example["prompt"]) prompt_ids = shard(prompt_ids) p_params = jax_utils.replicate(params) rng = jax.random.split(rng)[0] sample_rng = jax.random.split(rng, jax.device_count()) images = pipeline(prompt_ids, p_params, sample_rng, jit=True).images images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) images = pipeline.numpy_to_pil(np.array(images)) for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline # Handle the repository creation if jax.process_index() == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) else: raise NotImplementedError("No tokenizer specified!") train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, class_num=args.num_class_images, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) def collate_fn(examples): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. 
if args.with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = tokenizer.pad( {"input_ids": input_ids}, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt" ).input_ids batch = { "input_ids": input_ids, "pixel_values": pixel_values, } batch = {k: v.numpy() for k, v in batch.items()} return batch total_train_batch_size = args.train_batch_size * jax.local_device_count() if len(train_dataset) < total_train_batch_size: raise ValueError( f"Training batch size is {total_train_batch_size}, but your dataset only contains" f" {len(train_dataset)} images. Please, use a larger dataset or reduce the effective batch size. Note that" f" there are {jax.local_device_count()} parallel devices, so your batch size can't be smaller than that." ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=total_train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True ) weight_dtype = jnp.float32 if args.mixed_precision == "fp16": weight_dtype = jnp.float16 elif args.mixed_precision == "bf16": weight_dtype = jnp.bfloat16 if args.pretrained_vae_name_or_path: # TODO(patil-suraj): Upload flax weights for the VAE vae_arg, vae_kwargs = (args.pretrained_vae_name_or_path, {"from_pt": True}) else: vae_arg, vae_kwargs = (args.pretrained_model_name_or_path, {"subfolder": "vae", "revision": args.revision}) # Load models and create wrapper for stable diffusion text_encoder = FlaxCLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", dtype=weight_dtype, revision=args.revision, ) vae, vae_params = FlaxAutoencoderKL.from_pretrained( vae_arg, dtype=weight_dtype, **vae_kwargs, ) unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", dtype=weight_dtype, revision=args.revision, ) # Optimization if args.scale_lr: args.learning_rate = args.learning_rate * total_train_batch_size constant_scheduler = optax.constant_schedule(args.learning_rate) adamw = optax.adamw( learning_rate=constant_scheduler, b1=args.adam_beta1, b2=args.adam_beta2, eps=args.adam_epsilon, weight_decay=args.adam_weight_decay, ) optimizer = optax.chain( optax.clip_by_global_norm(args.max_grad_norm), adamw, ) unet_state = train_state.TrainState.create(apply_fn=unet.__call__, params=unet_params, tx=optimizer) text_encoder_state = train_state.TrainState.create( apply_fn=text_encoder.__call__, params=text_encoder.params, tx=optimizer ) noise_scheduler = FlaxDDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 ) noise_scheduler_state = noise_scheduler.create_state() # Initialize our training train_rngs = jax.random.split(rng, jax.local_device_count()) def train_step(unet_state, text_encoder_state, vae_params, batch, train_rng): dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) if args.train_text_encoder: params = {"text_encoder": text_encoder_state.params, "unet": unet_state.params} else: params = {"unet": unet_state.params} def compute_loss(params): # Convert images to latent space vae_outputs = vae.apply( {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode ) latents = vae_outputs.latent_dist.sample(sample_rng) # (NHWC) -> (NCHW) latents = jnp.transpose(latents, (0, 3, 1, 2)) 
latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise_rng, timestep_rng = jax.random.split(sample_rng) noise = jax.random.normal(noise_rng, latents.shape) # Sample a random timestep for each image bsz = latents.shape[0] timesteps = jax.random.randint( timestep_rng, (bsz,), 0, noise_scheduler.config.num_train_timesteps, ) # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) # Get the text embedding for conditioning if args.train_text_encoder: encoder_hidden_states = text_encoder_state.apply_fn( batch["input_ids"], params=params["text_encoder"], dropout_rng=dropout_rng, train=True )[0] else: encoder_hidden_states = text_encoder( batch["input_ids"], params=text_encoder_state.params, train=False )[0] # Predict the noise residual model_pred = unet.apply( {"params": params["unet"]}, noisy_latents, timesteps, encoder_hidden_states, train=True ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and noise_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = jnp.split(model_pred, 2, axis=0) target, target_prior = jnp.split(target, 2, axis=0) # Compute instance loss loss = (target - model_pred) ** 2 loss = loss.mean() # Compute prior loss prior_loss = (target_prior - model_pred_prior) ** 2 prior_loss = prior_loss.mean() # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss else: loss = (target - model_pred) ** 2 loss = loss.mean() return loss grad_fn = jax.value_and_grad(compute_loss) loss, grad = grad_fn(params) grad = jax.lax.pmean(grad, "batch") new_unet_state = unet_state.apply_gradients(grads=grad["unet"]) if args.train_text_encoder: new_text_encoder_state = text_encoder_state.apply_gradients(grads=grad["text_encoder"]) else: new_text_encoder_state = text_encoder_state metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") return new_unet_state, new_text_encoder_state, metrics, new_train_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0, 1)) # Replicate the train state on each device unet_state = jax_utils.replicate(unet_state) text_encoder_state = jax_utils.replicate(text_encoder_state) vae_params = jax_utils.replicate(vae_params) # Train! num_update_steps_per_epoch = math.ceil(len(train_dataloader)) # Scheduler and math around the number of training steps. if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. 
parallel & distributed) = {total_train_batch_size}") logger.info(f" Total optimization steps = {args.max_train_steps}") def checkpoint(step=None): # Create the pipeline using the trained modules and save it. scheduler, _ = FlaxPNDMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained( "CompVis/stable-diffusion-safety-checker", from_pt=True ) pipeline = FlaxStableDiffusionPipeline( text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), ) outdir = os.path.join(args.output_dir, str(step)) if step else args.output_dir pipeline.save_pretrained( outdir, params={ "text_encoder": get_params_to_save(text_encoder_state.params), "vae": get_params_to_save(vae_params), "unet": get_params_to_save(unet_state.params), "safety_checker": safety_checker.params, }, ) if args.push_to_hub: message = f"checkpoint-{step}" if step is not None else "End of training" upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message=message, ignore_patterns=["step_*", "epoch_*"], ) global_step = 0 epochs = tqdm(range(args.num_train_epochs), desc="Epoch ... ", position=0) for epoch in epochs: # ======================== Training ================================ train_metrics = [] steps_per_epoch = len(train_dataset) // total_train_batch_size train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) # train for batch in train_dataloader: batch = shard(batch) unet_state, text_encoder_state, train_metric, train_rngs = p_train_step( unet_state, text_encoder_state, vae_params, batch, train_rngs ) train_metrics.append(train_metric) train_step_progress_bar.update(jax.local_device_count()) global_step += 1 if jax.process_index() == 0 and args.save_steps and global_step % args.save_steps == 0: checkpoint(global_step) if global_step >= args.max_train_steps: break train_metric = jax_utils.unreplicate(train_metric) train_step_progress_bar.close() epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") if jax.process_index() == 0: checkpoint() if __name__ == "__main__": main()
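For reference, the prior-preservation objective computed in `compute_loss` above reduces to the following standalone sketch (the `prior_loss_weight` default and tensor shapes are illustrative assumptions):

```python
import jax.numpy as jnp

def prior_preservation_loss(model_pred, target, prior_loss_weight=1.0):
    # The batch concatenates instance examples and class (prior) examples,
    # so an even split along the batch axis recovers the two streams.
    model_pred, model_pred_prior = jnp.split(model_pred, 2, axis=0)
    target, target_prior = jnp.split(target, 2, axis=0)
    instance_loss = jnp.mean((target - model_pred) ** 2)           # MSE on instance images
    prior_loss = jnp.mean((target_prior - model_pred_prior) ** 2)  # MSE on class images
    return instance_loss + prior_loss_weight * prior_loss
```

This mirrors the `args.with_prior_preservation` branch: the class-image term regularizes the model toward its original behavior on the subject's class, while the instance term fits the new subject.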
diffusers/examples/dreambooth/train_dreambooth_flax.py/0
{ "file_path": "diffusers/examples/dreambooth/train_dreambooth_flax.py", "repo_id": "diffusers", "token_count": 11967 }
142
# Inference Examples **The inference examples folder is deprecated and will be removed in a future version**. **Officially supported inference examples can be found in the [Pipelines folder](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines)**. - For `Image-to-Image text-guided generation with Stable Diffusion`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples) - For `In-painting using Stable Diffusion`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples) - For `Tweak prompts reusing seeds and latents`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples)
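For orientation, a minimal image-to-image sketch against the officially supported pipeline might look like the following (the model id, input URL, and parameter values are illustrative):

```python
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

# Load the officially supported img2img pipeline in half precision.
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/sketch.png")  # illustrative input image

image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,       # how strongly to transform the input image
    guidance_scale=7.5,  # classifier-free guidance weight
).images[0]
image.save("fantasy_landscape.png")
```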
diffusers/examples/inference/README.md/0
{ "file_path": "diffusers/examples/inference/README.md", "repo_id": "diffusers", "token_count": 252 }
143
# coding=utf-8 # Copyright 2025 suzukimain # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import types from collections import OrderedDict from dataclasses import asdict, dataclass, field from typing import Dict, List, Optional, Union import requests import torch from huggingface_hub import hf_api, hf_hub_download from huggingface_hub.file_download import http_get from huggingface_hub.utils import validate_hf_hub_args from diffusers.loaders.single_file_utils import ( VALID_URL_PREFIXES, _extract_repo_id_and_weights_name, infer_diffusers_model_type, load_single_file_checkpoint, ) from diffusers.pipelines.animatediff import AnimateDiffPipeline, AnimateDiffSDXLPipeline from diffusers.pipelines.auto_pipeline import ( AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image, ) from diffusers.pipelines.controlnet import ( StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetPipeline, ) from diffusers.pipelines.flux import FluxImg2ImgPipeline, FluxPipeline from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionUpscalePipeline, ) from diffusers.pipelines.stable_diffusion_3 import StableDiffusion3Img2ImgPipeline, StableDiffusion3Pipeline from diffusers.pipelines.stable_diffusion_xl import ( StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, ) from diffusers.utils import logging logger = logging.get_logger(__name__) SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING = OrderedDict( [ ("animatediff_rgb", AnimateDiffPipeline), ("animatediff_scribble", AnimateDiffPipeline), ("animatediff_sdxl_beta", AnimateDiffSDXLPipeline), ("animatediff_v1", AnimateDiffPipeline), ("animatediff_v2", AnimateDiffPipeline), ("animatediff_v3", AnimateDiffPipeline), ("autoencoder-dc-f128c512", None), ("autoencoder-dc-f32c32", None), ("autoencoder-dc-f32c32-sana", None), ("autoencoder-dc-f64c128", None), ("controlnet", StableDiffusionControlNetPipeline), ("controlnet_xl", StableDiffusionXLControlNetPipeline), ("controlnet_xl_large", StableDiffusionXLControlNetPipeline), ("controlnet_xl_mid", StableDiffusionXLControlNetPipeline), ("controlnet_xl_small", StableDiffusionXLControlNetPipeline), ("flux-depth", FluxPipeline), ("flux-dev", FluxPipeline), ("flux-fill", FluxPipeline), ("flux-schnell", FluxPipeline), ("hunyuan-video", None), ("inpainting", None), ("inpainting_v2", None), ("ltx-video", None), ("ltx-video-0.9.1", None), ("mochi-1-preview", None), ("playground-v2-5", StableDiffusionXLPipeline), ("sd3", StableDiffusion3Pipeline), ("sd35_large", StableDiffusion3Pipeline), ("sd35_medium", StableDiffusion3Pipeline), ("stable_cascade_stage_b", None), ("stable_cascade_stage_b_lite", None), ("stable_cascade_stage_c", None), ("stable_cascade_stage_c_lite", 
None), ("upscale", StableDiffusionUpscalePipeline), ("v1", StableDiffusionPipeline), ("v2", StableDiffusionPipeline), ("xl_base", StableDiffusionXLPipeline), ("xl_inpaint", None), ("xl_refiner", StableDiffusionXLPipeline), ] ) SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING = OrderedDict( [ ("animatediff_rgb", AnimateDiffPipeline), ("animatediff_scribble", AnimateDiffPipeline), ("animatediff_sdxl_beta", AnimateDiffSDXLPipeline), ("animatediff_v1", AnimateDiffPipeline), ("animatediff_v2", AnimateDiffPipeline), ("animatediff_v3", AnimateDiffPipeline), ("autoencoder-dc-f128c512", None), ("autoencoder-dc-f32c32", None), ("autoencoder-dc-f32c32-sana", None), ("autoencoder-dc-f64c128", None), ("controlnet", StableDiffusionControlNetImg2ImgPipeline), ("controlnet_xl", StableDiffusionXLControlNetImg2ImgPipeline), ("controlnet_xl_large", StableDiffusionXLControlNetImg2ImgPipeline), ("controlnet_xl_mid", StableDiffusionXLControlNetImg2ImgPipeline), ("controlnet_xl_small", StableDiffusionXLControlNetImg2ImgPipeline), ("flux-depth", FluxImg2ImgPipeline), ("flux-dev", FluxImg2ImgPipeline), ("flux-fill", FluxImg2ImgPipeline), ("flux-schnell", FluxImg2ImgPipeline), ("hunyuan-video", None), ("inpainting", None), ("inpainting_v2", None), ("ltx-video", None), ("ltx-video-0.9.1", None), ("mochi-1-preview", None), ("playground-v2-5", StableDiffusionXLImg2ImgPipeline), ("sd3", StableDiffusion3Img2ImgPipeline), ("sd35_large", StableDiffusion3Img2ImgPipeline), ("sd35_medium", StableDiffusion3Img2ImgPipeline), ("stable_cascade_stage_b", None), ("stable_cascade_stage_b_lite", None), ("stable_cascade_stage_c", None), ("stable_cascade_stage_c_lite", None), ("upscale", StableDiffusionUpscalePipeline), ("v1", StableDiffusionImg2ImgPipeline), ("v2", StableDiffusionImg2ImgPipeline), ("xl_base", StableDiffusionXLImg2ImgPipeline), ("xl_inpaint", None), ("xl_refiner", StableDiffusionXLImg2ImgPipeline), ] ) SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING = OrderedDict( [ ("animatediff_rgb", None), ("animatediff_scribble", None), ("animatediff_sdxl_beta", None), ("animatediff_v1", None), ("animatediff_v2", None), ("animatediff_v3", None), ("autoencoder-dc-f128c512", None), ("autoencoder-dc-f32c32", None), ("autoencoder-dc-f32c32-sana", None), ("autoencoder-dc-f64c128", None), ("controlnet", StableDiffusionControlNetInpaintPipeline), ("controlnet_xl", None), ("controlnet_xl_large", None), ("controlnet_xl_mid", None), ("controlnet_xl_small", None), ("flux-depth", None), ("flux-dev", None), ("flux-fill", None), ("flux-schnell", None), ("hunyuan-video", None), ("inpainting", StableDiffusionInpaintPipeline), ("inpainting_v2", StableDiffusionInpaintPipeline), ("ltx-video", None), ("ltx-video-0.9.1", None), ("mochi-1-preview", None), ("playground-v2-5", None), ("sd3", None), ("sd35_large", None), ("sd35_medium", None), ("stable_cascade_stage_b", None), ("stable_cascade_stage_b_lite", None), ("stable_cascade_stage_c", None), ("stable_cascade_stage_c_lite", None), ("upscale", StableDiffusionUpscalePipeline), ("v1", None), ("v2", None), ("xl_base", None), ("xl_inpaint", StableDiffusionXLInpaintPipeline), ("xl_refiner", None), ] ) CONFIG_FILE_LIST = [ "pytorch_model.bin", "pytorch_model.fp16.bin", "diffusion_pytorch_model.bin", "diffusion_pytorch_model.fp16.bin", "diffusion_pytorch_model.safetensors", "diffusion_pytorch_model.fp16.safetensors", "diffusion_pytorch_model.ckpt", "diffusion_pytorch_model.fp16.ckpt", "diffusion_pytorch_model.non_ema.bin", "diffusion_pytorch_model.non_ema.safetensors", ] DIFFUSERS_CONFIG_DIR = [ 
"safety_checker", "unet", "vae", "text_encoder", "text_encoder_2", ] TOKENIZER_SHAPE_MAP = { 768: [ "SD 1.4", "SD 1.5", "SD 1.5 LCM", "SDXL 0.9", "SDXL 1.0", "SDXL 1.0 LCM", "SDXL Distilled", "SDXL Turbo", "SDXL Lightning", "PixArt a", "Playground v2", "Pony", ], 1024: ["SD 2.0", "SD 2.0 768", "SD 2.1", "SD 2.1 768", "SD 2.1 Unclip"], } EXTENSION = [".safetensors", ".ckpt", ".bin"] CACHE_HOME = os.path.expanduser("~/.cache") @dataclass class RepoStatus: r""" Data class for storing repository status information. Attributes: repo_id (`str`): The name of the repository. repo_hash (`str`): The hash of the repository. version (`str`): The version ID of the repository. """ repo_id: str = "" repo_hash: str = "" version: str = "" @dataclass class ModelStatus: r""" Data class for storing model status information. Attributes: search_word (`str`): The search word used to find the model. download_url (`str`): The URL to download the model. file_name (`str`): The name of the model file. local (`bool`): Whether the model exists locally site_url (`str`): The URL of the site where the model is hosted. """ search_word: str = "" download_url: str = "" file_name: str = "" local: bool = False site_url: str = "" @dataclass class ExtraStatus: r""" Data class for storing extra status information. Attributes: trained_words (`str`): The words used to trigger the model """ trained_words: Union[List[str], None] = None @dataclass class SearchResult: r""" Data class for storing model data. Attributes: model_path (`str`): The path to the model. loading_method (`str`): The type of loading method used for the model ( None or 'from_single_file' or 'from_pretrained') checkpoint_format (`str`): The format of the model checkpoint (`single_file` or `diffusers`). repo_status (`RepoStatus`): The status of the repository. model_status (`ModelStatus`): The status of the model. """ model_path: str = "" loading_method: Union[str, None] = None checkpoint_format: Union[str, None] = None repo_status: RepoStatus = field(default_factory=RepoStatus) model_status: ModelStatus = field(default_factory=ModelStatus) extra_status: ExtraStatus = field(default_factory=ExtraStatus) @validate_hf_hub_args def load_pipeline_from_single_file(pretrained_model_or_path, pipeline_mapping, **kwargs): r""" Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default. Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A link to the `.ckpt` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub. - A path to a *file* containing all pipeline weights. pipeline_mapping (`dict`): A mapping of model types to their corresponding pipeline classes. This is used to determine which pipeline class to instantiate based on the model type inferred from the checkpoint. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. 
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. original_config_file (`str`, *optional*): The path to the original config file that was used to train the model. If not provided, the config file will be inferred from the checkpoint file. config (`str`, *optional*): Can be either: - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing the pipeline component configs in Diffusers format. checkpoint (`dict`, *optional*): The loaded state dictionary of the model. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. """ # Load the checkpoint from the provided link or path checkpoint = load_single_file_checkpoint(pretrained_model_or_path) # Infer the model type from the loaded checkpoint model_type = infer_diffusers_model_type(checkpoint) # Get the corresponding pipeline class from the pipeline mapping pipeline_class = pipeline_mapping[model_type] # For tasks not supported by this pipeline if pipeline_class is None: raise ValueError( f"{model_type} is not supported in this pipeline." "For `Text2Image`, please use `AutoPipelineForText2Image.from_pretrained`, " "for `Image2Image` , please use `AutoPipelineForImage2Image.from_pretrained`, " "and `inpaint` is only supported in `AutoPipelineForInpainting.from_pretrained`" ) else: # Instantiate and return the pipeline with the loaded checkpoint and any additional kwargs return pipeline_class.from_single_file(pretrained_model_or_path, **kwargs) def get_keyword_types(keyword): r""" Determine the type and loading method for a given keyword. Parameters: keyword (`str`): The input keyword to classify. Returns: `dict`: A dictionary containing the model format, loading method, and various types and extra types flags. 
""" # Initialize the status dictionary with default values status = { "checkpoint_format": None, "loading_method": None, "type": { "other": False, "hf_url": False, "hf_repo": False, "civitai_url": False, "local": False, }, "extra_type": { "url": False, "missing_model_index": None, }, } # Check if the keyword is an HTTP or HTTPS URL status["extra_type"]["url"] = bool(re.search(r"^(https?)://", keyword)) # Check if the keyword is a file if os.path.isfile(keyword): status["type"]["local"] = True status["checkpoint_format"] = "single_file" status["loading_method"] = "from_single_file" # Check if the keyword is a directory elif os.path.isdir(keyword): status["type"]["local"] = True status["checkpoint_format"] = "diffusers" status["loading_method"] = "from_pretrained" if not os.path.exists(os.path.join(keyword, "model_index.json")): status["extra_type"]["missing_model_index"] = True # Check if the keyword is a Civitai URL elif keyword.startswith("https://civitai.com/"): status["type"]["civitai_url"] = True status["checkpoint_format"] = "single_file" status["loading_method"] = None # Check if the keyword starts with any valid URL prefixes elif any(keyword.startswith(prefix) for prefix in VALID_URL_PREFIXES): repo_id, weights_name = _extract_repo_id_and_weights_name(keyword) if weights_name: status["type"]["hf_url"] = True status["checkpoint_format"] = "single_file" status["loading_method"] = "from_single_file" else: status["type"]["hf_repo"] = True status["checkpoint_format"] = "diffusers" status["loading_method"] = "from_pretrained" # Check if the keyword matches a Hugging Face repository format elif re.match(r"^[^/]+/[^/]+$", keyword): status["type"]["hf_repo"] = True status["checkpoint_format"] = "diffusers" status["loading_method"] = "from_pretrained" # If none of the above apply else: status["type"]["other"] = True status["checkpoint_format"] = None status["loading_method"] = None return status def file_downloader( url, save_path, **kwargs, ) -> None: """ Downloads a file from a given URL and saves it to the specified path. parameters: url (`str`): The URL of the file to download. save_path (`str`): The local path where the file will be saved. resume (`bool`, *optional*, defaults to `False`): Whether to resume an incomplete download. headers (`dict`, *optional*, defaults to `None`): Dictionary of HTTP Headers to send with the request. proxies (`dict`, *optional*, defaults to `None`): Dictionary mapping protocol to the URL of the proxy passed to `requests.request`. force_download (`bool`, *optional*, defaults to `False`): Whether to force the download even if the file already exists. displayed_filename (`str`, *optional*): The filename of the file that is being downloaded. Value is used only to display a nice progress bar. If not set, the filename is guessed from the URL or the `Content-Disposition` header. 
returns: None """ # Get optional parameters from kwargs, with their default values resume = kwargs.pop("resume", False) headers = kwargs.pop("headers", None) proxies = kwargs.pop("proxies", None) force_download = kwargs.pop("force_download", False) displayed_filename = kwargs.pop("displayed_filename", None) # Default mode for file writing and initial file size mode = "wb" file_size = 0 # Create directory os.makedirs(os.path.dirname(save_path), exist_ok=True) # Check if the file already exists at the save path if os.path.exists(save_path): if not force_download: # If the file exists and force_download is False, skip the download logger.info(f"File already exists: {save_path}, skipping download.") return None elif resume: # If resuming, set mode to append binary and get current file size mode = "ab" file_size = os.path.getsize(save_path) # Open the file in the appropriate mode (write or append) with open(save_path, mode) as model_file: # Call the http_get function to perform the file download return http_get( url=url, temp_file=model_file, resume_size=file_size, displayed_filename=displayed_filename, headers=headers, proxies=proxies, **kwargs, ) def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, None]: r""" Downloads a model from Hugging Face. Parameters: search_word (`str`): The search query string. revision (`str`, *optional*): The specific version of the model to download. checkpoint_format (`str`, *optional*, defaults to `"single_file"`): The format of the model checkpoint. download (`bool`, *optional*, defaults to `False`): Whether to download the model. force_download (`bool`, *optional*, defaults to `False`): Whether to force the download if the model already exists. include_params (`bool`, *optional*, defaults to `False`): Whether to include parameters in the returned data. pipeline_tag (`str`, *optional*): Tag to filter models by pipeline. token (`str`, *optional*): API token for Hugging Face authentication. gated (`bool`, *optional*, defaults to `False` ): A boolean to filter models on the Hub that are gated or not. skip_error (`bool`, *optional*, defaults to `False`): Whether to skip errors and return None. Returns: `Union[str, SearchResult, None]`: The model path or SearchResult or None. 
""" # Extract additional parameters from kwargs revision = kwargs.pop("revision", None) checkpoint_format = kwargs.pop("checkpoint_format", "single_file") download = kwargs.pop("download", False) force_download = kwargs.pop("force_download", False) include_params = kwargs.pop("include_params", False) pipeline_tag = kwargs.pop("pipeline_tag", None) token = kwargs.pop("token", None) gated = kwargs.pop("gated", False) skip_error = kwargs.pop("skip_error", False) file_list = [] hf_repo_info = {} hf_security_info = {} model_path = "" repo_id, file_name = "", "" diffusers_model_exists = False # Get the type and loading method for the keyword search_word_status = get_keyword_types(search_word) if search_word_status["type"]["hf_repo"]: hf_repo_info = hf_api.model_info(repo_id=search_word, securityStatus=True) if download: model_path = DiffusionPipeline.download( search_word, revision=revision, token=token, force_download=force_download, **kwargs, ) else: model_path = search_word elif search_word_status["type"]["hf_url"]: repo_id, weights_name = _extract_repo_id_and_weights_name(search_word) if download: model_path = hf_hub_download( repo_id=repo_id, filename=weights_name, force_download=force_download, token=token, ) else: model_path = search_word elif search_word_status["type"]["local"]: model_path = search_word elif search_word_status["type"]["civitai_url"]: if skip_error: return None else: raise ValueError("The URL for Civitai is invalid with `for_hf`. Please use `for_civitai` instead.") else: # Get model data from HF API hf_models = hf_api.list_models( search=search_word, direction=-1, limit=100, fetch_config=True, pipeline_tag=pipeline_tag, full=True, gated=gated, token=token, ) model_dicts = [asdict(value) for value in list(hf_models)] # Loop through models to find a suitable candidate for repo_info in model_dicts: repo_id = repo_info["id"] file_list = [] hf_repo_info = hf_api.model_info(repo_id=repo_id, securityStatus=True) # Lists files with security issues. hf_security_info = hf_repo_info.security_repo_status exclusion = [issue["path"] for issue in hf_security_info["filesWithIssues"]] # Checks for multi-folder diffusers model or valid files (models with security issues are excluded). 
if hf_security_info["scansDone"]: for info in repo_info["siblings"]: file_path = info["rfilename"] if "model_index.json" == file_path and checkpoint_format in [ "diffusers", "all", ]: diffusers_model_exists = True break elif ( any(file_path.endswith(ext) for ext in EXTENSION) and not any(config in file_path for config in CONFIG_FILE_LIST) and not any(exc in file_path for exc in exclusion) and os.path.basename(os.path.dirname(file_path)) not in DIFFUSERS_CONFIG_DIR ): file_list.append(file_path) # Exit from the loop if a multi-folder diffusers model or valid file is found if diffusers_model_exists or file_list: break else: # Handle case where no models match the criteria if skip_error: return None else: raise ValueError("No models matching your criteria were found on huggingface.") if diffusers_model_exists: if download: model_path = DiffusionPipeline.download( repo_id, token=token, **kwargs, ) else: model_path = repo_id elif file_list: # Sort and find the safest model file_name = next( (model for model in sorted(file_list, reverse=True) if re.search(r"(?i)[-_](safe|sfw)", model)), file_list[0], ) if download: model_path = hf_hub_download( repo_id=repo_id, filename=file_name, revision=revision, token=token, force_download=force_download, ) # `pathlib.PosixPath` may be returned if model_path: model_path = str(model_path) if file_name: download_url = f"https://huggingface.co/{repo_id}/blob/main/{file_name}" else: download_url = f"https://huggingface.co/{repo_id}" output_info = get_keyword_types(model_path) if include_params: return SearchResult( model_path=model_path or download_url, loading_method=output_info["loading_method"], checkpoint_format=output_info["checkpoint_format"], repo_status=RepoStatus(repo_id=repo_id, repo_hash=hf_repo_info.sha, version=revision), model_status=ModelStatus( search_word=search_word, site_url=download_url, download_url=download_url, file_name=file_name, local=download, ), extra_status=ExtraStatus(trained_words=None), ) else: return model_path def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]: r""" Downloads a model from Civitai. Parameters: search_word (`str`): The search query string. model_type (`str`, *optional*, defaults to `Checkpoint`): The type of model to search for. sort (`str`, *optional*): The order in which you wish to sort the results(for example, `Highest Rated`, `Most Downloaded`, `Newest`). base_model (`str`, *optional*): The base model to filter by. download (`bool`, *optional*, defaults to `False`): Whether to download the model. force_download (`bool`, *optional*, defaults to `False`): Whether to force the download if the model already exists. token (`str`, *optional*): API token for Civitai authentication. include_params (`bool`, *optional*, defaults to `False`): Whether to include parameters in the returned data. cache_dir (`str`, `Path`, *optional*): Path to the folder where cached files are stored. resume (`bool`, *optional*, defaults to `False`): Whether to resume an incomplete download. skip_error (`bool`, *optional*, defaults to `False`): Whether to skip errors and return None. Returns: `Union[str, SearchResult, None]`: The model path or ` SearchResult` or None. 
""" # Extract additional parameters from kwargs model_type = kwargs.pop("model_type", "Checkpoint") sort = kwargs.pop("sort", None) download = kwargs.pop("download", False) base_model = kwargs.pop("base_model", None) force_download = kwargs.pop("force_download", False) token = kwargs.pop("token", None) include_params = kwargs.pop("include_params", False) resume = kwargs.pop("resume", False) cache_dir = kwargs.pop("cache_dir", None) skip_error = kwargs.pop("skip_error", False) # Initialize additional variables with default values model_path = "" repo_name = "" repo_id = "" version_id = "" trainedWords = "" models_list = [] selected_repo = {} selected_model = {} selected_version = {} civitai_cache_dir = cache_dir or os.path.join(CACHE_HOME, "Civitai") # Set up parameters and headers for the CivitAI API request params = { "query": search_word, "types": model_type, "limit": 20, } if base_model is not None: if not isinstance(base_model, list): base_model = [base_model] params["baseModel"] = base_model if sort is not None: params["sort"] = sort headers = {} if token: headers["Authorization"] = f"Bearer {token}" try: # Make the request to the CivitAI API response = requests.get("https://civitai.com/api/v1/models", params=params, headers=headers) response.raise_for_status() except requests.exceptions.HTTPError as err: raise requests.HTTPError(f"Could not get elements from the URL: {err}") else: try: data = response.json() except AttributeError: if skip_error: return None else: raise ValueError("Invalid JSON response") # Sort repositories by download count in descending order sorted_repos = sorted(data["items"], key=lambda x: x["stats"]["downloadCount"], reverse=True) for selected_repo in sorted_repos: repo_name = selected_repo["name"] repo_id = selected_repo["id"] # Sort versions within the selected repo by download count sorted_versions = sorted( selected_repo["modelVersions"], key=lambda x: x["stats"]["downloadCount"], reverse=True, ) for selected_version in sorted_versions: version_id = selected_version["id"] trainedWords = selected_version["trainedWords"] models_list = [] # When searching for textual inversion, results other than the values entered for the base model may come up, so check again. if base_model is None or selected_version["baseModel"] in base_model: for model_data in selected_version["files"]: # Check if the file passes security scans and has a valid extension file_name = model_data["name"] if ( model_data["pickleScanResult"] == "Success" and model_data["virusScanResult"] == "Success" and any(file_name.endswith(ext) for ext in EXTENSION) and os.path.basename(os.path.dirname(file_name)) not in DIFFUSERS_CONFIG_DIR ): file_status = { "filename": file_name, "download_url": model_data["downloadUrl"], } models_list.append(file_status) if models_list: # Sort the models list by filename and find the safest model sorted_models = sorted(models_list, key=lambda x: x["filename"], reverse=True) selected_model = next( ( model_data for model_data in sorted_models if bool(re.search(r"(?i)[-_](safe|sfw)", model_data["filename"])) ), sorted_models[0], ) break else: continue break # Exception handling when search candidates are not found if not selected_model: if skip_error: return None else: raise ValueError("No model found. Please try changing the word you are searching for.") # Define model file status file_name = selected_model["filename"] download_url = selected_model["download_url"] # Handle file download and setting model information if download: # The path where the model is to be saved. 
model_path = os.path.join(str(civitai_cache_dir), str(repo_id), str(version_id), str(file_name)) # Download Model File file_downloader( url=download_url, save_path=model_path, resume=resume, force_download=force_download, displayed_filename=file_name, headers=headers, **kwargs, ) else: model_path = download_url output_info = get_keyword_types(model_path) if not include_params: return model_path else: return SearchResult( model_path=model_path, loading_method=output_info["loading_method"], checkpoint_format=output_info["checkpoint_format"], repo_status=RepoStatus(repo_id=repo_name, repo_hash=repo_id, version=version_id), model_status=ModelStatus( search_word=search_word, site_url=f"https://civitai.com/models/{repo_id}?modelVersionId={version_id}", download_url=download_url, file_name=file_name, local=output_info["type"]["local"], ), extra_status=ExtraStatus(trained_words=trainedWords or None), ) def add_methods(pipeline): r""" Add methods from `AutoConfig` to the pipeline. Parameters: pipeline (`Pipeline`): The pipeline to which the methods will be added. """ for attr_name in dir(AutoConfig): attr_value = getattr(AutoConfig, attr_name) if callable(attr_value) and not attr_name.startswith("__"): setattr(pipeline, attr_name, types.MethodType(attr_value, pipeline)) return pipeline class AutoConfig: def auto_load_textual_inversion( self, pretrained_model_name_or_path: Union[str, List[str]], token: Optional[Union[str, List[str]]] = None, base_model: Optional[Union[str, List[str]]] = None, tokenizer=None, text_encoder=None, **kwargs, ): r""" Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and Automatic1111 formats are supported). Parameters: pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`): Can be either one of the following or a list of them: - Search keywords for pretrained model (for example `EasyNegative`). - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual inversion weights. - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). token (`str` or `List[str]`, *optional*): Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a list, then `token` must also be a list of equal length. text_encoder ([`~transformers.CLIPTextModel`], *optional*): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). If not specified, function will take self.tokenizer. tokenizer ([`~transformers.CLIPTokenizer`], *optional*): A `CLIPTokenizer` to tokenize text. If not specified, function will take self.tokenizer. weight_name (`str`, *optional*): Name of a custom weight file. This should be used when: - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight name such as `text_inv.bin`. - The saved textual inversion file is in the Automatic1111 format. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. 
force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. Examples: ```py >>> from auto_diffusers import EasyPipelineForText2Image >>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5") >>> pipeline.auto_load_textual_inversion("EasyNegative", token="EasyNegative") >>> image = pipeline(prompt).images[0] ``` """ # 1. Set tokenizer and text encoder tokenizer = tokenizer or getattr(self, "tokenizer", None) text_encoder = text_encoder or getattr(self, "text_encoder", None) # Check if tokenizer and text encoder are provided if tokenizer is None or text_encoder is None: raise ValueError("Tokenizer and text encoder must be provided.") # 2. Normalize inputs pretrained_model_name_or_paths = ( [pretrained_model_name_or_path] if not isinstance(pretrained_model_name_or_path, list) else pretrained_model_name_or_path ) # 2.1 Normalize tokens tokens = [token] if not isinstance(token, list) else token if tokens[0] is None: tokens = tokens * len(pretrained_model_name_or_paths) for check_token in tokens: # Check if token is already in tokenizer vocabulary if check_token in tokenizer.get_vocab(): raise ValueError( f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder." ) expected_shape = text_encoder.get_input_embeddings().weight.shape[-1] # Expected shape of tokenizer for search_word in pretrained_model_name_or_paths: if isinstance(search_word, str): # Update kwargs to ensure the model is downloaded and parameters are included _status = { "download": True, "include_params": True, "skip_error": False, "model_type": "TextualInversion", } # Get tags for the base model of textual inversion compatible with tokenizer. # If the tokenizer is 768-dimensional, set tags for SD 1.x and SDXL. # If the tokenizer is 1024-dimensional, set tags for SD 2.x. 
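                # expected_shape is the text encoder's embedding width: 768 for the CLIP
                # ViT-L encoder used by SD 1.x and SDXL, 1024 for the OpenCLIP encoder
                # used by SD 2.x, so the embedding file must match this dimension.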
if expected_shape in TOKENIZER_SHAPE_MAP: # Retrieve the appropriate tags from the TOKENIZER_SHAPE_MAP based on the expected shape tags = TOKENIZER_SHAPE_MAP[expected_shape] if base_model is not None: if isinstance(base_model, list): tags.extend(base_model) else: tags.append(base_model) _status["base_model"] = tags kwargs.update(_status) # Search for the model on Civitai and get the model status textual_inversion_path = search_civitai(search_word, **kwargs) logger.warning( f"textual_inversion_path: {search_word} -> {textual_inversion_path.model_status.site_url}" ) pretrained_model_name_or_paths[pretrained_model_name_or_paths.index(search_word)] = ( textual_inversion_path.model_path ) self.load_textual_inversion( pretrained_model_name_or_paths, token=tokens, tokenizer=tokenizer, text_encoder=text_encoder, **kwargs ) def auto_load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs ): r""" Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into `self.unet`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded into `self.text_encoder`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if isinstance(pretrained_model_name_or_path_or_dict, str): # Update kwargs to ensure the model is downloaded and parameters are included _status = { "download": True, "include_params": True, "skip_error": False, "model_type": "LORA", } kwargs.update(_status) # Search for the model on Civitai and get the model status lora_path = search_civitai(pretrained_model_name_or_path_or_dict, **kwargs) logger.warning(f"lora_path: {lora_path.model_status.site_url}") logger.warning(f"trained_words: {lora_path.extra_status.trained_words}") pretrained_model_name_or_path_or_dict = lora_path.model_path self.load_lora_weights(pretrained_model_name_or_path_or_dict, adapter_name=adapter_name, **kwargs) class EasyPipelineForText2Image(AutoPipelineForText2Image): r""" [`EasyPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The specific underlying pipeline class is automatically selected from either the [`~EasyPipelineForText2Image.from_pretrained`], [`~EasyPipelineForText2Image.from_pipe`], [`~EasyPipelineForText2Image.from_huggingface`] or [`~EasyPipelineForText2Image.from_civitai`] methods. This class cannot be instantiated using `__init__()` (throws an error). Class attributes: - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the diffusion pipeline's components. 
""" config_name = "model_index.json" def __init__(self, *args, **kwargs): # EnvironmentError is returned super().__init__() @classmethod @validate_hf_hub_args def from_huggingface(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. checkpoint_format (`str`, *optional*, defaults to `"single_file"`): The format of the model checkpoint. pipeline_tag (`str`, *optional*): Tag to filter models by pipeline. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. gated (`bool`, *optional*, defaults to `False` ): A boolean to filter models on the Hub that are gated or not. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`. 
</Tip> Examples: ```py >>> from auto_diffusers import EasyPipelineForText2Image >>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5") >>> image = pipeline(prompt).images[0] ``` """ # Update kwargs to ensure the model is downloaded and parameters are included _status = { "download": True, "include_params": True, "skip_error": False, "pipeline_tag": "text-to-image", } kwargs.update(_status) # Search for the model on Hugging Face and get the model status hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs) logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}") checkpoint_path = hf_checkpoint_status.model_path # Check the format of the model checkpoint if hf_checkpoint_status.loading_method == "from_single_file": # Load the pipeline from a single file checkpoint pipeline = load_pipeline_from_single_file( pretrained_model_or_path=checkpoint_path, pipeline_mapping=SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING, **kwargs, ) else: pipeline = cls.from_pretrained(checkpoint_path, **kwargs) return add_methods(pipeline) @classmethod def from_civitai(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. model_type (`str`, *optional*, defaults to `Checkpoint`): The type of model to search for. (for example `Checkpoint`, `TextualInversion`, `LORA`, `Controlnet`) base_model (`str`, *optional*): The base model to filter by. cache_dir (`str`, `Path`, *optional*): Path to the folder where cached files are stored. resume (`bool`, *optional*, defaults to `False`): Whether to resume an incomplete download. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`. </Tip> Examples: ```py >>> from auto_diffusers import EasyPipelineForText2Image >>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5") >>> image = pipeline(prompt).images[0] ``` """ # Update kwargs to ensure the model is downloaded and parameters are included _status = { "download": True, "include_params": True, "skip_error": False, "model_type": "Checkpoint", } kwargs.update(_status) # Search for the model on Civitai and get the model status checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs) logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}") checkpoint_path = checkpoint_status.model_path # Load the pipeline from a single file checkpoint pipeline = load_pipeline_from_single_file( pretrained_model_or_path=checkpoint_path, pipeline_mapping=SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING, **kwargs, ) return add_methods(pipeline) class EasyPipelineForImage2Image(AutoPipelineForImage2Image): r""" [`EasyPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image pipeline class. The specific underlying pipeline class is automatically selected from either the [`~EasyPipelineForImage2Image.from_pretrained`], [`~EasyPipelineForImage2Image.from_pipe`], [`~EasyPipelineForImage2Image.from_huggingface`] or [`~EasyPipelineForImage2Image.from_civitai`] methods. This class cannot be instantiated using `__init__()` (throws an error). Class attributes: - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the diffusion pipeline's components. 
""" config_name = "model_index.json" def __init__(self, *args, **kwargs): # EnvironmentError is returned super().__init__() @classmethod @validate_hf_hub_args def from_huggingface(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. checkpoint_format (`str`, *optional*, defaults to `"single_file"`): The format of the model checkpoint. pipeline_tag (`str`, *optional*): Tag to filter models by pipeline. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. gated (`bool`, *optional*, defaults to `False` ): A boolean to filter models on the Hub that are gated or not. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`. 
        </Tip>

        Examples:

        ```py
        >>> from auto_diffusers import EasyPipelineForImage2Image

        >>> pipeline = EasyPipelineForImage2Image.from_huggingface("stable-diffusion-v1-5")
        >>> image = pipeline(prompt, image).images[0]
        ```
        """
        # Update kwargs to ensure the model is downloaded and parameters are included
        _params = {
            "download": True,
            "include_params": True,
            "skip_error": False,
            "pipeline_tag": "image-to-image",
        }
        kwargs.update(_params)

        # Search for the model on Hugging Face and get the model status
        hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
        logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}")
        checkpoint_path = hf_checkpoint_status.model_path

        # Check the format of the model checkpoint
        if hf_checkpoint_status.loading_method == "from_single_file":
            # Load the pipeline from a single file checkpoint
            pipeline = load_pipeline_from_single_file(
                pretrained_model_or_path=checkpoint_path,
                pipeline_mapping=SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING,
                **kwargs,
            )
        else:
            pipeline = cls.from_pretrained(checkpoint_path, **kwargs)

        return add_methods(pipeline)

    @classmethod
    def from_civitai(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Parameters:
            pretrained_model_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A keyword to search for on Civitai (for example `Stable Diffusion`)
                    - Link to `.ckpt` or `.safetensors` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub.
                    - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
                      hosted on the Hub.
                    - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
                      saved using [`~DiffusionPipeline.save_pretrained`].
            model_type (`str`, *optional*, defaults to `Checkpoint`):
                The type of model to search for. (for example `Checkpoint`, `TextualInversion`, `LORA`, `Controlnet`)
            base_model (`str`, *optional*):
                The base model to filter by.
            cache_dir (`str`, `Path`, *optional*):
                Path to the folder where cached files are stored.
            resume (`bool`, *optional*, defaults to `False`):
                Whether to resume an incomplete download.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the
                dtype is automatically derived from the model's weights.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            output_loading_info(`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str`, *optional*):
                The token to use as HTTP bearer authorization for remote files.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn’t need to be defined for each
                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
                same device.
                For more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
                each GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                The path to offload weights if device_map contains the value `"disk"`.
            offload_state_dict (`bool`, *optional*):
                If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
                the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
                when there is some disk offload.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
                class). The overwritten components are passed directly to the pipelines `__init__` method. See example
                below for more information.

        <Tip>

        To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
        `hf auth login`.

        </Tip>

        Examples:

        ```py
        >>> from auto_diffusers import EasyPipelineForImage2Image

        >>> pipeline = EasyPipelineForImage2Image.from_civitai("stable-diffusion-v1-5")
        >>> image = pipeline(prompt, image).images[0]
        ```
        """
        # Update kwargs to ensure the model is downloaded and parameters are included
        _status = {
            "download": True,
            "include_params": True,
            "skip_error": False,
            "model_type": "Checkpoint",
        }
        kwargs.update(_status)

        # Search for the model on Civitai and get the model status
        checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
        logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")
        checkpoint_path = checkpoint_status.model_path

        # Load the pipeline from a single file checkpoint
        pipeline = load_pipeline_from_single_file(
            pretrained_model_or_path=checkpoint_path,
            pipeline_mapping=SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING,
            **kwargs,
        )

        return add_methods(pipeline)


class EasyPipelineForInpainting(AutoPipelineForInpainting):
    r"""

    [`EasyPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. The
    specific underlying pipeline class is automatically selected from either the
    [`~EasyPipelineForInpainting.from_pretrained`], [`~EasyPipelineForInpainting.from_pipe`],
    [`~EasyPipelineForInpainting.from_huggingface`] or [`~EasyPipelineForInpainting.from_civitai`] methods.

    This class cannot be instantiated using `__init__()` (throws an error).

    Class attributes:

        - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
          diffusion pipeline's components.
""" config_name = "model_index.json" def __init__(self, *args, **kwargs): # EnvironmentError is returned super().__init__() @classmethod @validate_hf_hub_args def from_huggingface(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. checkpoint_format (`str`, *optional*, defaults to `"single_file"`): The format of the model checkpoint. pipeline_tag (`str`, *optional*): Tag to filter models by pipeline. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. gated (`bool`, *optional*, defaults to `False` ): A boolean to filter models on the Hub that are gated or not. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. 
        <Tip>

        To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
        `hf auth login`.

        </Tip>

        Examples:

        ```py
        >>> from auto_diffusers import EasyPipelineForInpainting

        >>> pipeline = EasyPipelineForInpainting.from_huggingface("stable-diffusion-2-inpainting")
        >>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
        ```
        """
        # Update kwargs to ensure the model is downloaded and parameters are included
        _status = {
            "download": True,
            "include_params": True,
            "skip_error": False,
            "pipeline_tag": "image-to-image",
        }
        kwargs.update(_status)

        # Search for the model on Hugging Face and get the model status
        hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs)
        logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}")
        checkpoint_path = hf_checkpoint_status.model_path

        # Check the format of the model checkpoint
        if hf_checkpoint_status.loading_method == "from_single_file":
            # Load the pipeline from a single file checkpoint
            pipeline = load_pipeline_from_single_file(
                pretrained_model_or_path=checkpoint_path,
                pipeline_mapping=SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING,
                **kwargs,
            )
        else:
            pipeline = cls.from_pretrained(checkpoint_path, **kwargs)

        return add_methods(pipeline)

    @classmethod
    def from_civitai(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Parameters:
            pretrained_model_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A keyword to search for on Civitai (for example `Stable Diffusion`)
                    - Link to `.ckpt` or `.safetensors` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub.
                    - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
                      hosted on the Hub.
                    - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
                      saved using [`~DiffusionPipeline.save_pretrained`].
            model_type (`str`, *optional*, defaults to `Checkpoint`):
                The type of model to search for. (for example `Checkpoint`, `TextualInversion`, `LORA`, `Controlnet`)
            base_model (`str`, *optional*):
                The base model to filter by.
            cache_dir (`str`, `Path`, *optional*):
                Path to the folder where cached files are stored.
            resume (`bool`, *optional*, defaults to `False`):
                Whether to resume an incomplete download.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the
                dtype is automatically derived from the model's weights.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            output_loading_info(`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str`, *optional*):
                The token to use as HTTP bearer authorization for remote files.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn’t need to be defined for each
                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
                same device.
                Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`.
                For more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
                each GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                The path to offload weights if device_map contains the value `"disk"`.
            offload_state_dict (`bool`, *optional*):
                If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
                the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
                when there is some disk offload.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
                class). The overwritten components are passed directly to the pipelines `__init__` method. See example
                below for more information.

        <Tip>

        To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
        `hf auth login`.

        </Tip>

        Examples:

        ```py
        >>> from auto_diffusers import EasyPipelineForInpainting

        >>> pipeline = EasyPipelineForInpainting.from_civitai("stable-diffusion-2-inpainting")
        >>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
        ```
        """
        # Update kwargs to ensure the model is downloaded and parameters are included
        _status = {
            "download": True,
            "include_params": True,
            "skip_error": False,
            "model_type": "Checkpoint",
        }
        kwargs.update(_status)

        # Search for the model on Civitai and get the model status
        checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
        logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")
        checkpoint_path = checkpoint_status.model_path

        # Load the pipeline from a single file checkpoint
        pipeline = load_pipeline_from_single_file(
            pretrained_model_or_path=checkpoint_path,
            pipeline_mapping=SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING,
            **kwargs,
        )

        return add_methods(pipeline)
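

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module API). It assumes a
# CUDA device and that the keyword below resolves to a downloadable
# checkpoint; substitute your own search term or Civitai model name.
if __name__ == "__main__":
    # Resolve a text-to-image checkpoint by keyword search on the Hub and run it.
    pipeline = EasyPipelineForText2Image.from_huggingface("stable diffusion")
    pipeline.to("cuda")

    image = pipeline("A cat holding a sign that says hello world").images[0]
    image.save("text2image.png")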
diffusers/examples/model_search/pipeline_easy.py/0
{ "file_path": "diffusers/examples/model_search/pipeline_easy.py", "repo_id": "diffusers", "token_count": 39415 }
144
# AutoencoderKL training example

## Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

## Training on CIFAR10

Please replace the validation images with your own images.

```bash
accelerate launch train_autoencoderkl.py \
    --pretrained_model_name_or_path stabilityai/sd-vae-ft-mse \
    --dataset_name=cifar10 \
    --image_column=img \
    --validation_image images/bird.jpg images/car.jpg images/dog.jpg images/frog.jpg \
    --num_train_epochs 100 \
    --gradient_accumulation_steps 2 \
    --learning_rate 4.5e-6 \
    --lr_scheduler cosine \
    --report_to wandb \
```

## Training on ImageNet

```bash
accelerate launch train_autoencoderkl.py \
    --pretrained_model_name_or_path stabilityai/sd-vae-ft-mse \
    --num_train_epochs 100 \
    --gradient_accumulation_steps 2 \
    --learning_rate 4.5e-6 \
    --lr_scheduler cosine \
    --report_to wandb \
    --mixed_precision bf16 \
    --train_data_dir /path/to/ImageNet/train \
    --validation_image ./image.png \
    --decoder_only
```
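
## Checking the finetuned autoencoder

After training finishes, it can be useful to sanity-check the reconstruction quality of the saved autoencoder. The snippet below is a minimal sketch: it assumes the run above saved the model to `./vae-ft-output` (adjust the path to your `--output_dir`) and that `images/bird.jpg` is one of your validation images.

```python
import torch
from diffusers import AutoencoderKL
from diffusers.utils import load_image
from torchvision import transforms

vae = AutoencoderKL.from_pretrained("./vae-ft-output").to("cuda").eval()

# Preprocess one validation image to the training resolution and the [-1, 1] range
preprocess = transforms.Compose(
    [
        transforms.Resize(512),
        transforms.CenterCrop(512),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
image = preprocess(load_image("images/bird.jpg")).unsqueeze(0).to("cuda")

with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()
    reconstruction = vae.decode(latents).sample

# A rough reconstruction metric; lower is better
print("L1 reconstruction error:", (reconstruction - image).abs().mean().item())
```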
diffusers/examples/research_projects/autoencoderkl/README.md/0
{ "file_path": "diffusers/examples/research_projects/autoencoderkl/README.md", "repo_id": "diffusers", "token_count": 580 }
145
<jupyter_start><jupyter_code>%load_ext autoreload %autoreload 2 from diffusers import StableDiffusionGLIGENPipeline from transformers import CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, EulerDiscreteScheduler, UNet2DConditionModel, ) # pretrained_model_name_or_path = 'masterful/gligen-1-4-generation-text-box' pretrained_model_name_or_path = "/root/data/zhizhonghuang/checkpoints/models--masterful--gligen-1-4-generation-text-box/snapshots/d2820dc1e9ba6ca082051ce79cfd3eb468ae2c83" tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="tokenizer") noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_name_or_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(pretrained_model_name_or_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae") # unet = UNet2DConditionModel.from_pretrained( # pretrained_model_name_or_path, subfolder="unet" # ) noise_scheduler = EulerDiscreteScheduler.from_config(noise_scheduler.config) unet = UNet2DConditionModel.from_pretrained("/root/data/zhizhonghuang/ckpt/GLIGEN_Text_Retrain_COCO") pipe = StableDiffusionGLIGENPipeline( vae, text_encoder, tokenizer, unet, noise_scheduler, safety_checker=None, feature_extractor=None, ) pipe = pipe.to("cuda") import numpy as np # prompt = 'A realistic image of landscape scene depicting a green car parking on the left of a blue truck, with a red air balloon and a bird in the sky' # gen_boxes = [('a green car', [21, 281, 211, 159]), ('a blue truck', [269, 283, 209, 160]), ('a red air balloon', [66, 8, 145, 135]), ('a bird', [296, 42, 143, 100])] # prompt = 'A realistic top-down view of a wooden table with two apples on it' # gen_boxes = [('a wooden table', [20, 148, 472, 216]), ('an apple', [150, 226, 100, 100]), ('an apple', [280, 226, 100, 100])] # prompt = 'A realistic scene of three skiers standing in a line on the snow near a palm tree' # gen_boxes = [('a skier', [5, 152, 139, 168]), ('a skier', [278, 192, 121, 158]), ('a skier', [148, 173, 124, 155]), ('a palm tree', [404, 105, 103, 251])] prompt = "An oil painting of a pink dolphin jumping on the left of a steam boat on the sea" gen_boxes = [("a steam boat", [232, 225, 257, 149]), ("a jumping pink dolphin", [21, 249, 189, 123])] boxes = np.array([x[1] for x in gen_boxes]) boxes = boxes / 512 boxes[:, 2] = boxes[:, 0] + boxes[:, 2] boxes[:, 3] = boxes[:, 1] + boxes[:, 3] boxes = boxes.tolist() gligen_phrases = [x[0] for x in gen_boxes] images = pipe( prompt=prompt, gligen_phrases=gligen_phrases, gligen_boxes=boxes, gligen_scheduled_sampling_beta=1.0, output_type="pil", num_inference_steps=50, negative_prompt="artifacts, blurry, smooth texture, bad quality, distortions, unrealistic, distorted image, bad proportions, duplicate", num_images_per_prompt=16, ).images diffusers.utils.make_image_grid(images, 4, len(images) // 4)<jupyter_output><empty_output>
diffusers/examples/research_projects/gligen/demo.ipynb/0
{ "file_path": "diffusers/examples/research_projects/gligen/demo.ipynb", "repo_id": "diffusers", "token_count": 1156 }
146
# IP Adapter Training Example

[IP Adapter](https://huggingface.co/papers/2308.06721) is a novel approach designed to enhance text-to-image models such as Stable Diffusion by enabling them to generate images based on image prompts rather than text prompts alone. Unlike traditional methods that rely solely on complex text prompts, IP Adapter introduces the concept of using image prompts, leveraging the idea that "an image is worth a thousand words." By decoupling cross-attention layers for text and image features, IP Adapter effectively integrates image prompts into the generation process without the need for extensive fine-tuning or large computing resources.

## Training locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the example folder and run

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell e.g. a notebook

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

### Accelerate Launch Command Documentation

#### Description:
The Accelerate launch command is used to train a model using multiple GPUs and mixed precision training. It launches the training script `tutorial_train_ip-adapter.py` with specified parameters and configurations.

#### Usage Example:
```bash
accelerate launch --mixed_precision "fp16" \
  tutorial_train_ip-adapter.py \
  --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5/" \
  --image_encoder_path="{image_encoder_path}" \
  --data_json_file="{data.json}" \
  --data_root_path="{image_path}" \
  --mixed_precision="fp16" \
  --resolution=512 \
  --train_batch_size=8 \
  --dataloader_num_workers=4 \
  --learning_rate=1e-04 \
  --weight_decay=0.01 \
  --output_dir="{output_dir}" \
  --save_steps=10000
```

### Multi-GPU Script:
```bash
accelerate launch --num_processes 8 --multi_gpu --mixed_precision "fp16" \
  tutorial_train_ip-adapter.py \
  --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5/" \
  --image_encoder_path="{image_encoder_path}" \
  --data_json_file="{data.json}" \
  --data_root_path="{image_path}" \
  --mixed_precision="fp16" \
  --resolution=512 \
  --train_batch_size=8 \
  --dataloader_num_workers=4 \
  --learning_rate=1e-04 \
  --weight_decay=0.01 \
  --output_dir="{output_dir}" \
  --save_steps=10000
```

#### Parameters:
- `--num_processes`: Number of processes to launch for distributed training (in this example, 8 processes).
- `--multi_gpu`: Flag indicating the usage of multiple GPUs for training.
- `--mixed_precision "fp16"`: Enables mixed precision training with 16-bit floating-point precision.
- `tutorial_train_ip-adapter.py`: Name of the training script to be executed.
- `--pretrained_model_name_or_path`: Path or identifier for a pretrained model.
- `--image_encoder_path`: Path to the CLIP image encoder.
- `--data_json_file`: Path to the training data in JSON format.
- `--data_root_path`: Root path where training images are located.
- `--resolution`: Resolution of input images (512x512 in this example).
- `--train_batch_size`: Batch size for training data (8 in this example).
- `--dataloader_num_workers`: Number of subprocesses for data loading (4 in this example).
- `--learning_rate`: Learning rate for training (1e-04 in this example).
- `--weight_decay`: Weight decay for regularization (0.01 in this example).
- `--output_dir`: Directory to save model checkpoints and predictions.
- `--save_steps`: Frequency of saving checkpoints during training (10000 in this example).

### Inference

#### Description:
The provided inference code is used to load a trained model checkpoint and extract the components related to image projection and the IP (Image Prompt) adapter. These components are then saved into separate safetensors files for later use in inference.

#### Usage Example:
```python
from safetensors.torch import load_file, save_file

# Load the trained model checkpoint in safetensors format
ckpt = "checkpoint-50000/pytorch_model.safetensors"
sd = load_file(ckpt)  # Using safetensors load function

# Extract image projection and IP adapter components
image_proj_sd = {}
ip_sd = {}
for k in sd:
    if k.startswith("unet"):
        pass  # Skip unet-related keys
    elif k.startswith("image_proj_model"):
        image_proj_sd[k.replace("image_proj_model.", "")] = sd[k]
    elif k.startswith("adapter_modules"):
        ip_sd[k.replace("adapter_modules.", "")] = sd[k]

# Save the components into separate safetensors files
save_file(image_proj_sd, "image_proj.safetensors")
save_file(ip_sd, "ip_adapter.safetensors")
```

### Sample Inference Script using the CLIP Model

```python
import torch
from PIL import Image
from safetensors.torch import load_file
from transformers import CLIPProcessor, CLIPModel  # Using the Hugging Face CLIP model

# Load model components from safetensors
image_proj_ckpt = "image_proj.safetensors"
ip_adapter_ckpt = "ip_adapter.safetensors"

# Load the saved weights
image_proj_sd = load_file(image_proj_ckpt)
ip_adapter_sd = load_file(ip_adapter_ckpt)

# Define the model parameters
class ImageProjectionModel(torch.nn.Module):
    def __init__(self, input_dim=512, output_dim=512):  # `get_image_features` of clip-vit-base-patch32 returns 512-dim embeddings
        super().__init__()
        self.model = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.model(x)

class IPAdapterModel(torch.nn.Module):
    def __init__(self, input_dim=512, output_dim=10):  # Example for 10 classes
        super().__init__()
        self.model = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.model(x)

# Initialize models
image_proj_model = ImageProjectionModel()
ip_adapter_model = IPAdapterModel()

# Load weights into models
image_proj_model.load_state_dict(image_proj_sd)
ip_adapter_model.load_state_dict(ip_adapter_sd)

# Set models to evaluation mode
image_proj_model.eval()
ip_adapter_model.eval()

# Inference pipeline
def inference(image_tensor):
    """
    Run inference using the loaded models.
    Args:
        image_tensor: Preprocessed image tensor from CLIPProcessor

    Returns:
        Final inference results
    """
    with torch.no_grad():
        # Step 1: Project the image features
        image_proj = image_proj_model(image_tensor)

        # Step 2: Pass the projected features through the IP Adapter
        result = ip_adapter_model(image_proj)
    return result


# Using CLIP for image preprocessing
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")

# Image file path
image_path = "path/to/image.jpg"

# Load and preprocess the image (CLIPProcessor expects a PIL image, not a file path)
image = Image.open(image_path).convert("RGB")
inputs = processor(images=image, return_tensors="pt")
image_features = clip_model.get_image_features(inputs["pixel_values"])

# Normalize the image features as per CLIP's recommendations
image_features = image_features / image_features.norm(dim=-1, keepdim=True)

# Run inference
output = inference(image_features)
print("Inference output:", output)
```

#### Parameters:
- `ckpt`: Path to the trained model checkpoint file.
- `load_file(ckpt)`: Loads the safetensors checkpoint (onto the CPU by default).
- `image_proj_sd`: Dictionary to store the components related to image projection.
- `ip_sd`: Dictionary to store the components related to the IP adapter.
- `"unet"`, `"image_proj_model"`, `"adapter_modules"`: Prefixes indicating components of the model.
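
### Using the Extracted Weights with a Diffusers Pipeline

For image generation (rather than the toy classifier above), adapter weights can be plugged into a Stable Diffusion pipeline through the `load_ip_adapter` API. The snippet below is a sketch using the official pretrained IP-Adapter weights; the same call works with your own converted checkpoint as long as it follows the `image_proj.` / `ip_adapter.` key layout that diffusers expects.

```python
import torch
from diffusers import StableDiffusionPipeline
from diffusers.utils import load_image

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Swap in your own repo/file here once the converted weights use the expected layout
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe.set_ip_adapter_scale(0.6)

reference = load_image("path/to/reference.jpg")  # the image prompt
result = pipe(
    prompt="best quality, high quality",
    ip_adapter_image=reference,
    num_inference_steps=50,
).images[0]
result.save("ip_adapter_result.png")
```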
diffusers/examples/research_projects/ip_adapter/README.md/0
{ "file_path": "diffusers/examples/research_projects/ip_adapter/README.md", "repo_id": "diffusers", "token_count": 2605 }
147
""" The main idea for this code is to provide a way for users to not need to bother with the hassle of multiple tokens for a concept by typing a photo of <concept>_0 <concept>_1 ... and so on and instead just do a photo of <concept> which gets translated to the above. This needs to work for both inference and training. For inference, the tokenizer encodes the text. So, we would want logic for our tokenizer to replace the placeholder token with it's underlying vectors For training, we would want to abstract away some logic like 1. Adding tokens 2. Updating gradient mask 3. Saving embeddings to our Util class here. so TODO: 1. have tokenizer keep track of concept, multiconcept pairs and replace during encode call x 2. have mechanism for adding tokens x 3. have mech for saving emebeddings x 4. get mask to update x 5. Loading tokens from embedding x 6. Integrate to training x 7. Test """ import copy import random from transformers import CLIPTokenizer class MultiTokenCLIPTokenizer(CLIPTokenizer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.token_map = {} def try_adding_tokens(self, placeholder_token, *args, **kwargs): num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." ) def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs): output = [] if num_vec_per_token == 1: self.try_adding_tokens(placeholder_token, *args, **kwargs) output.append(placeholder_token) else: output = [] for i in range(num_vec_per_token): ith_token = placeholder_token + f"_{i}" self.try_adding_tokens(ith_token, *args, **kwargs) output.append(ith_token) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( f"The tokenizer already has placeholder token {token} that can get confused with" f" {placeholder_token}keep placeholder tokens independent" ) self.token_map[placeholder_token] = output def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0): """ Here, we replace the placeholder tokens in text recorded in token_map so that the text_encoder can encode them vector_shuffle was inspired by https://github.com/rinongal/textual_inversion/pull/119 where shuffling tokens were found to force the model to learn the concepts more descriptively. 
""" if isinstance(text, list): output = [] for i in range(len(text)): output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle)) return output for placeholder_token in self.token_map: if placeholder_token in text: tokens = self.token_map[placeholder_token] tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)] if vector_shuffle: tokens = copy.copy(tokens) random.shuffle(tokens) text = text.replace(placeholder_token, " ".join(tokens)) return text def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs): return super().__call__( self.replace_placeholder_tokens_in_text( text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load ), *args, **kwargs, ) def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs): return super().encode( self.replace_placeholder_tokens_in_text( text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load ), *args, **kwargs, )
diffusers/examples/research_projects/multi_token_textual_inversion/multi_token_clip.py/0
{ "file_path": "diffusers/examples/research_projects/multi_token_textual_inversion/multi_token_clip.py", "repo_id": "diffusers", "token_count": 1828 }
148
from typing import Any, Dict, Optional import torch from torch import nn from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.models import PixArtTransformer2DModel from diffusers.models.attention import BasicTransformerBlock from diffusers.models.modeling_outputs import Transformer2DModelOutput from diffusers.models.modeling_utils import ModelMixin class PixArtControlNetAdapterBlock(nn.Module): def __init__( self, block_index, # taken from PixArtTransformer2DModel num_attention_heads: int = 16, attention_head_dim: int = 72, dropout: float = 0.0, cross_attention_dim: Optional[int] = 1152, attention_bias: bool = True, activation_fn: str = "gelu-approximate", num_embeds_ada_norm: Optional[int] = 1000, upcast_attention: bool = False, norm_type: str = "ada_norm_single", norm_elementwise_affine: bool = False, norm_eps: float = 1e-6, attention_type: Optional[str] = "default", ): super().__init__() self.block_index = block_index self.inner_dim = num_attention_heads * attention_head_dim # the first block has a zero before layer if self.block_index == 0: self.before_proj = nn.Linear(self.inner_dim, self.inner_dim) nn.init.zeros_(self.before_proj.weight) nn.init.zeros_(self.before_proj.bias) self.transformer_block = BasicTransformerBlock( self.inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, upcast_attention=upcast_attention, norm_type=norm_type, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, attention_type=attention_type, ) self.after_proj = nn.Linear(self.inner_dim, self.inner_dim) nn.init.zeros_(self.after_proj.weight) nn.init.zeros_(self.after_proj.bias) def train(self, mode: bool = True): self.transformer_block.train(mode) if self.block_index == 0: self.before_proj.train(mode) self.after_proj.train(mode) def forward( self, hidden_states: torch.Tensor, controlnet_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, added_cond_kwargs: Dict[str, torch.Tensor] = None, cross_attention_kwargs: Dict[str, Any] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ): if self.block_index == 0: controlnet_states = self.before_proj(controlnet_states) controlnet_states = hidden_states + controlnet_states controlnet_states_down = self.transformer_block( hidden_states=controlnet_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, added_cond_kwargs=added_cond_kwargs, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, class_labels=None, ) controlnet_states_left = self.after_proj(controlnet_states_down) return controlnet_states_left, controlnet_states_down class PixArtControlNetAdapterModel(ModelMixin, ConfigMixin): # N=13, as specified in the paper https://arxiv.org/html/2401.05252v1/#S4 ControlNet-Transformer @register_to_config def __init__(self, num_layers=13) -> None: super().__init__() self.num_layers = num_layers self.controlnet_blocks = nn.ModuleList( [PixArtControlNetAdapterBlock(block_index=i) for i in range(num_layers)] ) @classmethod def from_transformer(cls, transformer: PixArtTransformer2DModel): control_net = PixArtControlNetAdapterModel() # copied the specified number of blocks from the transformer for depth in range(control_net.num_layers): 
            control_net.controlnet_blocks[depth].transformer_block.load_state_dict(
                transformer.transformer_blocks[depth].state_dict()
            )

        return control_net

    def train(self, mode: bool = True):
        for block in self.controlnet_blocks:
            block.train(mode)


class PixArtControlNetTransformerModel(ModelMixin, ConfigMixin):
    def __init__(
        self,
        transformer: PixArtTransformer2DModel,
        controlnet: PixArtControlNetAdapterModel,
        blocks_num=13,
        init_from_transformer=False,
        training=False,
    ):
        super().__init__()

        self.blocks_num = blocks_num
        self.gradient_checkpointing = False
        self.register_to_config(**transformer.config)
        self.training = training

        if init_from_transformer:
            # copies the blocks from the transformer into a fresh adapter
            # (`from_transformer` is a classmethod that takes only the transformer)
            controlnet = PixArtControlNetAdapterModel.from_transformer(transformer)

        self.transformer = transformer
        self.controlnet = controlnet

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        controlnet_cond: Optional[torch.Tensor] = None,
        added_cond_kwargs: Dict[str, torch.Tensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ):
        if self.transformer.use_additional_conditions and added_cond_kwargs is None:
            raise ValueError("`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`.")

        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
        #   we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
        #   we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
        # expects mask of shape:
        #   [batch, key_tokens]
        # adds singleton query_tokens dimension:
        #   [batch, 1, key_tokens]
        # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
        #   [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
        #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
        if attention_mask is not None and attention_mask.ndim == 2:
            # assume that mask is expressed as:
            #   (1 = keep, 0 = discard)
            # convert mask into a bias that can be added to attention scores:
            #   (keep = +0, discard = -10000.0)
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # convert encoder_attention_mask to a bias the same way we do for attention_mask
        if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
            encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
            encoder_attention_mask = encoder_attention_mask.unsqueeze(1)

        # 1. Input
        batch_size = hidden_states.shape[0]
        height, width = (
            hidden_states.shape[-2] // self.transformer.config.patch_size,
            hidden_states.shape[-1] // self.transformer.config.patch_size,
        )
        hidden_states = self.transformer.pos_embed(hidden_states)

        timestep, embedded_timestep = self.transformer.adaln_single(
            timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
        )

        if self.transformer.caption_projection is not None:
            encoder_hidden_states = self.transformer.caption_projection(encoder_hidden_states)
            encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])

        controlnet_states_down = None
        if controlnet_cond is not None:
            controlnet_states_down = self.transformer.pos_embed(controlnet_cond)

        # 2. Blocks
        for block_index, block in enumerate(self.transformer.transformer_blocks):
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                # rc todo: gradient checkpointing for training is not wired up yet
                raise NotImplementedError(
                    "Gradient checkpointing is not supported for the controlnet transformer model, yet."
                )
            else:
                # the control nets are only used for the blocks 1 to self.blocks_num
                if block_index > 0 and block_index <= self.blocks_num and controlnet_states_down is not None:
                    controlnet_states_left, controlnet_states_down = self.controlnet.controlnet_blocks[
                        block_index - 1
                    ](
                        hidden_states=hidden_states,  # used only in the first block
                        controlnet_states=controlnet_states_down,
                        encoder_hidden_states=encoder_hidden_states,
                        timestep=timestep,
                        added_cond_kwargs=added_cond_kwargs,
                        cross_attention_kwargs=cross_attention_kwargs,
                        attention_mask=attention_mask,
                        encoder_attention_mask=encoder_attention_mask,
                    )

                    hidden_states = hidden_states + controlnet_states_left

                hidden_states = block(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    timestep=timestep,
                    cross_attention_kwargs=cross_attention_kwargs,
                    class_labels=None,
                )

        # 3. Output
        shift, scale = (
            self.transformer.scale_shift_table[None]
            + embedded_timestep[:, None].to(self.transformer.scale_shift_table.device)
        ).chunk(2, dim=1)
        hidden_states = self.transformer.norm_out(hidden_states)

        # Modulation
        hidden_states = hidden_states * (1 + scale.to(hidden_states.device)) + shift.to(hidden_states.device)
        hidden_states = self.transformer.proj_out(hidden_states)
        hidden_states = hidden_states.squeeze(1)

        # unpatchify
        hidden_states = hidden_states.reshape(
            shape=(
                -1,
                height,
                width,
                self.transformer.config.patch_size,
                self.transformer.config.patch_size,
                self.transformer.out_channels,
            )
        )
        hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(
                -1,
                self.transformer.out_channels,
                height * self.transformer.config.patch_size,
                width * self.transformer.config.patch_size,
            )
        )

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
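

# ---------------------------------------------------------------------------
# Construction sketch (illustrative): it assumes the PixArt-alpha weights are
# reachable under the Hub id below; swap in your own checkpoint as needed.
if __name__ == "__main__":
    base = PixArtTransformer2DModel.from_pretrained(
        "PixArt-alpha/PixArt-XL-2-512x512", subfolder="transformer"
    )

    # Initialize the adapter by copying the first transformer blocks, then wrap
    # both models so the forward pass can consume a controlnet condition.
    adapter = PixArtControlNetAdapterModel.from_transformer(base)
    model = PixArtControlNetTransformerModel(transformer=base, controlnet=adapter)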
diffusers/examples/research_projects/pixart/controlnet_pixart_alpha.py/0
{ "file_path": "diffusers/examples/research_projects/pixart/controlnet_pixart_alpha.py", "repo_id": "diffusers", "token_count": 5561 }
149
import inspect from typing import Callable, List, Optional, Union import torch from PIL import Image from retriever import Retriever, normalize_images, preprocess_images from transformers import CLIPImageProcessor, CLIPModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, ImagePipelineOutput, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name class RDMPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text-to-image generation using Retrieval Augmented Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. clip ([`CLIPModel`]): Frozen CLIP model. Retrieval Augmented Diffusion uses the CLIP model, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" def __init__( self, vae: AutoencoderKL, clip: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], feature_extractor: CLIPImageProcessor, retriever: Optional[Retriever] = None, ): super().__init__() self.register_modules( vae=vae, clip=clip, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, ) # Copy from statement here and all the methods we take from stable_diffusion_pipeline self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.retriever = retriever def _encode_prompt(self, prompt): # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] prompt_embeds = self.clip.get_text_features(text_input_ids.to(self.device)) prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True) prompt_embeds = prompt_embeds[:, None, :] return prompt_embeds def _encode_image(self, retrieved_images, batch_size): if len(retrieved_images[0]) == 0: return None for i in range(len(retrieved_images)): retrieved_images[i] = normalize_images(retrieved_images[i]) retrieved_images[i] = preprocess_images(retrieved_images[i], self.feature_extractor).to( self.clip.device, dtype=self.clip.dtype ) _, c, h, w = retrieved_images[0].shape retrieved_images = torch.reshape(torch.cat(retrieved_images, dim=0), (-1, c, h, w)) image_embeddings = self.clip.get_image_features(retrieved_images) image_embeddings = image_embeddings / torch.linalg.norm(image_embeddings, dim=-1, keepdim=True) _, d = image_embeddings.shape image_embeddings = torch.reshape(image_embeddings, (batch_size, -1, d)) return image_embeddings def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def retrieve_images(self, retrieved_images, prompt_embeds, knn=10):
        if self.retriever is not None:
            additional_images = self.retriever.retrieve_imgs_batch(prompt_embeds[:, 0].cpu(), knn).total_examples
            for i in range(len(retrieved_images)):
                retrieved_images[i] += additional_images[i][self.retriever.config.image_column]
        return retrieved_images

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        retrieved_images: Optional[List[Image.Image]] = None,
        height: int = 768,
        width: int = 768,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        knn: Optional[int] = 10,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to 768):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 768):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if retrieved_images is not None:
            retrieved_images = [retrieved_images for _ in range(batch_size)]
        else:
            retrieved_images = [[] for _ in range(batch_size)]

        device = self._execution_device

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt_embeds is None:
            prompt_embeds = self._encode_prompt(prompt)

        retrieved_images = self.retrieve_images(retrieved_images, prompt_embeds, knn=knn)
        image_embeddings = self._encode_image(retrieved_images, batch_size)
        if image_embeddings is not None:
            prompt_embeds = torch.cat([prompt_embeds, image_embeddings], dim=1)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_embeddings = torch.zeros_like(prompt_embeds).to(prompt_embeds.device)
            # For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([uncond_embeddings, prompt_embeds]) # get the initial random noise unless the user supplied it num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # set timesteps self.scheduler.set_timesteps(num_inference_steps) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=[True] * image.shape[0] ) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
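

if __name__ == "__main__":
    # Minimal standalone sketch (not part of the pipeline itself) of the
    # classifier-free guidance step used above, run on dummy tensors. The
    # tensor shapes and guidance scale below are illustrative assumptions,
    # not values prescribed by this pipeline.
    import torch

    guidance_scale = 7.5
    # a doubled batch, as produced by `torch.cat([latents] * 2)` during sampling
    noise_pred = torch.randn(2, 4, 96, 96)
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # push the prediction away from the unconditional branch, toward the text branch
    guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    print(guided.shape)  # torch.Size([1, 4, 96, 96])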
diffusers/examples/research_projects/rdm/pipeline_rdm.py/0
{ "file_path": "diffusers/examples/research_projects/rdm/pipeline_rdm.py", "repo_id": "diffusers", "token_count": 7216 }
150
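The pipeline above augments text conditioning with CLIP embeddings of retrieved neighbor images. A minimal usage sketch follows; it assumes the class defined in the file is named `RDMPipeline` and subclasses `DiffusionPipeline` (so `from_pretrained` is available), and the checkpoint id is hypothetical — only the call keywords come from the signature above.

import torch
from PIL import Image
from pipeline_rdm import RDMPipeline  # assumed module/class names for the file above

pipe = RDMPipeline.from_pretrained("some-user/rdm-checkpoint", torch_dtype=torch.float16)  # hypothetical id
pipe = pipe.to("cuda")

# optional manual conditioning images; a configured retriever appends `knn` neighbors per prompt
seed_images = [Image.open("reference.jpg")]

image = pipe(
    prompt="a photo of an astronaut riding a horse",
    retrieved_images=seed_images,
    height=768,
    width=768,
    num_inference_steps=50,
    guidance_scale=7.5,
    knn=10,
).images[0]
image.save("rdm_sample.png")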
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fine-tuning script for Stable Diffusion XL for text2image.""" import argparse import functools import gc import logging import math import os import random import shutil from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import concatenate_datasets, load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from torchvision.transforms.functional import crop from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__) DATASET_NAME_MAPPING = { "lambdalabs/naruto-blip-captions": ("image", "text"), } def save_model_card( repo_id: str, images: list = None, validation_prompt: str = None, base_model: str = None, dataset_name: str = None, repo_folder: str = None, vae_path: str = None, ): img_str = "" if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" model_description = f""" # Text-to-image finetuning - {repo_id} This pipeline was finetuned from **{base_model}** on the **{dataset_name}** dataset. Below are some example images generated with the finetuned pipeline using the following prompt: {validation_prompt}: \n {img_str} Special VAE used for training: {vae_path}. 
""" model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="creativeml-openrail-m", base_model=base_model, model_description=model_description, inference=True, ) tags = [ "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "text-to-image", "diffusers-training", "diffusers", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=1, help=( "Run fine-tuning validation every X epochs. 
The validation process consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", ) parser.add_argument( "--output_dir", type=str, default="sdxl-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=1024, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. 
Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--timestep_bias_strategy",
        type=str,
        default="none",
        choices=["earlier", "later", "range", "none"],
        help=(
            "The timestep bias strategy, which may help direct the model toward learning low or high frequency details."
            " Choices: ['earlier', 'later', 'range', 'none']."
            " The default is 'none', which means no bias is applied, and training proceeds normally."
            " The value of 'later' will increase the frequency of the model's final training timesteps."
        ),
    )
    parser.add_argument(
        "--timestep_bias_multiplier",
        type=float,
        default=1.0,
        help=(
            "The multiplier for the bias. Defaults to 1.0, which means no bias is applied."
            " A value of 2.0 will double the weight of the bias, and a value of 0.5 will halve it."
        ),
    )
    parser.add_argument(
        "--timestep_bias_begin",
        type=int,
        default=0,
        help=(
            "When using `--timestep_bias_strategy=range`, the beginning (inclusive) timestep to bias."
            " Defaults to zero, which equates to having no specific bias."
        ),
    )
    parser.add_argument(
        "--timestep_bias_end",
        type=int,
        default=1000,
        help=(
            "When using `--timestep_bias_strategy=range`, the final timestep (inclusive) to bias."
            " Defaults to 1000, which is the number of timesteps that Stable Diffusion is trained on."
        ),
    )
    parser.add_argument(
        "--timestep_bias_portion",
        type=float,
        default=0.25,
        help=(
            "The portion of timesteps to bias. Defaults to 0.25, which means 25% of timesteps will be biased."
            " A value of 0.5 will bias one half of the timesteps. The value provided for `--timestep_bias_strategy` determines"
            " whether the biased portions are in the earlier or later timesteps."
        ),
    )
    parser.add_argument(
        "--snr_gamma",
        type=float,
        default=None,
        help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
        "More details here: https://huggingface.co/papers/2303.09556.",
    )
    parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--prediction_type",
        type=str,
        default=None,
        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
    parser.add_argument(
        "--loss_type",
        type=str,
        default="l2",
        choices=["l2", "huber", "smooth_l1"],
        help="The type of loss to use and whether it's timestep-scheduled. See Issue #7488 for more info.",
    )
    parser.add_argument(
        "--huber_schedule",
        type=str,
        default="snr",
        choices=["constant", "exponential", "snr"],
        help="The schedule to use for the Huber loss parameter.",
    )
    parser.add_argument(
        "--huber_c",
        type=float,
        default=0.1,
        help="The huber loss parameter. 
Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type.", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") return args # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompt(batch, text_encoders, tokenizers, proportion_empty_prompts, caption_column, is_train=True): prompt_embeds_list = [] prompt_batch = batch[caption_column] captions = [] for caption in prompt_batch: if random.random() < proportion_empty_prompts: captions.append("") elif isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) with torch.no_grad(): for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( captions, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder( text_input_ids.to(text_encoder.device), output_hidden_states=True, return_dict=False, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds[-1][-2] bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) return {"prompt_embeds": prompt_embeds.cpu(), "pooled_prompt_embeds": pooled_prompt_embeds.cpu()} def compute_vae_encodings(batch, vae): images = batch.pop("pixel_values") pixel_values = torch.stack(list(images)) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) with torch.no_grad(): model_input = vae.encode(pixel_values).latent_dist.sample() model_input = model_input * vae.config.scaling_factor return {"model_input": model_input.cpu()} def generate_timestep_weights(args, num_timesteps): weights = torch.ones(num_timesteps) # Determine the indices to bias num_to_bias = int(args.timestep_bias_portion * num_timesteps) if args.timestep_bias_strategy == "later": bias_indices = slice(-num_to_bias, None) elif args.timestep_bias_strategy == "earlier": bias_indices = slice(0, num_to_bias) elif args.timestep_bias_strategy == "range": # Out of the possible 1000 timesteps, we might want to focus on eg. 200-500. range_begin = args.timestep_bias_begin range_end = args.timestep_bias_end if range_begin < 0: raise ValueError( "When using the range strategy for timestep bias, you must provide a beginning timestep greater or equal to zero." ) if range_end > num_timesteps: raise ValueError( "When using the range strategy for timestep bias, you must provide an ending timestep smaller than the number of timesteps." 
        )
        bias_indices = slice(range_begin, range_end)
    else:  # 'none' or any other string
        return weights

    if args.timestep_bias_multiplier <= 0:
        raise ValueError(
            "The parameter --timestep_bias_multiplier is not intended to be used to disable the training of specific timesteps."
            " If it was intended to disable timestep bias, use `--timestep_bias_strategy none` instead."
            " A timestep bias multiplier less than or equal to 0 is not allowed."
        )

    # Apply the bias
    weights[bias_indices] *= args.timestep_bias_multiplier

    # Normalize
    weights /= weights.sum()

    return weights


# NOTE: if you're using the scheduled version, huber_c has to depend on the timesteps already
def conditional_loss(
    model_pred: torch.Tensor,
    target: torch.Tensor,
    reduction: str = "mean",
    loss_type: str = "l2",
    huber_c: float = 0.1,
):
    if loss_type == "l2":
        loss = F.mse_loss(model_pred, target, reduction=reduction)
    elif loss_type == "huber":
        loss = 2 * huber_c * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c)
        if reduction == "mean":
            loss = torch.mean(loss)
        elif reduction == "sum":
            loss = torch.sum(loss)
    elif loss_type == "smooth_l1":
        loss = 2 * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c)
        if reduction == "mean":
            loss = torch.mean(loss)
        elif reduction == "sum":
            loss = torch.sum(loss)
    else:
        raise NotImplementedError(f"Unsupported Loss Type {loss_type}")
    return loss


def main(args):
    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `hf auth login` to authenticate with the Hub."
        )

    logging_dir = Path(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    if args.report_to == "wandb":
        if not is_wandb_available():
            raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
        import wandb

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizers tokenizer_one = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) tokenizer_two = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False, ) # import correct text encoder classes text_encoder_cls_one = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision ) text_encoder_cls_two = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" ) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") # Check for terminal SNR in combination with SNR Gamma text_encoder_one = text_encoder_cls_one.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) text_encoder_two = text_encoder_cls_two.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant ) vae_path = ( args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path ) vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) # Freeze vae and text encoders. vae.requires_grad_(False) text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) # Set unet as trainable. unet.train() # For mixed precision training we cast all non-trainable weights to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet, vae and text_encoder to device and cast to weight_dtype # The VAE is in float32 to avoid NaN losses. vae.to(accelerator.device, dtype=torch.float32) text_encoder_one.to(accelerator.device, dtype=weight_dtype) text_encoder_two.to(accelerator.device, dtype=weight_dtype) # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." 
) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for _ in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = unet.parameters() optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
    dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
    if args.image_column is None:
        image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
    else:
        image_column = args.image_column
        if image_column not in column_names:
            raise ValueError(
                f"`--image_column` value '{args.image_column}' needs to be one of: {', '.join(column_names)}"
            )
    if args.caption_column is None:
        caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
    else:
        caption_column = args.caption_column
        if caption_column not in column_names:
            raise ValueError(
                f"`--caption_column` value '{args.caption_column}' needs to be one of: {', '.join(column_names)}"
            )

    # Preprocessing the datasets.
    train_resize = transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR)
    train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution)
    train_flip = transforms.RandomHorizontalFlip(p=1.0)
    train_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])

    def preprocess_train(examples):
        images = [image.convert("RGB") for image in examples[image_column]]
        # image aug
        original_sizes = []
        all_images = []
        crop_top_lefts = []
        for image in images:
            original_sizes.append((image.height, image.width))
            image = train_resize(image)
            if args.random_flip and random.random() < 0.5:
                # flip
                image = train_flip(image)
            if args.center_crop:
                y1 = max(0, int(round((image.height - args.resolution) / 2.0)))
                x1 = max(0, int(round((image.width - args.resolution) / 2.0)))
                image = train_crop(image)
            else:
                y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution))
                image = crop(image, y1, x1, h, w)
            crop_top_left = (y1, x1)
            crop_top_lefts.append(crop_top_left)
            image = train_transforms(image)
            all_images.append(image)

        examples["original_sizes"] = original_sizes
        examples["crop_top_lefts"] = crop_top_lefts
        examples["pixel_values"] = all_images
        return examples

    with accelerator.main_process_first():
        if args.max_train_samples is not None:
            dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
        # Set the training transforms
        train_dataset = dataset["train"].with_transform(preprocess_train)

    # Let's first compute all the embeddings so that we can free up the text encoders
    # from memory. We will pre-compute the VAE encodings too.
text_encoders = [text_encoder_one, text_encoder_two] tokenizers = [tokenizer_one, tokenizer_two] compute_embeddings_fn = functools.partial( encode_prompt, text_encoders=text_encoders, tokenizers=tokenizers, proportion_empty_prompts=args.proportion_empty_prompts, caption_column=args.caption_column, ) compute_vae_encodings_fn = functools.partial(compute_vae_encodings, vae=vae) with accelerator.main_process_first(): from datasets.fingerprint import Hasher # fingerprint used by the cache for the other processes to load the result # details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401 new_fingerprint = Hasher.hash(args) new_fingerprint_for_vae = Hasher.hash(vae_path) train_dataset_with_embeddings = train_dataset.map( compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint ) train_dataset_with_vae = train_dataset.map( compute_vae_encodings_fn, batched=True, batch_size=args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps, new_fingerprint=new_fingerprint_for_vae, ) precomputed_dataset = concatenate_datasets( [train_dataset_with_embeddings, train_dataset_with_vae.remove_columns(["image", "text"])], axis=1 ) precomputed_dataset = precomputed_dataset.with_transform(preprocess_train) del compute_vae_encodings_fn, compute_embeddings_fn, text_encoder_one, text_encoder_two del text_encoders, tokenizers, vae gc.collect() torch.cuda.empty_cache() def collate_fn(examples): model_input = torch.stack([torch.tensor(example["model_input"]) for example in examples]) original_sizes = [example["original_sizes"] for example in examples] crop_top_lefts = [example["crop_top_lefts"] for example in examples] prompt_embeds = torch.stack([torch.tensor(example["prompt_embeds"]) for example in examples]) pooled_prompt_embeds = torch.stack([torch.tensor(example["pooled_prompt_embeds"]) for example in examples]) return { "model_input": model_input, "prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "original_sizes": original_sizes, "crop_top_lefts": crop_top_lefts, } # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( precomputed_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("text2image-fine-tune-sdxl", config=vars(args)) # Function for unwrapping if torch.compile() was used in accelerate. def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(precomputed_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Sample noise that we'll add to the latents model_input = batch["model_input"].to(accelerator.device) noise = torch.randn_like(model_input) if args.noise_offset: # https://www.crosslabs.org//blog/diffusion-with-offset-noise noise += args.noise_offset * torch.randn( (model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device ) bsz = model_input.shape[0] if args.timestep_bias_strategy == "none": # Sample a random timestep for each image if args.loss_type == "huber" or args.loss_type == "smooth_l1": timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (1,), device="cpu") timestep = timesteps.item() if args.huber_schedule == "exponential": alpha = -math.log(args.huber_c) / noise_scheduler.config.num_train_timesteps huber_c = math.exp(-alpha * timestep) elif args.huber_schedule == "snr": alphas_cumprod = noise_scheduler.alphas_cumprod[timestep] sigmas = ((1.0 - alphas_cumprod) / alphas_cumprod) ** 0.5 huber_c = (1 - args.huber_c) / (1 + sigmas) ** 2 + args.huber_c elif args.huber_schedule == "constant": huber_c = args.huber_c else: raise NotImplementedError(f"Unknown Huber loss schedule {args.huber_schedule}!") timesteps = timesteps.repeat(bsz).to(model_input.device) elif args.loss_type == "l2": timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device ) huber_c = 1 # may be anything, as it's not used else: raise NotImplementedError(f"Unknown loss type {args.loss_type}") timesteps = timesteps.long() else: if "huber_scheduled" in args.loss_type: raise NotImplementedError( "Randomly weighted timesteps not implemented yet for scheduled huber loss!" ) else: huber_c = args.huber_c # Sample a random timestep for each image, potentially biased by the timestep weights. # Biasing the timestep weights allows us to spend less time training irrelevant timesteps. 
weights = generate_timestep_weights(args, noise_scheduler.config.num_train_timesteps).to( model_input.device ) timesteps = torch.multinomial(weights, bsz, replacement=True).long() # Add noise to the model input according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps) # time ids def compute_time_ids(original_size, crops_coords_top_left): # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids target_size = (args.resolution, args.resolution) add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = torch.tensor([add_time_ids]) add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype) return add_time_ids add_time_ids = torch.cat( [compute_time_ids(s, c) for s, c in zip(batch["original_sizes"], batch["crop_top_lefts"])] ) # Predict the noise residual unet_added_conditions = {"time_ids": add_time_ids} prompt_embeds = batch["prompt_embeds"].to(accelerator.device) pooled_prompt_embeds = batch["pooled_prompt_embeds"].to(accelerator.device) unet_added_conditions.update({"text_embeds": pooled_prompt_embeds}) model_pred = unet( noisy_model_input, timesteps, prompt_embeds, added_cond_kwargs=unet_added_conditions, return_dict=False, )[0] # Get the target for loss depending on the prediction type if args.prediction_type is not None: # set prediction_type of scheduler if defined noise_scheduler.register_to_config(prediction_type=args.prediction_type) if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(model_input, noise, timesteps) elif noise_scheduler.config.prediction_type == "sample": # We set the target to latents here, but the model_pred will return the noise sample prediction. target = model_input # We will have to subtract the noise residual from the prediction to get the target sample. model_pred = model_pred - noise else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.snr_gamma is None: loss = conditional_loss( model_pred.float(), target.float(), reduction="mean", loss_type=args.loss_type, huber_c=huber_c ) else: # Compute loss-weights as per Section 3.4 of https://huggingface.co/papers/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = conditional_loss( model_pred.float(), target.float(), reduction="none", loss_type=args.loss_type, huber_c=huber_c ) loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = unet.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompt is not None and epoch % args.validation_epochs == 0: logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. 
ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) # create pipeline vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, ) pipeline = StableDiffusionXLPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=vae, unet=accelerator.unwrap_model(unet), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) if args.prediction_type is not None: scheduler_args = {"prediction_type": args.prediction_type} pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None pipeline_args = {"prompt": args.validation_prompt} with torch.cuda.amp.autocast(): images = [ pipeline(**pipeline_args, generator=generator, num_inference_steps=25).images[0] for _ in range(args.num_validation_images) ] for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) # Serialize pipeline. vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = StableDiffusionXLPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=unet, vae=vae, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) if args.prediction_type is not None: scheduler_args = {"prediction_type": args.prediction_type} pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args) pipeline.save_pretrained(args.output_dir) # run inference images = [] if args.validation_prompt and args.num_validation_images > 0: pipeline = pipeline.to(accelerator.device) generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None with torch.cuda.amp.autocast(): images = [ pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] for _ in range(args.num_validation_images) ] for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "test": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) if args.push_to_hub: save_model_card( repo_id=repo_id, images=images, validation_prompt=args.validation_prompt, base_model=args.pretrained_model_name_or_path, dataset_name=args.dataset_name, repo_folder=args.output_dir, vae_path=args.pretrained_vae_model_name_or_path, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
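
# Example launch (illustrative only; the model id, VAE id, dataset, and hyperparameter
# values below are assumptions, not defaults baked into this script — every flag,
# however, is defined in `parse_args` above):
#
#   accelerate launch train_text_to_image_sdxl.py \
#     --pretrained_model_name_or_path stabilityai/stable-diffusion-xl-base-1.0 \
#     --pretrained_vae_model_name_or_path madebyollin/sdxl-vae-fp16-fix \
#     --dataset_name lambdalabs/naruto-blip-captions \
#     --resolution 1024 --train_batch_size 1 --gradient_accumulation_steps 4 \
#     --max_train_steps 1000 --learning_rate 1e-05 --mixed_precision fp16 \
#     --loss_type huber --huber_schedule snr --huber_c 0.1 \
#     --output_dir sdxl-naruto-huber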
diffusers/examples/research_projects/scheduled_huber_loss_training/text_to_image/train_text_to_image_sdxl.py/0
{ "file_path": "diffusers/examples/research_projects/scheduled_huber_loss_training/text_to_image/train_text_to_image_sdxl.py", "repo_id": "diffusers", "token_count": 26772 }
151
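The script above replaces the usual MSE objective with a pseudo-Huber loss whose transition point `huber_c` can itself be scheduled over timesteps. A self-contained sketch of the loss and its limiting behavior follows; the `pseudo_huber` helper name and the residual values are illustrative, but the formulas mirror `conditional_loss` and the "snr" schedule in the script.

import torch

def pseudo_huber(model_pred: torch.Tensor, target: torch.Tensor, huber_c: float) -> torch.Tensor:
    # 2c * (sqrt(r^2 + c^2) - c): behaves like r^2 for |r| << c (L2-like)
    # and like 2c * |r| for |r| >> c (L1-like, robust to outliers)
    return 2 * huber_c * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c)

residuals = torch.tensor([0.01, 0.1, 10.0])
print(pseudo_huber(residuals, torch.zeros_like(residuals), huber_c=0.1))
# small residuals are penalized quadratically, large ones roughly linearly

# the "snr" schedule shrinks huber_c toward the base value at noisy (high-sigma) timesteps:
alphas_cumprod = torch.tensor(0.5)  # arbitrary example value for one timestep
sigma = ((1.0 - alphas_cumprod) / alphas_cumprod) ** 0.5
print((1 - 0.1) / (1 + sigma) ** 2 + 0.1)  # scheduled huber_c for base value 0.1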
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile import safetensors from diffusers import DiffusionPipeline # noqa: E402 sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class TextToImageLoRA(ExamplesTestsAccelerate): def test_text_to_image_lora_sdxl_checkpointing_checkpoints_total_limit(self): prompt = "a prompt" pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_text_to_image_lora_checkpointing_checkpoints_total_limit(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in 
os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_text_to_image_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # resume and we should try to checkpoint at 6, where we'll have to remove # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint resume_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 8 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + resume_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}, ) class TextToImageLoRASDXL(ExamplesTestsAccelerate): def test_text_to_image_lora_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. 
            lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
            is_lora = all("lora" in k for k in lora_state_dict.keys())
            self.assertTrue(is_lora)

    def test_text_to_image_lora_sdxl_with_text_encoder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            test_args = f"""
                examples/text_to_image/train_text_to_image_lora_sdxl.py
                --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe
                --dataset_name hf-internal-testing/dummy_image_text_data
                --resolution 64
                --train_batch_size 1
                --gradient_accumulation_steps 1
                --max_train_steps 2
                --learning_rate 5.0e-04
                --scale_lr
                --lr_scheduler constant
                --lr_warmup_steps 0
                --output_dir {tmpdir}
                --train_text_encoder
                """.split()
            run_command(self._launch_args + test_args)
            # save_pretrained smoke test
            self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))

            # make sure the state_dict has the correct naming in the parameters.
            lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
            is_lora = all("lora" in k for k in lora_state_dict.keys())
            self.assertTrue(is_lora)

            # when training the text encoder, all the parameters in the state dict should start
            # with `"unet"`, `"text_encoder"`, or `"text_encoder_2"` in their names.
            keys = lora_state_dict.keys()
            starts_with_unet = all(
                k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys
            )
            self.assertTrue(starts_with_unet)

    def test_text_to_image_lora_sdxl_text_encoder_checkpointing_checkpoints_total_limit(self):
        prompt = "a prompt"
        pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"

        with tempfile.TemporaryDirectory() as tmpdir:
            # Run training script with checkpointing
            # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2
            # Should create checkpoints at steps 2, 4, 6
            # with checkpoint at step 2 deleted

            initial_run_args = f"""
                examples/text_to_image/train_text_to_image_lora_sdxl.py
                --pretrained_model_name_or_path {pipeline_path}
                --dataset_name hf-internal-testing/dummy_image_text_data
                --resolution 64
                --train_batch_size 1
                --gradient_accumulation_steps 1
                --max_train_steps 6
                --learning_rate 5.0e-04
                --scale_lr
                --lr_scheduler constant
                --train_text_encoder
                --lr_warmup_steps 0
                --output_dir {tmpdir}
                --checkpointing_steps=2
                --checkpoints_total_limit=2
                """.split()

            run_command(self._launch_args + initial_run_args)

            pipe = DiffusionPipeline.from_pretrained(pipeline_path)
            pipe.load_lora_weights(tmpdir)
            pipe(prompt, num_inference_steps=1)

            # check checkpoint directories exist
            # checkpoint-2 should have been deleted
            self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
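The checkpoints_total_limit assertions above all exercise the same rotation rule in the training scripts: before a new checkpoint-<step> directory is written, the oldest directories are pruned so the total stays within the limit. A minimal sketch of that pruning logic, assuming directories named checkpoint-<step> (the prune_checkpoints helper is hypothetical; the training scripts inline this logic):

import os
import shutil


def prune_checkpoints(output_dir: str, total_limit: int) -> None:
    # Sort existing checkpoints by step and delete the oldest ones so that,
    # after the upcoming save, at most `total_limit` directories remain.
    checkpoints = [d for d in os.listdir(output_dir) if d.startswith("checkpoint")]
    checkpoints = sorted(checkpoints, key=lambda d: int(d.split("-")[1]))
    if len(checkpoints) >= total_limit:
        num_to_remove = len(checkpoints) - total_limit + 1
        for folder in checkpoints[:num_to_remove]:
            shutil.rmtree(os.path.join(output_dir, folder))

With max_train_steps=6, checkpointing_steps=2 and a limit of 2, this leaves exactly {checkpoint-4, checkpoint-6}, which is what the tests assert.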
diffusers/examples/text_to_image/test_text_to_image_lora.py
{ "file_path": "diffusers/examples/text_to_image/test_text_to_image_lora.py", "repo_id": "diffusers", "token_count": 6179 }
152
import argparse import time from pathlib import Path from typing import Any, Dict, Literal import torch from diffusers import AsymmetricAutoencoderKL ASYMMETRIC_AUTOENCODER_KL_x_1_5_CONFIG = { "in_channels": 3, "out_channels": 3, "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], "down_block_out_channels": [128, 256, 512, 512], "layers_per_down_block": 2, "up_block_types": [ "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", ], "up_block_out_channels": [192, 384, 768, 768], "layers_per_up_block": 3, "act_fn": "silu", "latent_channels": 4, "norm_num_groups": 32, "sample_size": 256, "scaling_factor": 0.18215, } ASYMMETRIC_AUTOENCODER_KL_x_2_CONFIG = { "in_channels": 3, "out_channels": 3, "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], "down_block_out_channels": [128, 256, 512, 512], "layers_per_down_block": 2, "up_block_types": [ "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", ], "up_block_out_channels": [256, 512, 1024, 1024], "layers_per_up_block": 5, "act_fn": "silu", "latent_channels": 4, "norm_num_groups": 32, "sample_size": 256, "scaling_factor": 0.18215, } def convert_asymmetric_autoencoder_kl_state_dict(original_state_dict: Dict[str, Any]) -> Dict[str, Any]: converted_state_dict = {} for k, v in original_state_dict.items(): if k.startswith("encoder."): converted_state_dict[ k.replace("encoder.down.", "encoder.down_blocks.") .replace("encoder.mid.", "encoder.mid_block.") .replace("encoder.norm_out.", "encoder.conv_norm_out.") .replace(".downsample.", ".downsamplers.0.") .replace(".nin_shortcut.", ".conv_shortcut.") .replace(".block.", ".resnets.") .replace(".block_1.", ".resnets.0.") .replace(".block_2.", ".resnets.1.") .replace(".attn_1.k.", ".attentions.0.to_k.") .replace(".attn_1.q.", ".attentions.0.to_q.") .replace(".attn_1.v.", ".attentions.0.to_v.") .replace(".attn_1.proj_out.", ".attentions.0.to_out.0.") .replace(".attn_1.norm.", ".attentions.0.group_norm.") ] = v elif k.startswith("decoder.") and "up_layers" not in k: converted_state_dict[ k.replace("decoder.encoder.", "decoder.condition_encoder.") .replace(".norm_out.", ".conv_norm_out.") .replace(".up.0.", ".up_blocks.3.") .replace(".up.1.", ".up_blocks.2.") .replace(".up.2.", ".up_blocks.1.") .replace(".up.3.", ".up_blocks.0.") .replace(".block.", ".resnets.") .replace("mid", "mid_block") .replace(".0.upsample.", ".0.upsamplers.0.") .replace(".1.upsample.", ".1.upsamplers.0.") .replace(".2.upsample.", ".2.upsamplers.0.") .replace(".nin_shortcut.", ".conv_shortcut.") .replace(".block_1.", ".resnets.0.") .replace(".block_2.", ".resnets.1.") .replace(".attn_1.k.", ".attentions.0.to_k.") .replace(".attn_1.q.", ".attentions.0.to_q.") .replace(".attn_1.v.", ".attentions.0.to_v.") .replace(".attn_1.proj_out.", ".attentions.0.to_out.0.") .replace(".attn_1.norm.", ".attentions.0.group_norm.") ] = v elif k.startswith("quant_conv."): converted_state_dict[k] = v elif k.startswith("post_quant_conv."): converted_state_dict[k] = v else: print(f" skipping key `{k}`") # fix weights shape for k, v in converted_state_dict.items(): if ( (k.startswith("encoder.mid_block.attentions.0") or k.startswith("decoder.mid_block.attentions.0")) and k.endswith("weight") and ("to_q" in k or "to_k" in k or "to_v" in k or "to_out" in k) ): converted_state_dict[k] = converted_state_dict[k][:, :, 0, 0] return converted_state_dict def 
get_asymmetric_autoencoder_kl_from_original_checkpoint(
    scale: Literal["1.5", "2"], original_checkpoint_path: str, map_location: torch.device
) -> AsymmetricAutoencoderKL:
    print("Loading original state_dict")
    original_state_dict = torch.load(original_checkpoint_path, map_location=map_location)
    original_state_dict = original_state_dict["state_dict"]

    print("Converting state_dict")
    converted_state_dict = convert_asymmetric_autoencoder_kl_state_dict(original_state_dict)

    kwargs = ASYMMETRIC_AUTOENCODER_KL_x_1_5_CONFIG if scale == "1.5" else ASYMMETRIC_AUTOENCODER_KL_x_2_CONFIG

    print("Initializing AsymmetricAutoencoderKL model")
    asymmetric_autoencoder_kl = AsymmetricAutoencoderKL(**kwargs)

    print("Loading weights from converted state_dict")
    asymmetric_autoencoder_kl.load_state_dict(converted_state_dict)
    asymmetric_autoencoder_kl.eval()

    print("AsymmetricAutoencoderKL successfully initialized")
    return asymmetric_autoencoder_kl


if __name__ == "__main__":
    start = time.time()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--scale",
        default=None,
        type=str,
        required=True,
        help="Asymmetric VQGAN scale: `1.5` or `2`",
    )
    parser.add_argument(
        "--original_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the original Asymmetric VQGAN checkpoint",
    )
    parser.add_argument(
        "--output_path",
        default=None,
        type=str,
        required=True,
        help="Path to save pretrained AsymmetricAutoencoderKL model",
    )
    parser.add_argument(
        "--map_location",
        default="cpu",
        type=str,
        required=False,
        help="The device passed to `map_location` when loading the checkpoint",
    )
    args = parser.parse_args()

    assert args.scale in ["1.5", "2"], f"{args.scale} should be `1.5` or `2`"
    assert Path(args.original_checkpoint_path).is_file()

    asymmetric_autoencoder_kl = get_asymmetric_autoencoder_kl_from_original_checkpoint(
        scale=args.scale,
        original_checkpoint_path=args.original_checkpoint_path,
        map_location=torch.device(args.map_location),
    )
    print("Saving pretrained AsymmetricAutoencoderKL")
    asymmetric_autoencoder_kl.save_pretrained(args.output_path)
    print(f"Done in {time.time() - start:.2f} seconds")
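The `# fix weights shape` step in the converter relies on a 1x1 convolution being mathematically identical to a linear layer, which is why the attention projection kernels can be squeezed from [out, in, 1, 1] to [out, in]. A quick self-contained check of that equivalence (illustrative sizes only):

import torch

# A 1x1 conv applied to a single spatial position computes the same matmul
# as a linear layer whose weight is the squeezed [out, in] kernel.
conv = torch.nn.Conv2d(8, 16, kernel_size=1, bias=False)
linear = torch.nn.Linear(8, 16, bias=False)
linear.weight.data = conv.weight.data[:, :, 0, 0]

x = torch.randn(2, 8)
out_conv = conv(x[:, :, None, None])[:, :, 0, 0]
assert torch.allclose(out_conv, linear(x), atol=1e-6)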
diffusers/scripts/convert_asymmetric_vqgan_to_diffusers.py
{ "file_path": "diffusers/scripts/convert_asymmetric_vqgan_to_diffusers.py", "repo_id": "diffusers", "token_count": 3351 }
153
import argparse import os import torch from torchvision.datasets.utils import download_url from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, Transformer2DModel pretrained_models = {512: "DiT-XL-2-512x512.pt", 256: "DiT-XL-2-256x256.pt"} def download_model(model_name): """ Downloads a pre-trained DiT model from the web. """ local_path = f"pretrained_models/{model_name}" if not os.path.isfile(local_path): os.makedirs("pretrained_models", exist_ok=True) web_path = f"https://dl.fbaipublicfiles.com/DiT/models/{model_name}" download_url(web_path, "pretrained_models") model = torch.load(local_path, map_location=lambda storage, loc: storage) return model def main(args): state_dict = download_model(pretrained_models[args.image_size]) state_dict["pos_embed.proj.weight"] = state_dict["x_embedder.proj.weight"] state_dict["pos_embed.proj.bias"] = state_dict["x_embedder.proj.bias"] state_dict.pop("x_embedder.proj.weight") state_dict.pop("x_embedder.proj.bias") for depth in range(28): state_dict[f"transformer_blocks.{depth}.norm1.emb.timestep_embedder.linear_1.weight"] = state_dict[ "t_embedder.mlp.0.weight" ] state_dict[f"transformer_blocks.{depth}.norm1.emb.timestep_embedder.linear_1.bias"] = state_dict[ "t_embedder.mlp.0.bias" ] state_dict[f"transformer_blocks.{depth}.norm1.emb.timestep_embedder.linear_2.weight"] = state_dict[ "t_embedder.mlp.2.weight" ] state_dict[f"transformer_blocks.{depth}.norm1.emb.timestep_embedder.linear_2.bias"] = state_dict[ "t_embedder.mlp.2.bias" ] state_dict[f"transformer_blocks.{depth}.norm1.emb.class_embedder.embedding_table.weight"] = state_dict[ "y_embedder.embedding_table.weight" ] state_dict[f"transformer_blocks.{depth}.norm1.linear.weight"] = state_dict[ f"blocks.{depth}.adaLN_modulation.1.weight" ] state_dict[f"transformer_blocks.{depth}.norm1.linear.bias"] = state_dict[ f"blocks.{depth}.adaLN_modulation.1.bias" ] q, k, v = torch.chunk(state_dict[f"blocks.{depth}.attn.qkv.weight"], 3, dim=0) q_bias, k_bias, v_bias = torch.chunk(state_dict[f"blocks.{depth}.attn.qkv.bias"], 3, dim=0) state_dict[f"transformer_blocks.{depth}.attn1.to_q.weight"] = q state_dict[f"transformer_blocks.{depth}.attn1.to_q.bias"] = q_bias state_dict[f"transformer_blocks.{depth}.attn1.to_k.weight"] = k state_dict[f"transformer_blocks.{depth}.attn1.to_k.bias"] = k_bias state_dict[f"transformer_blocks.{depth}.attn1.to_v.weight"] = v state_dict[f"transformer_blocks.{depth}.attn1.to_v.bias"] = v_bias state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.weight"] = state_dict[ f"blocks.{depth}.attn.proj.weight" ] state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.bias"] = state_dict[f"blocks.{depth}.attn.proj.bias"] state_dict[f"transformer_blocks.{depth}.ff.net.0.proj.weight"] = state_dict[f"blocks.{depth}.mlp.fc1.weight"] state_dict[f"transformer_blocks.{depth}.ff.net.0.proj.bias"] = state_dict[f"blocks.{depth}.mlp.fc1.bias"] state_dict[f"transformer_blocks.{depth}.ff.net.2.weight"] = state_dict[f"blocks.{depth}.mlp.fc2.weight"] state_dict[f"transformer_blocks.{depth}.ff.net.2.bias"] = state_dict[f"blocks.{depth}.mlp.fc2.bias"] state_dict.pop(f"blocks.{depth}.attn.qkv.weight") state_dict.pop(f"blocks.{depth}.attn.qkv.bias") state_dict.pop(f"blocks.{depth}.attn.proj.weight") state_dict.pop(f"blocks.{depth}.attn.proj.bias") state_dict.pop(f"blocks.{depth}.mlp.fc1.weight") state_dict.pop(f"blocks.{depth}.mlp.fc1.bias") state_dict.pop(f"blocks.{depth}.mlp.fc2.weight") state_dict.pop(f"blocks.{depth}.mlp.fc2.bias") state_dict.pop(f"blocks.{depth}.adaLN_modulation.1.weight") 
state_dict.pop(f"blocks.{depth}.adaLN_modulation.1.bias") state_dict.pop("t_embedder.mlp.0.weight") state_dict.pop("t_embedder.mlp.0.bias") state_dict.pop("t_embedder.mlp.2.weight") state_dict.pop("t_embedder.mlp.2.bias") state_dict.pop("y_embedder.embedding_table.weight") state_dict["proj_out_1.weight"] = state_dict["final_layer.adaLN_modulation.1.weight"] state_dict["proj_out_1.bias"] = state_dict["final_layer.adaLN_modulation.1.bias"] state_dict["proj_out_2.weight"] = state_dict["final_layer.linear.weight"] state_dict["proj_out_2.bias"] = state_dict["final_layer.linear.bias"] state_dict.pop("final_layer.linear.weight") state_dict.pop("final_layer.linear.bias") state_dict.pop("final_layer.adaLN_modulation.1.weight") state_dict.pop("final_layer.adaLN_modulation.1.bias") # DiT XL/2 transformer = Transformer2DModel( sample_size=args.image_size // 8, num_layers=28, attention_head_dim=72, in_channels=4, out_channels=8, patch_size=2, attention_bias=True, num_attention_heads=16, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, ) transformer.load_state_dict(state_dict, strict=True) scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", prediction_type="epsilon", clip_sample=False, ) vae = AutoencoderKL.from_pretrained(args.vae_model) pipeline = DiTPipeline(transformer=transformer, vae=vae, scheduler=scheduler) if args.save: pipeline.save_pretrained(args.checkpoint_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--image_size", default=256, type=int, required=False, help="Image size of pretrained model, either 256 or 512.", ) parser.add_argument( "--vae_model", default="stabilityai/sd-vae-ft-ema", type=str, required=False, help="Path to pretrained VAE model, either stabilityai/sd-vae-ft-mse or stabilityai/sd-vae-ft-ema.", ) parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted pipeline or not." ) parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the output pipeline." ) args = parser.parse_args() main(args)
diffusers/scripts/convert_dit_to_diffusers.py
{ "file_path": "diffusers/scripts/convert_dit_to_diffusers.py", "repo_id": "diffusers", "token_count": 3037 }
154
import argparse import os import torch from safetensors.torch import load_file from transformers import AutoModel, AutoTokenizer from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaPipeline def main(args): # checkpoint from https://huggingface.co/Alpha-VLLM/Lumina-Next-SFT or https://huggingface.co/Alpha-VLLM/Lumina-Next-T2I all_sd = load_file(args.origin_ckpt_path, device="cpu") converted_state_dict = {} # pad token converted_state_dict["pad_token"] = all_sd["pad_token"] # patch embed converted_state_dict["patch_embedder.weight"] = all_sd["x_embedder.weight"] converted_state_dict["patch_embedder.bias"] = all_sd["x_embedder.bias"] # time and caption embed converted_state_dict["time_caption_embed.timestep_embedder.linear_1.weight"] = all_sd["t_embedder.mlp.0.weight"] converted_state_dict["time_caption_embed.timestep_embedder.linear_1.bias"] = all_sd["t_embedder.mlp.0.bias"] converted_state_dict["time_caption_embed.timestep_embedder.linear_2.weight"] = all_sd["t_embedder.mlp.2.weight"] converted_state_dict["time_caption_embed.timestep_embedder.linear_2.bias"] = all_sd["t_embedder.mlp.2.bias"] converted_state_dict["time_caption_embed.caption_embedder.0.weight"] = all_sd["cap_embedder.0.weight"] converted_state_dict["time_caption_embed.caption_embedder.0.bias"] = all_sd["cap_embedder.0.bias"] converted_state_dict["time_caption_embed.caption_embedder.1.weight"] = all_sd["cap_embedder.1.weight"] converted_state_dict["time_caption_embed.caption_embedder.1.bias"] = all_sd["cap_embedder.1.bias"] for i in range(24): # adaln converted_state_dict[f"layers.{i}.gate"] = all_sd[f"layers.{i}.attention.gate"] converted_state_dict[f"layers.{i}.adaLN_modulation.1.weight"] = all_sd[f"layers.{i}.adaLN_modulation.1.weight"] converted_state_dict[f"layers.{i}.adaLN_modulation.1.bias"] = all_sd[f"layers.{i}.adaLN_modulation.1.bias"] # qkv converted_state_dict[f"layers.{i}.attn1.to_q.weight"] = all_sd[f"layers.{i}.attention.wq.weight"] converted_state_dict[f"layers.{i}.attn1.to_k.weight"] = all_sd[f"layers.{i}.attention.wk.weight"] converted_state_dict[f"layers.{i}.attn1.to_v.weight"] = all_sd[f"layers.{i}.attention.wv.weight"] # cap converted_state_dict[f"layers.{i}.attn2.to_q.weight"] = all_sd[f"layers.{i}.attention.wq.weight"] converted_state_dict[f"layers.{i}.attn2.to_k.weight"] = all_sd[f"layers.{i}.attention.wk_y.weight"] converted_state_dict[f"layers.{i}.attn2.to_v.weight"] = all_sd[f"layers.{i}.attention.wv_y.weight"] # output converted_state_dict[f"layers.{i}.attn2.to_out.0.weight"] = all_sd[f"layers.{i}.attention.wo.weight"] # attention # qk norm converted_state_dict[f"layers.{i}.attn1.norm_q.weight"] = all_sd[f"layers.{i}.attention.q_norm.weight"] converted_state_dict[f"layers.{i}.attn1.norm_q.bias"] = all_sd[f"layers.{i}.attention.q_norm.bias"] converted_state_dict[f"layers.{i}.attn1.norm_k.weight"] = all_sd[f"layers.{i}.attention.k_norm.weight"] converted_state_dict[f"layers.{i}.attn1.norm_k.bias"] = all_sd[f"layers.{i}.attention.k_norm.bias"] converted_state_dict[f"layers.{i}.attn2.norm_q.weight"] = all_sd[f"layers.{i}.attention.q_norm.weight"] converted_state_dict[f"layers.{i}.attn2.norm_q.bias"] = all_sd[f"layers.{i}.attention.q_norm.bias"] converted_state_dict[f"layers.{i}.attn2.norm_k.weight"] = all_sd[f"layers.{i}.attention.ky_norm.weight"] converted_state_dict[f"layers.{i}.attn2.norm_k.bias"] = all_sd[f"layers.{i}.attention.ky_norm.bias"] # attention norm converted_state_dict[f"layers.{i}.attn_norm1.weight"] = 
all_sd[f"layers.{i}.attention_norm1.weight"] converted_state_dict[f"layers.{i}.attn_norm2.weight"] = all_sd[f"layers.{i}.attention_norm2.weight"] converted_state_dict[f"layers.{i}.norm1_context.weight"] = all_sd[f"layers.{i}.attention_y_norm.weight"] # feed forward converted_state_dict[f"layers.{i}.feed_forward.linear_1.weight"] = all_sd[f"layers.{i}.feed_forward.w1.weight"] converted_state_dict[f"layers.{i}.feed_forward.linear_2.weight"] = all_sd[f"layers.{i}.feed_forward.w2.weight"] converted_state_dict[f"layers.{i}.feed_forward.linear_3.weight"] = all_sd[f"layers.{i}.feed_forward.w3.weight"] # feed forward norm converted_state_dict[f"layers.{i}.ffn_norm1.weight"] = all_sd[f"layers.{i}.ffn_norm1.weight"] converted_state_dict[f"layers.{i}.ffn_norm2.weight"] = all_sd[f"layers.{i}.ffn_norm2.weight"] # final layer converted_state_dict["final_layer.linear.weight"] = all_sd["final_layer.linear.weight"] converted_state_dict["final_layer.linear.bias"] = all_sd["final_layer.linear.bias"] converted_state_dict["final_layer.adaLN_modulation.1.weight"] = all_sd["final_layer.adaLN_modulation.1.weight"] converted_state_dict["final_layer.adaLN_modulation.1.bias"] = all_sd["final_layer.adaLN_modulation.1.bias"] # Lumina-Next-SFT 2B transformer = LuminaNextDiT2DModel( sample_size=128, patch_size=2, in_channels=4, hidden_size=2304, num_layers=24, num_attention_heads=32, num_kv_heads=8, multiple_of=256, ffn_dim_multiplier=None, norm_eps=1e-5, learn_sigma=True, qk_norm=True, cross_attention_dim=2048, scaling_factor=1.0, ) transformer.load_state_dict(converted_state_dict, strict=True) num_model_params = sum(p.numel() for p in transformer.parameters()) print(f"Total number of transformer parameters: {num_model_params}") if args.only_transformer: transformer.save_pretrained(os.path.join(args.dump_path, "transformer")) else: scheduler = FlowMatchEulerDiscreteScheduler() vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", torch_dtype=torch.float32) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") text_encoder = AutoModel.from_pretrained("google/gemma-2b") pipeline = LuminaPipeline( tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, scheduler=scheduler ) pipeline.save_pretrained(args.dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--origin_ckpt_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--image_size", default=1024, type=int, choices=[256, 512, 1024], required=False, help="Image size of pretrained model, either 512 or 1024.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output pipeline.") parser.add_argument("--only_transformer", default=True, type=bool, required=True) args = parser.parse_args() main(args)
diffusers/scripts/convert_lumina_to_diffusers.py
{ "file_path": "diffusers/scripts/convert_lumina_to_diffusers.py", "repo_id": "diffusers", "token_count": 3148 }
155
#!/usr/bin/env python
from __future__ import annotations

import argparse
import os
from contextlib import nullcontext

import torch
from accelerate import init_empty_weights
from huggingface_hub import hf_hub_download, snapshot_download
from termcolor import colored
from transformers import AutoModelForCausalLM, AutoTokenizer

from diffusers import (
    AutoencoderDC,
    DPMSolverMultistepScheduler,
    FlowMatchEulerDiscreteScheduler,
    SanaPipeline,
    SanaSprintPipeline,
    SanaTransformer2DModel,
    SCMScheduler,
)
from diffusers.models.modeling_utils import load_model_dict_into_meta
from diffusers.utils.import_utils import is_accelerate_available


# note: is_accelerate_available must be *called* here; the bare function object is
# always truthy, so the fallback branch could never be selected.
CTX = init_empty_weights if is_accelerate_available() else nullcontext

# note: the first two entries were missing trailing commas, which silently
# concatenated adjacent string literals into one bogus checkpoint id.
ckpt_ids = [
    "Efficient-Large-Model/Sana_Sprint_0.6B_1024px/checkpoints/Sana_Sprint_0.6B_1024px.pth",
    "Efficient-Large-Model/Sana_Sprint_1.6B_1024px/checkpoints/Sana_Sprint_1.6B_1024px.pth",
    "Efficient-Large-Model/SANA1.5_4.8B_1024px/checkpoints/SANA1.5_4.8B_1024px.pth",
    "Efficient-Large-Model/SANA1.5_1.6B_1024px/checkpoints/SANA1.5_1.6B_1024px.pth",
    "Efficient-Large-Model/Sana_1600M_4Kpx_BF16/checkpoints/Sana_1600M_4Kpx_BF16.pth",
    "Efficient-Large-Model/Sana_1600M_2Kpx_BF16/checkpoints/Sana_1600M_2Kpx_BF16.pth",
    "Efficient-Large-Model/Sana_1600M_1024px_MultiLing/checkpoints/Sana_1600M_1024px_MultiLing.pth",
    "Efficient-Large-Model/Sana_1600M_1024px_BF16/checkpoints/Sana_1600M_1024px_BF16.pth",
    "Efficient-Large-Model/Sana_1600M_512px_MultiLing/checkpoints/Sana_1600M_512px_MultiLing.pth",
    "Efficient-Large-Model/Sana_1600M_1024px/checkpoints/Sana_1600M_1024px.pth",
    "Efficient-Large-Model/Sana_1600M_512px/checkpoints/Sana_1600M_512px.pth",
    "Efficient-Large-Model/Sana_600M_1024px/checkpoints/Sana_600M_1024px_MultiLing.pth",
    "Efficient-Large-Model/Sana_600M_512px/checkpoints/Sana_600M_512px_MultiLing.pth",
]


# https://github.com/NVlabs/Sana/blob/main/scripts/inference.py
def main(args):
    cache_dir_path = os.path.expanduser("~/.cache/huggingface/hub")

    if args.orig_ckpt_path is None or args.orig_ckpt_path in ckpt_ids:
        ckpt_id = args.orig_ckpt_path or ckpt_ids[0]
        snapshot_download(
            repo_id=f"{'/'.join(ckpt_id.split('/')[:2])}",
            cache_dir=cache_dir_path,
            repo_type="model",
        )
        file_path = hf_hub_download(
            repo_id=f"{'/'.join(ckpt_id.split('/')[:2])}",
            filename=f"{'/'.join(ckpt_id.split('/')[2:])}",
            cache_dir=cache_dir_path,
            repo_type="model",
        )
    else:
        file_path = args.orig_ckpt_path

    print(colored(f"Loading checkpoint from {file_path}", "green", attrs=["bold"]))
    all_state_dict = torch.load(file_path, weights_only=True)
    state_dict = all_state_dict.pop("state_dict")
    converted_state_dict = {}

    # Patch embeddings.
    converted_state_dict["patch_embed.proj.weight"] = state_dict.pop("x_embedder.proj.weight")
    converted_state_dict["patch_embed.proj.bias"] = state_dict.pop("x_embedder.proj.bias")

    # Caption projection.
converted_state_dict["caption_projection.linear_1.weight"] = state_dict.pop("y_embedder.y_proj.fc1.weight") converted_state_dict["caption_projection.linear_1.bias"] = state_dict.pop("y_embedder.y_proj.fc1.bias") converted_state_dict["caption_projection.linear_2.weight"] = state_dict.pop("y_embedder.y_proj.fc2.weight") converted_state_dict["caption_projection.linear_2.bias"] = state_dict.pop("y_embedder.y_proj.fc2.bias") # Handle different time embedding structure based on model type if args.model_type in ["SanaSprint_1600M_P1_D20", "SanaSprint_600M_P1_D28"]: # For Sana Sprint, the time embedding structure is different converted_state_dict["time_embed.timestep_embedder.linear_1.weight"] = state_dict.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["time_embed.timestep_embedder.linear_1.bias"] = state_dict.pop("t_embedder.mlp.0.bias") converted_state_dict["time_embed.timestep_embedder.linear_2.weight"] = state_dict.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["time_embed.timestep_embedder.linear_2.bias"] = state_dict.pop("t_embedder.mlp.2.bias") # Guidance embedder for Sana Sprint converted_state_dict["time_embed.guidance_embedder.linear_1.weight"] = state_dict.pop( "cfg_embedder.mlp.0.weight" ) converted_state_dict["time_embed.guidance_embedder.linear_1.bias"] = state_dict.pop("cfg_embedder.mlp.0.bias") converted_state_dict["time_embed.guidance_embedder.linear_2.weight"] = state_dict.pop( "cfg_embedder.mlp.2.weight" ) converted_state_dict["time_embed.guidance_embedder.linear_2.bias"] = state_dict.pop("cfg_embedder.mlp.2.bias") else: # Original Sana time embedding structure converted_state_dict["time_embed.emb.timestep_embedder.linear_1.weight"] = state_dict.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_1.bias"] = state_dict.pop( "t_embedder.mlp.0.bias" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_2.weight"] = state_dict.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_2.bias"] = state_dict.pop( "t_embedder.mlp.2.bias" ) # Shared norm. converted_state_dict["time_embed.linear.weight"] = state_dict.pop("t_block.1.weight") converted_state_dict["time_embed.linear.bias"] = state_dict.pop("t_block.1.bias") # y norm converted_state_dict["caption_norm.weight"] = state_dict.pop("attention_y_norm.weight") # scheduler if args.image_size == 4096: flow_shift = 6.0 else: flow_shift = 3.0 # model config if args.model_type in ["SanaMS_1600M_P1_D20", "SanaSprint_1600M_P1_D20", "SanaMS1.5_1600M_P1_D20"]: layer_num = 20 elif args.model_type in ["SanaMS_600M_P1_D28", "SanaSprint_600M_P1_D28"]: layer_num = 28 elif args.model_type == "SanaMS_4800M_P1_D60": layer_num = 60 else: raise ValueError(f"{args.model_type} is not supported.") # Positional embedding interpolation scale. interpolation_scale = {512: None, 1024: None, 2048: 1.0, 4096: 2.0} qk_norm = ( "rms_norm_across_heads" if args.model_type in ["SanaMS1.5_1600M_P1_D20", "SanaMS1.5_4800M_P1_D60", "SanaSprint_600M_P1_D28", "SanaSprint_1600M_P1_D20"] else None ) for depth in range(layer_num): # Transformer blocks. converted_state_dict[f"transformer_blocks.{depth}.scale_shift_table"] = state_dict.pop( f"blocks.{depth}.scale_shift_table" ) # Linear Attention is all you need 🤘 # Self attention. 
q, k, v = torch.chunk(state_dict.pop(f"blocks.{depth}.attn.qkv.weight"), 3, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn1.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn1.to_v.weight"] = v if qk_norm is not None: # Add Q/K normalization for self-attention (attn1) - needed for Sana-Sprint and Sana-1.5 converted_state_dict[f"transformer_blocks.{depth}.attn1.norm_q.weight"] = state_dict.pop( f"blocks.{depth}.attn.q_norm.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn1.norm_k.weight"] = state_dict.pop( f"blocks.{depth}.attn.k_norm.weight" ) # Projection. converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.weight"] = state_dict.pop( f"blocks.{depth}.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.bias"] = state_dict.pop( f"blocks.{depth}.attn.proj.bias" ) # Feed-forward. converted_state_dict[f"transformer_blocks.{depth}.ff.conv_inverted.weight"] = state_dict.pop( f"blocks.{depth}.mlp.inverted_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_inverted.bias"] = state_dict.pop( f"blocks.{depth}.mlp.inverted_conv.conv.bias" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_depth.weight"] = state_dict.pop( f"blocks.{depth}.mlp.depth_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_depth.bias"] = state_dict.pop( f"blocks.{depth}.mlp.depth_conv.conv.bias" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_point.weight"] = state_dict.pop( f"blocks.{depth}.mlp.point_conv.conv.weight" ) # Cross-attention. q = state_dict.pop(f"blocks.{depth}.cross_attn.q_linear.weight") q_bias = state_dict.pop(f"blocks.{depth}.cross_attn.q_linear.bias") k, v = torch.chunk(state_dict.pop(f"blocks.{depth}.cross_attn.kv_linear.weight"), 2, dim=0) k_bias, v_bias = torch.chunk(state_dict.pop(f"blocks.{depth}.cross_attn.kv_linear.bias"), 2, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.bias"] = q_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.bias"] = k_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.weight"] = v converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.bias"] = v_bias if qk_norm is not None: # Add Q/K normalization for cross-attention (attn2) - needed for Sana-Sprint and Sana-1.5 converted_state_dict[f"transformer_blocks.{depth}.attn2.norm_q.weight"] = state_dict.pop( f"blocks.{depth}.cross_attn.q_norm.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.norm_k.weight"] = state_dict.pop( f"blocks.{depth}.cross_attn.k_norm.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.weight"] = state_dict.pop( f"blocks.{depth}.cross_attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.bias"] = state_dict.pop( f"blocks.{depth}.cross_attn.proj.bias" ) # Final block. 
converted_state_dict["proj_out.weight"] = state_dict.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = state_dict.pop("final_layer.linear.bias") converted_state_dict["scale_shift_table"] = state_dict.pop("final_layer.scale_shift_table") # Transformer with CTX(): transformer_kwargs = { "in_channels": 32, "out_channels": 32, "num_attention_heads": model_kwargs[args.model_type]["num_attention_heads"], "attention_head_dim": model_kwargs[args.model_type]["attention_head_dim"], "num_layers": model_kwargs[args.model_type]["num_layers"], "num_cross_attention_heads": model_kwargs[args.model_type]["num_cross_attention_heads"], "cross_attention_head_dim": model_kwargs[args.model_type]["cross_attention_head_dim"], "cross_attention_dim": model_kwargs[args.model_type]["cross_attention_dim"], "caption_channels": 2304, "mlp_ratio": 2.5, "attention_bias": False, "sample_size": args.image_size // 32, "patch_size": 1, "norm_elementwise_affine": False, "norm_eps": 1e-6, "interpolation_scale": interpolation_scale[args.image_size], } # Add qk_norm parameter for Sana Sprint if args.model_type in [ "SanaMS1.5_1600M_P1_D20", "SanaMS1.5_4800M_P1_D60", "SanaSprint_600M_P1_D28", "SanaSprint_1600M_P1_D20", ]: transformer_kwargs["qk_norm"] = "rms_norm_across_heads" if args.model_type in ["SanaSprint_1600M_P1_D20", "SanaSprint_600M_P1_D28"]: transformer_kwargs["guidance_embeds"] = True transformer = SanaTransformer2DModel(**transformer_kwargs) if is_accelerate_available(): load_model_dict_into_meta(transformer, converted_state_dict) else: transformer.load_state_dict(converted_state_dict, strict=True, assign=True) try: state_dict.pop("y_embedder.y_embedding") state_dict.pop("pos_embed") state_dict.pop("logvar_linear.weight") state_dict.pop("logvar_linear.bias") except KeyError: print("y_embedder.y_embedding or pos_embed not found in the state_dict") assert len(state_dict) == 0, f"State dict is not empty, {state_dict.keys()}" num_model_params = sum(p.numel() for p in transformer.parameters()) print(f"Total number of transformer parameters: {num_model_params}") transformer = transformer.to(weight_dtype) if not args.save_full_pipeline: print( colored( f"Only saving transformer model of {args.model_type}. 
" f"Set --save_full_pipeline to save the whole Pipeline", "green", attrs=["bold"], ) ) transformer.save_pretrained( os.path.join(args.dump_path, "transformer"), safe_serialization=True, max_shard_size="5GB" ) else: print(colored(f"Saving the whole Pipeline containing {args.model_type}", "green", attrs=["bold"])) # VAE ae = AutoencoderDC.from_pretrained("mit-han-lab/dc-ae-f32c32-sana-1.1-diffusers", torch_dtype=torch.float32) # Text Encoder text_encoder_model_path = "Efficient-Large-Model/gemma-2-2b-it" tokenizer = AutoTokenizer.from_pretrained(text_encoder_model_path) tokenizer.padding_side = "right" text_encoder = AutoModelForCausalLM.from_pretrained( text_encoder_model_path, torch_dtype=torch.bfloat16 ).get_decoder() # Choose the appropriate pipeline and scheduler based on model type if args.model_type in ["SanaSprint_1600M_P1_D20", "SanaSprint_600M_P1_D28"]: # Force SCM Scheduler for Sana Sprint regardless of scheduler_type if args.scheduler_type != "scm": print( colored( f"Warning: Overriding scheduler_type '{args.scheduler_type}' to 'scm' for SanaSprint model", "yellow", attrs=["bold"], ) ) # SCM Scheduler for Sana Sprint scheduler_config = { "prediction_type": "trigflow", "sigma_data": 0.5, } scheduler = SCMScheduler(**scheduler_config) pipe = SanaSprintPipeline( tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=ae, scheduler=scheduler, ) else: # Original Sana scheduler if args.scheduler_type == "flow-dpm_solver": scheduler = DPMSolverMultistepScheduler( flow_shift=flow_shift, use_flow_sigmas=True, prediction_type="flow_prediction", ) elif args.scheduler_type == "flow-euler": scheduler = FlowMatchEulerDiscreteScheduler(shift=flow_shift) else: raise ValueError(f"Scheduler type {args.scheduler_type} is not supported") pipe = SanaPipeline( tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=ae, scheduler=scheduler, ) pipe.save_pretrained(args.dump_path, safe_serialization=True, max_shard_size="5GB") DTYPE_MAPPING = { "fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16, } if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--orig_ckpt_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--image_size", default=1024, type=int, choices=[512, 1024, 2048, 4096], required=False, help="Image size of pretrained model, 512, 1024, 2048 or 4096.", ) parser.add_argument( "--model_type", default="SanaMS_1600M_P1_D20", type=str, choices=[ "SanaMS_1600M_P1_D20", "SanaMS_600M_P1_D28", "SanaMS1.5_1600M_P1_D20", "SanaMS1.5_4800M_P1_D60", "SanaSprint_1600M_P1_D20", "SanaSprint_600M_P1_D28", ], ) parser.add_argument( "--scheduler_type", default="flow-dpm_solver", type=str, choices=["flow-dpm_solver", "flow-euler", "scm"], help="Scheduler type to use. 
Use 'scm' for Sana Sprint models.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output pipeline.") parser.add_argument("--save_full_pipeline", action="store_true", help="save all the pipeline elements in one.") parser.add_argument("--dtype", default="fp32", type=str, choices=["fp32", "fp16", "bf16"], help="Weight dtype.") args = parser.parse_args() model_kwargs = { "SanaMS_1600M_P1_D20": { "num_attention_heads": 70, "attention_head_dim": 32, "num_cross_attention_heads": 20, "cross_attention_head_dim": 112, "cross_attention_dim": 2240, "num_layers": 20, }, "SanaMS_600M_P1_D28": { "num_attention_heads": 36, "attention_head_dim": 32, "num_cross_attention_heads": 16, "cross_attention_head_dim": 72, "cross_attention_dim": 1152, "num_layers": 28, }, "SanaMS1.5_1600M_P1_D20": { "num_attention_heads": 70, "attention_head_dim": 32, "num_cross_attention_heads": 20, "cross_attention_head_dim": 112, "cross_attention_dim": 2240, "num_layers": 20, }, "SanaMS1.5_4800M_P1_D60": { "num_attention_heads": 70, "attention_head_dim": 32, "num_cross_attention_heads": 20, "cross_attention_head_dim": 112, "cross_attention_dim": 2240, "num_layers": 60, }, "SanaSprint_600M_P1_D28": { "num_attention_heads": 36, "attention_head_dim": 32, "num_cross_attention_heads": 16, "cross_attention_head_dim": 72, "cross_attention_dim": 1152, "num_layers": 28, }, "SanaSprint_1600M_P1_D20": { "num_attention_heads": 70, "attention_head_dim": 32, "num_cross_attention_heads": 20, "cross_attention_head_dim": 112, "cross_attention_dim": 2240, "num_layers": 20, }, } device = "cuda" if torch.cuda.is_available() else "cpu" weight_dtype = DTYPE_MAPPING[args.dtype] main(args)
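The `with CTX():` block in this script instantiates SanaTransformer2DModel on the meta device (via accelerate's init_empty_weights), so no parameter memory is allocated until the converted weights are loaded. A minimal sketch of that pattern, assuming accelerate is installed and a torch version that supports load_state_dict(assign=True) (torch >= 2.1):

import torch
from accelerate import init_empty_weights

with init_empty_weights():
    layer = torch.nn.Linear(4, 4)  # parameters live on the meta device, no storage yet

state_dict = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
layer.load_state_dict(state_dict, assign=True)  # re-binds real tensors in place of the meta params
print(layer.weight.device)  # cpu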
diffusers/scripts/convert_sana_to_diffusers.py
{ "file_path": "diffusers/scripts/convert_sana_to_diffusers.py", "repo_id": "diffusers", "token_count": 9521 }
156
import argparse import io import requests import torch import yaml from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) from diffusers.utils.constants import DIFFUSERS_REQUEST_TIMEOUT def custom_convert_ldm_vae_checkpoint(checkpoint, config): vae_state_dict = checkpoint new_checkpoint = {} new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [ key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key and "attn" not in key ] attentions = [key for key in down_blocks[i] if f"down.{i}.attn" in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) paths = renew_vae_attention_paths(attentions) meta_path = {"old": f"down.{i}.attn", "new": f"down_blocks.{i}.attentions"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key 
for key in mid_resnets if f"encoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key and "attn" not in key ] attentions = [key for key in up_blocks[block_id] if f"up.{block_id}.attn" in key] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) paths = renew_vae_attention_paths(attentions) meta_path = {"old": f"up.{block_id}.attn", "new": f"up_blocks.{i}.attentions"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) return new_checkpoint def vae_pt_to_vae_diffuser( checkpoint_path: str, output_path: str, ): # Only support V1 r = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml", timeout=DIFFUSERS_REQUEST_TIMEOUT, ) io_obj = io.BytesIO(r.content) original_config = yaml.safe_load(io_obj) image_size = 512 device = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors"): from safetensors import safe_open checkpoint = {} with safe_open(checkpoint_path, framework="pt", device="cpu") as f: for key in f.keys(): checkpoint[key] = f.get_tensor(key) else: checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"] # Convert the VAE model. 
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to save the converted AutoencoderKL model."
    )

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
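Once saved, the converted autoencoder loads like any other diffusers model and can, for example, be swapped into a Stable Diffusion pipeline. A short usage sketch (the local path and model id below are illustrative):

from diffusers import AutoencoderKL, StableDiffusionPipeline

vae = AutoencoderKL.from_pretrained("./converted_vae")  # the --dump_path used above
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae
)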
diffusers/scripts/convert_vae_pt_to_diffusers.py
{ "file_path": "diffusers/scripts/convert_vae_pt_to_diffusers.py", "repo_id": "diffusers", "token_count": 3572 }
157
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConfigMixin base class and utilities."""

import dataclasses
import functools
import importlib
import inspect
import json
import os
import re
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union

import numpy as np
from huggingface_hub import DDUFEntry, create_repo, hf_hub_download
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    validate_hf_hub_args,
)
from requests import HTTPError
from typing_extensions import Self

from . import __version__
from .utils import (
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    DummyObject,
    deprecate,
    extract_commit_hash,
    http_user_agent,
    logging,
)


logger = logging.get_logger(__name__)

_re_configuration_file = re.compile(r"config\.(.*)\.json")


class FrozenDict(OrderedDict):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        for key, value in self.items():
            setattr(self, key, value)

        self.__frozen = True

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __setattr__(self, name, value):
        # note: private name mangling stores the flag as `_FrozenDict__frozen`; a bare
        # hasattr(self, "__frozen") lookup would never match and the freeze check would
        # silently pass.
        if hasattr(self, "_FrozenDict__frozen") and self.__frozen:
            raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.")
        super().__setattr__(name, value)

    def __setitem__(self, name, value):
        if hasattr(self, "_FrozenDict__frozen") and self.__frozen:
            raise Exception(f"You cannot use ``__setitem__`` on a {self.__class__.__name__} instance.")
        super().__setitem__(name, value)


class ConfigMixin:
    r"""
    Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also
    provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and
    saving classes that inherit from [`ConfigMixin`].

    Class attributes:
        - **config_name** (`str`) -- A filename under which the config should be stored when calling
          [`~ConfigMixin.save_config`] (should be overridden by parent class).
        - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should
          be overridden by subclass).
        - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).
        - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function
          should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by
          subclass).
""" config_name = None ignore_for_config = [] has_compatibles = False _deprecated_kwargs = [] def register_to_config(self, **kwargs): if self.config_name is None: raise NotImplementedError(f"Make sure that {self.__class__} has defined a class name `config_name`") # Special case for `kwargs` used in deprecation warning added to schedulers # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument, # or solve in a more general way. kwargs.pop("kwargs", None) if not hasattr(self, "_internal_dict"): internal_dict = kwargs else: previous_dict = dict(self._internal_dict) internal_dict = {**self._internal_dict, **kwargs} logger.debug(f"Updating config from {previous_dict} to {internal_dict}") self._internal_dict = FrozenDict(internal_dict) def __getattr__(self, name: str) -> Any: """The only reason we overwrite `getattr` here is to gracefully deprecate accessing config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 This function is mostly copied from PyTorch's __getattr__ overwrite: https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module """ is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) is_attribute = name in self.__dict__ if is_in_config and not is_attribute: deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'." deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False) return self._internal_dict[name] raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the [`~ConfigMixin.from_config`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the configuration JSON file is saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
""" if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) # If we save using the predefined names, we can load using `from_config` output_config_file = os.path.join(save_directory, self.config_name) self.to_json_file(output_config_file) logger.info(f"Configuration saved in {output_config_file}") if push_to_hub: commit_message = kwargs.pop("commit_message", None) private = kwargs.pop("private", None) create_pr = kwargs.pop("create_pr", False) token = kwargs.pop("token", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id subfolder = kwargs.pop("subfolder", None) self._upload_folder( save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, subfolder=subfolder, ) @classmethod def from_config( cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs ) -> Union[Self, Tuple[Self, Dict[str, Any]]]: r""" Instantiate a Python class from a config dictionary. Parameters: config (`Dict[str, Any]`): A config dictionary from which the Python class is instantiated. Make sure to only load configuration files of compatible classes. return_unused_kwargs (`bool`, *optional*, defaults to `False`): Whether kwargs that are not consumed by the Python class should be returned or not. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it is loaded) and initiate the Python class. `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually overwrite the same named arguments in `config`. Returns: [`ModelMixin`] or [`SchedulerMixin`]: A model or scheduler object instantiated from a config dictionary. Examples: ```python >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler >>> # Download scheduler from huggingface.co and cache. >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32") >>> # Instantiate DDIM scheduler class with same config as DDPM >>> scheduler = DDIMScheduler.from_config(scheduler.config) >>> # Instantiate PNDM scheduler class with same config as DDPM >>> scheduler = PNDMScheduler.from_config(scheduler.config) ``` """ # <===== TO BE REMOVED WITH DEPRECATION # TODO(Patrick) - make sure to remove the following lines when config=="model_path" is deprecated if "pretrained_model_name_or_path" in kwargs: config = kwargs.pop("pretrained_model_name_or_path") if config is None: raise ValueError("Please make sure to provide a config as the first positional argument.") # ======> if not isinstance(config, dict): deprecation_message = "It is deprecated to pass a pretrained model name or path to `from_config`." if "Scheduler" in cls.__name__: deprecation_message += ( f"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead." " Otherwise, please make sure to pass a configuration dictionary instead. This functionality will" " be removed in v1.0.0." ) elif "Model" in cls.__name__: deprecation_message += ( f"If you were trying to load a model, please use {cls}.load_config(...) followed by" f" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary" " instead. This functionality will be removed in v1.0.0." 
) deprecate("config-passed-as-path", "1.0.0", deprecation_message, standard_warn=False) config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs) init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs) # Allow dtype to be specified on initialization if "dtype" in unused_kwargs: init_dict["dtype"] = unused_kwargs.pop("dtype") # add possible deprecated kwargs for deprecated_kwarg in cls._deprecated_kwargs: if deprecated_kwarg in unused_kwargs: init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg) # Return model and optionally state and/or unused_kwargs model = cls(**init_dict) # make sure to also save config parameters that might be used for compatible classes # update _class_name if "_class_name" in hidden_dict: hidden_dict["_class_name"] = cls.__name__ model.register_to_config(**hidden_dict) # add hidden kwargs of compatible classes to unused_kwargs unused_kwargs = {**unused_kwargs, **hidden_dict} if return_unused_kwargs: return (model, unused_kwargs) else: return model @classmethod def get_config_dict(cls, *args, **kwargs): deprecation_message = ( f" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be" " removed in version v1.0.0" ) deprecate("get_config_dict", "1.0.0", deprecation_message, standard_warn=False) return cls.load_config(*args, **kwargs) @classmethod @validate_hf_hub_args def load_config( cls, pretrained_model_name_or_path: Union[str, os.PathLike], return_unused_kwargs=False, return_commit_hash=False, **kwargs, ) -> Tuple[Dict[str, Any], Dict[str, Any]]: r""" Load a model or scheduler configuration. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with [`~ConfigMixin.save_config`]. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. 
                return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                    Whether unused keyword arguments of the config are returned.
                return_commit_hash (`bool`, *optional*, defaults to `False`):
                    Whether the `commit_hash` of the loaded configuration is returned.

        Returns:
            `dict`:
                A dictionary of all the parameters stored in a JSON configuration file. If `return_unused_kwargs`
                and/or `return_commit_hash` is set, the unused kwargs and/or the commit hash are appended and a tuple
                is returned instead.

        """
        cache_dir = kwargs.pop("cache_dir", None)
        local_dir = kwargs.pop("local_dir", None)
        local_dir_use_symlinks = kwargs.pop("local_dir_use_symlinks", "auto")
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        token = kwargs.pop("token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)
        _ = kwargs.pop("mirror", None)
        subfolder = kwargs.pop("subfolder", None)
        user_agent = kwargs.pop("user_agent", {})
        dduf_entries: Optional[Dict[str, DDUFEntry]] = kwargs.pop("dduf_entries", None)

        user_agent = {**user_agent, "file_type": "config"}
        user_agent = http_user_agent(user_agent)

        pretrained_model_name_or_path = str(pretrained_model_name_or_path)

        if cls.config_name is None:
            raise ValueError(
                "`self.config_name` is not defined. Note that one should not load a config from "
                "`ConfigMixin`. Please make sure to define `config_name` in a class inheriting from `ConfigMixin`"
            )

        # Custom path for now
        if dduf_entries:
            if subfolder is not None:
                raise ValueError(
                    "DDUF files only allow for one level of directory nesting (e.g. transformer/model1/model.safetensors is not allowed). "
                    "Please check the DDUF structure"
                )
            config_file = cls._get_config_file_from_dduf(pretrained_model_name_or_path, dduf_entries)
        elif os.path.isfile(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        elif os.path.isdir(pretrained_model_name_or_path):
            if subfolder is not None and os.path.isfile(
                os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)
            ):
                config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)
            elif os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):
                # Load from a PyTorch checkpoint
                config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)
            else:
                raise EnvironmentError(
                    f"No file named {cls.config_name} found in directory {pretrained_model_name_or_path}."
                )
        else:
            try:
                # Load from URL or cache if already cached
                config_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=cls.config_name,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision,
                    local_dir=local_dir,
                    local_dir_use_symlinks=local_dir_use_symlinks,
                )
            except RepositoryNotFoundError:
                raise EnvironmentError(
                    f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
                    " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
                    " token having permission to this repo with `token` or log in with `hf auth login`."
                )
            except RevisionNotFoundError:
                raise EnvironmentError(
                    f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for"
                    " this model name. Check the model page at"
                    f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
                )
            except EntryNotFoundError:
                raise EnvironmentError(
                    f"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}."
                )
            except HTTPError as err:
                raise EnvironmentError(
                    "There was a specific connection error when trying to load"
                    f" {pretrained_model_name_or_path}:\n{err}"
                )
            except ValueError:
                raise EnvironmentError(
                    f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                    f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                    f" directory containing a {cls.config_name} file.\nCheck out your internet connection or see how to"
                    " run the library in offline mode at"
                    " 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
                )
            except EnvironmentError:
                raise EnvironmentError(
                    f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                    "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                    f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                    f"containing a {cls.config_name} file"
                )

        try:
            config_dict = cls._dict_from_json_file(config_file, dduf_entries=dduf_entries)

            commit_hash = extract_commit_hash(config_file)
        except (json.JSONDecodeError, UnicodeDecodeError):
            raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.")

        if not (return_unused_kwargs or return_commit_hash):
            return config_dict

        outputs = (config_dict,)

        if return_unused_kwargs:
            outputs += (kwargs,)

        if return_commit_hash:
            outputs += (commit_hash,)

        return outputs

    @staticmethod
    def _get_init_keys(input_class):
        return set(dict(inspect.signature(input_class.__init__).parameters).keys())

    @classmethod
    def extract_init_dict(cls, config_dict, **kwargs):
        # Skip keys that were not present in the original config, so default __init__ values were used
        used_defaults = config_dict.get("_use_default_values", [])
        config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != "_use_default_values"}

        # 0. Copy origin config dict
        original_dict = dict(config_dict.items())

        # 1. Retrieve expected config attributes from __init__ signature
        expected_keys = cls._get_init_keys(cls)
        expected_keys.remove("self")
        # remove general kwargs if present in dict
        if "kwargs" in expected_keys:
            expected_keys.remove("kwargs")
        # remove flax internal keys
        if hasattr(cls, "_flax_internal_args"):
            for arg in cls._flax_internal_args:
                expected_keys.remove(arg)

        # 2. Remove attributes that cannot be expected from expected config attributes
        # remove keys to be ignored
        if len(cls.ignore_for_config) > 0:
            expected_keys = expected_keys - set(cls.ignore_for_config)

        # load diffusers library to import compatible and original scheduler
        diffusers_library = importlib.import_module(__name__.split(".")[0])

        if cls.has_compatibles:
            compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]
        else:
            compatible_classes = []

        expected_keys_comp_cls = set()
        for c in compatible_classes:
            expected_keys_c = cls._get_init_keys(c)
            expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)
        expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)
        config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}

        # remove attributes from orig class that cannot be expected
        orig_cls_name = config_dict.pop("_class_name", cls.__name__)
        if (
            isinstance(orig_cls_name, str)
            and orig_cls_name != cls.__name__
            and hasattr(diffusers_library, orig_cls_name)
        ):
            orig_cls = getattr(diffusers_library, orig_cls_name)
            unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys
            config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}
        elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):
            raise ValueError(
                "Make sure that the `_class_name` is of type string or a list of strings (for custom pipelines)."
            )

        # remove private attributes
        config_dict = {k: v for k, v in config_dict.items() if not k.startswith("_")}

        # remove quantization_config
        config_dict = {k: v for k, v in config_dict.items() if k != "quantization_config"}

        # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments
        init_dict = {}
        for key in expected_keys:
            # if config param is passed to kwarg and is present in config dict
            # it should overwrite existing config dict key
            if key in kwargs and key in config_dict:
                config_dict[key] = kwargs.pop(key)

            if key in kwargs:
                # overwrite key
                init_dict[key] = kwargs.pop(key)
            elif key in config_dict:
                # use value from config dict
                init_dict[key] = config_dict.pop(key)

        # 4. Give nice warning if unexpected values have been passed
        if len(config_dict) > 0:
            logger.warning(
                f"The config attributes {config_dict} were passed to {cls.__name__}, "
                "but are not expected and will be ignored. Please verify your "
                f"{cls.config_name} configuration file."
            )

        # 5. Give nice info if config attributes are initialized to default because they have not been passed
        passed_keys = set(init_dict.keys())
        if len(expected_keys - passed_keys) > 0:
            logger.info(
                f"{expected_keys - passed_keys} were not found in config. Values will be initialized to default values."
            )

        # 6. Define unused keyword arguments
        unused_kwargs = {**config_dict, **kwargs}

        # 7.
Define "hidden" config parameters that were saved for compatible classes hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict} return init_dict, unused_kwargs, hidden_config_dict @classmethod def _dict_from_json_file( cls, json_file: Union[str, os.PathLike], dduf_entries: Optional[Dict[str, DDUFEntry]] = None ): if dduf_entries: text = dduf_entries[json_file].read_text() else: with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return json.loads(text) def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" @property def config(self) -> Dict[str, Any]: """ Returns the config of the class as a frozen dictionary Returns: `Dict[str, Any]`: Config of the class. """ return self._internal_dict def to_json_string(self) -> str: """ Serializes the configuration instance to a JSON string. Returns: `str`: String containing all the attributes that make up the configuration instance in JSON format. """ config_dict = self._internal_dict if hasattr(self, "_internal_dict") else {} config_dict["_class_name"] = self.__class__.__name__ config_dict["_diffusers_version"] = __version__ def to_json_saveable(value): if isinstance(value, np.ndarray): value = value.tolist() elif isinstance(value, Path): value = value.as_posix() elif hasattr(value, "to_dict") and callable(value.to_dict): value = value.to_dict() elif isinstance(value, list): value = [to_json_saveable(v) for v in value] return value if "quantization_config" in config_dict: config_dict["quantization_config"] = ( config_dict.quantization_config.to_dict() if not isinstance(config_dict.quantization_config, dict) else config_dict.quantization_config ) config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()} # Don't save "_ignore_files" or "_use_default_values" config_dict.pop("_ignore_files", None) config_dict.pop("_use_default_values", None) # pop the `_pre_quantization_dtype` as torch.dtypes are not serializable. _ = config_dict.pop("_pre_quantization_dtype", None) return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save the configuration instance's parameters to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file to save a configuration instance's parameters. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) @classmethod def _get_config_file_from_dduf(cls, pretrained_model_name_or_path: str, dduf_entries: Dict[str, DDUFEntry]): # paths inside a DDUF file must always be "/" config_file = ( cls.config_name if pretrained_model_name_or_path == "" else "/".join([pretrained_model_name_or_path, cls.config_name]) ) if config_file not in dduf_entries: raise ValueError( f"We did not manage to find the file {config_file} in the dduf file. We only have the following files {dduf_entries.keys()}" ) return config_file def register_to_config(init): r""" Decorator to apply on the init of classes inheriting from [`ConfigMixin`] so that all the arguments are automatically sent to `self.register_for_config`. To ignore a specific argument accepted by the init but that shouldn't be registered in the config, use the `ignore_for_config` class variable Warning: Once decorated, all private arguments (beginning with an underscore) are trashed and not sent to the init! """ @functools.wraps(init) def inner_init(self, *args, **kwargs): # Ignore private kwargs in the init. 
init_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")} config_init_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")} if not isinstance(self, ConfigMixin): raise RuntimeError( f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does " "not inherit from `ConfigMixin`." ) ignore = getattr(self, "ignore_for_config", []) # Get positional arguments aligned with kwargs new_kwargs = {} signature = inspect.signature(init) parameters = { name: p.default for i, (name, p) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore } for arg, name in zip(args, parameters.keys()): new_kwargs[name] = arg # Then add all kwargs new_kwargs.update( { k: init_kwargs.get(k, default) for k, default in parameters.items() if k not in ignore and k not in new_kwargs } ) # Take note of the parameters that were not present in the loaded config if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0: new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs)) new_kwargs = {**config_init_kwargs, **new_kwargs} getattr(self, "register_to_config")(**new_kwargs) init(self, *args, **init_kwargs) return inner_init def flax_register_to_config(cls): original_init = cls.__init__ @functools.wraps(original_init) def init(self, *args, **kwargs): if not isinstance(self, ConfigMixin): raise RuntimeError( f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does " "not inherit from `ConfigMixin`." ) # Ignore private kwargs in the init. Retrieve all passed attributes init_kwargs = dict(kwargs.items()) # Retrieve default values fields = dataclasses.fields(self) default_kwargs = {} for field in fields: # ignore flax specific attributes if field.name in self._flax_internal_args: continue if type(field.default) == dataclasses._MISSING_TYPE: default_kwargs[field.name] = None else: default_kwargs[field.name] = getattr(self, field.name) # Make sure init_kwargs override default kwargs new_kwargs = {**default_kwargs, **init_kwargs} # dtype should be part of `init_kwargs`, but not `new_kwargs` if "dtype" in new_kwargs: new_kwargs.pop("dtype") # Get positional arguments aligned with kwargs for i, arg in enumerate(args): name = fields[i].name new_kwargs[name] = arg # Take note of the parameters that were not present in the loaded config if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0: new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs)) getattr(self, "register_to_config")(**new_kwargs) original_init(self, *args, **kwargs) cls.__init__ = init return cls class LegacyConfigMixin(ConfigMixin): r""" A subclass of `ConfigMixin` to resolve class mapping from legacy classes (like `Transformer2DModel`) to more pipeline-specific classes (like `DiTTransformer2DModel`). """ @classmethod def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs): # To prevent dependency import problem. from .models.model_loading_utils import _fetch_remapped_cls_from_config # resolve remapping remapped_class = _fetch_remapped_cls_from_config(config, cls) if remapped_class is cls: return super(LegacyConfigMixin, remapped_class).from_config(config, return_unused_kwargs, **kwargs) else: return remapped_class.from_config(config, return_unused_kwargs, **kwargs)
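

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module API): a minimal round trip
# through `register_to_config`, `save_config`, `load_config`, and
# `from_config`. The class and its `config_name` below are hypothetical and
# exist only to demonstrate the machinery defined above.
if __name__ == "__main__":
    import tempfile

    class _ToyScheduler(ConfigMixin):
        config_name = "toy_scheduler_config.json"

        @register_to_config
        def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 1e-4):
            pass

    toy = _ToyScheduler(num_train_timesteps=500)
    # The decorator captured the init arguments into the frozen config
    # (arguments left at their defaults are tracked via `_use_default_values`).
    print(toy.config.num_train_timesteps)  # 500

    with tempfile.TemporaryDirectory() as tmp_dir:
        toy.save_config(tmp_dir)  # writes toy_scheduler_config.json
        config = _ToyScheduler.load_config(tmp_dir)
        restored = _ToyScheduler.from_config(config)
    print(restored.config.beta_start)  # 0.0001 (default, round-tripped)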
diffusers/src/diffusers/configuration_utils.py/0
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch

from ..configuration_utils import register_to_config
from ..hooks import HookRegistry
from ..hooks.smoothed_energy_guidance_utils import SmoothedEnergyGuidanceConfig, _apply_smoothed_energy_guidance_hook
from .guider_utils import BaseGuidance, rescale_noise_cfg


if TYPE_CHECKING:
    from ..modular_pipelines.modular_pipeline import BlockState


class SmoothedEnergyGuidance(BaseGuidance):
    """
    Smoothed Energy Guidance (SEG): https://huggingface.co/papers/2408.00760

    SEG is only supported as an experimental prototype feature for now, so the implementation may be modified in the
    future without warning or guarantee of reproducibility.

    This implementation assumes:
    - Generated images are square (height == width)
    - The model does not combine different modalities together (e.g., text and image latent streams are not combined,
      as they are in Flux)

    Args:
        guidance_scale (`float`, defaults to `7.5`):
            The scale parameter for classifier-free guidance. Higher values result in stronger conditioning on the
            text prompt, while lower values allow for more freedom in generation. Higher values may lead to saturation
            and deterioration of image quality.
        seg_guidance_scale (`float`, defaults to `2.8`):
            The scale parameter for smoothed energy guidance. Anatomy and structure coherence may improve with higher
            values, but it may also lead to overexposure and saturation.
        seg_blur_sigma (`float`, defaults to `9999999.0`):
            The amount by which we blur the attention weights. Setting this value greater than 9999.0 results in
            infinite blur, which means uniform queries. Controlling it exponentially is empirically effective.
        seg_blur_threshold_inf (`float`, defaults to `9999.0`):
            The threshold above which the blur is considered infinite.
        seg_guidance_start (`float`, defaults to `0.0`):
            The fraction of the total number of denoising steps after which smoothed energy guidance starts.
        seg_guidance_stop (`float`, defaults to `1.0`):
            The fraction of the total number of denoising steps after which smoothed energy guidance stops.
        seg_guidance_layers (`int` or `List[int]`, *optional*):
            The layer indices to apply smoothed energy guidance to. Can be a single integer or a list of integers. If
            not provided, `seg_guidance_config` must be provided. The recommended values are `[7, 8, 9]` for Stable
            Diffusion 3.5 Medium.
        seg_guidance_config (`SmoothedEnergyGuidanceConfig` or `List[SmoothedEnergyGuidanceConfig]`, *optional*):
            The configuration for the smoothed energy layer guidance. Can be a single `SmoothedEnergyGuidanceConfig`
            or a list of `SmoothedEnergyGuidanceConfig`. If not provided, `seg_guidance_layers` must be provided.
        guidance_rescale (`float`, defaults to `0.0`):
            The rescale factor applied to the noise predictions. This is used to improve image quality and fix
            overexposure.
Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
            Flawed](https://huggingface.co/papers/2305.08891).
        use_original_formulation (`bool`, defaults to `False`):
            Whether to use the original formulation of classifier-free guidance as proposed in the paper. By default,
            we use the diffusers-native implementation that has been in the codebase for a long time. See
            [`~guiders.classifier_free_guidance.ClassifierFreeGuidance`] for more details.
        start (`float`, defaults to `0.0`):
            The fraction of the total number of denoising steps after which guidance starts.
        stop (`float`, defaults to `1.0`):
            The fraction of the total number of denoising steps after which guidance stops.
    """

    _input_predictions = ["pred_cond", "pred_uncond", "pred_cond_seg"]

    @register_to_config
    def __init__(
        self,
        guidance_scale: float = 7.5,
        seg_guidance_scale: float = 2.8,
        seg_blur_sigma: float = 9999999.0,
        seg_blur_threshold_inf: float = 9999.0,
        seg_guidance_start: float = 0.0,
        seg_guidance_stop: float = 1.0,
        seg_guidance_layers: Optional[Union[int, List[int]]] = None,
        seg_guidance_config: Union[SmoothedEnergyGuidanceConfig, List[SmoothedEnergyGuidanceConfig]] = None,
        guidance_rescale: float = 0.0,
        use_original_formulation: bool = False,
        start: float = 0.0,
        stop: float = 1.0,
    ):
        super().__init__(start, stop)

        self.guidance_scale = guidance_scale
        self.seg_guidance_scale = seg_guidance_scale
        self.seg_blur_sigma = seg_blur_sigma
        self.seg_blur_threshold_inf = seg_blur_threshold_inf
        self.seg_guidance_start = seg_guidance_start
        self.seg_guidance_stop = seg_guidance_stop
        self.guidance_rescale = guidance_rescale
        self.use_original_formulation = use_original_formulation

        if not (0.0 <= seg_guidance_start < 1.0):
            raise ValueError(f"Expected `seg_guidance_start` to be between 0.0 and 1.0, but got {seg_guidance_start}.")
        if not (seg_guidance_start <= seg_guidance_stop <= 1.0):
            raise ValueError(
                f"Expected `seg_guidance_stop` to be between `seg_guidance_start` and 1.0, but got {seg_guidance_stop}."
            )

        if seg_guidance_layers is None and seg_guidance_config is None:
            raise ValueError(
                "Either `seg_guidance_layers` or `seg_guidance_config` must be provided to enable Smoothed Energy Guidance."
            )
        if seg_guidance_layers is not None and seg_guidance_config is not None:
            raise ValueError("Only one of `seg_guidance_layers` or `seg_guidance_config` can be provided.")

        if seg_guidance_layers is not None:
            if isinstance(seg_guidance_layers, int):
                seg_guidance_layers = [seg_guidance_layers]
            if not isinstance(seg_guidance_layers, list):
                raise ValueError(
                    f"Expected `seg_guidance_layers` to be an int or a list of ints, but got {type(seg_guidance_layers)}."
                )
            seg_guidance_config = [SmoothedEnergyGuidanceConfig(layer, fqn="auto") for layer in seg_guidance_layers]

        if isinstance(seg_guidance_config, dict):
            seg_guidance_config = SmoothedEnergyGuidanceConfig.from_dict(seg_guidance_config)

        if isinstance(seg_guidance_config, SmoothedEnergyGuidanceConfig):
            seg_guidance_config = [seg_guidance_config]

        if not isinstance(seg_guidance_config, list):
            raise ValueError(
                f"Expected `seg_guidance_config` to be a SmoothedEnergyGuidanceConfig or a list of SmoothedEnergyGuidanceConfig, but got {type(seg_guidance_config)}."
) elif isinstance(next(iter(seg_guidance_config), None), dict): seg_guidance_config = [SmoothedEnergyGuidanceConfig.from_dict(config) for config in seg_guidance_config] self.seg_guidance_config = seg_guidance_config self._seg_layer_hook_names = [f"SmoothedEnergyGuidance_{i}" for i in range(len(self.seg_guidance_config))] def prepare_models(self, denoiser: torch.nn.Module) -> None: if self._is_seg_enabled() and self.is_conditional and self._count_prepared > 1: for name, config in zip(self._seg_layer_hook_names, self.seg_guidance_config): _apply_smoothed_energy_guidance_hook(denoiser, config, self.seg_blur_sigma, name=name) def cleanup_models(self, denoiser: torch.nn.Module): if self._is_seg_enabled() and self.is_conditional and self._count_prepared > 1: registry = HookRegistry.check_if_exists_or_initialize(denoiser) # Remove the hooks after inference for hook_name in self._seg_layer_hook_names: registry.remove_hook(hook_name, recurse=True) def prepare_inputs( self, data: "BlockState", input_fields: Optional[Dict[str, Union[str, Tuple[str, str]]]] = None ) -> List["BlockState"]: if input_fields is None: input_fields = self._input_fields if self.num_conditions == 1: tuple_indices = [0] input_predictions = ["pred_cond"] elif self.num_conditions == 2: tuple_indices = [0, 1] input_predictions = ( ["pred_cond", "pred_uncond"] if self._is_cfg_enabled() else ["pred_cond", "pred_cond_seg"] ) else: tuple_indices = [0, 1, 0] input_predictions = ["pred_cond", "pred_uncond", "pred_cond_seg"] data_batches = [] for i in range(self.num_conditions): data_batch = self._prepare_batch(input_fields, data, tuple_indices[i], input_predictions[i]) data_batches.append(data_batch) return data_batches def forward( self, pred_cond: torch.Tensor, pred_uncond: Optional[torch.Tensor] = None, pred_cond_seg: Optional[torch.Tensor] = None, ) -> torch.Tensor: pred = None if not self._is_cfg_enabled() and not self._is_seg_enabled(): pred = pred_cond elif not self._is_cfg_enabled(): shift = pred_cond - pred_cond_seg pred = pred_cond if self.use_original_formulation else pred_cond_seg pred = pred + self.seg_guidance_scale * shift elif not self._is_seg_enabled(): shift = pred_cond - pred_uncond pred = pred_cond if self.use_original_formulation else pred_uncond pred = pred + self.guidance_scale * shift else: shift = pred_cond - pred_uncond shift_seg = pred_cond - pred_cond_seg pred = pred_cond if self.use_original_formulation else pred_uncond pred = pred + self.guidance_scale * shift + self.seg_guidance_scale * shift_seg if self.guidance_rescale > 0.0: pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale) return pred, {} @property def is_conditional(self) -> bool: return self._count_prepared == 1 or self._count_prepared == 3 @property def num_conditions(self) -> int: num_conditions = 1 if self._is_cfg_enabled(): num_conditions += 1 if self._is_seg_enabled(): num_conditions += 1 return num_conditions def _is_cfg_enabled(self) -> bool: if not self._enabled: return False is_within_range = True if self._num_inference_steps is not None: skip_start_step = int(self._start * self._num_inference_steps) skip_stop_step = int(self._stop * self._num_inference_steps) is_within_range = skip_start_step <= self._step < skip_stop_step is_close = False if self.use_original_formulation: is_close = math.isclose(self.guidance_scale, 0.0) else: is_close = math.isclose(self.guidance_scale, 1.0) return is_within_range and not is_close def _is_seg_enabled(self) -> bool: if not self._enabled: return False is_within_range = True if 
self._num_inference_steps is not None: skip_start_step = int(self.seg_guidance_start * self._num_inference_steps) skip_stop_step = int(self.seg_guidance_stop * self._num_inference_steps) is_within_range = skip_start_step < self._step < skip_stop_step is_zero = math.isclose(self.seg_guidance_scale, 0.0) return is_within_range and not is_zero
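

# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical values, not part of the module): how the
# guider is typically constructed. The layer indices follow the docstring's
# recommendation for Stable Diffusion 3.5 Medium; wiring the guider into a
# modular pipeline is pipeline-specific and not shown here.
if __name__ == "__main__":
    guider = SmoothedEnergyGuidance(
        guidance_scale=7.5,  # classifier-free guidance strength
        seg_guidance_scale=2.8,  # smoothed energy guidance strength
        seg_guidance_layers=[7, 8, 9],  # recommended for SD 3.5 Medium
        seg_guidance_start=0.0,
        seg_guidance_stop=1.0,
    )
    # Each layer index was expanded into a SmoothedEnergyGuidanceConfig with fqn="auto":
    print([type(cfg).__name__ for cfg in guider.seg_guidance_config])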
diffusers/src/diffusers/guiders/smoothed_energy_guidance.py/0
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from typing import Dict, List, Optional, Union import torch import torch.nn.functional as F from huggingface_hub.utils import validate_hf_hub_args from safetensors import safe_open from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict from ..utils import ( USE_PEFT_BACKEND, _get_detailed_type, _get_model_file, _is_valid_type, is_accelerate_available, is_torch_version, is_transformers_available, logging, ) from .unet_loader_utils import _maybe_expand_lora_scales if is_transformers_available(): from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, SiglipImageProcessor, SiglipVisionModel from ..models.attention_processor import ( AttnProcessor, AttnProcessor2_0, IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor, JointAttnProcessor2_0, SD3IPAdapterJointAttnProcessor2_0, ) logger = logging.get_logger(__name__) class IPAdapterMixin: """Mixin for handling IP Adapters.""" @validate_hf_hub_args def load_ip_adapter( self, pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]], subfolder: Union[str, List[str]], weight_name: Union[str, List[str]], image_encoder_folder: Optional[str] = "image_encoder", **kwargs, ): """ Parameters: pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). subfolder (`str` or `List[str]`): The subfolder location of a model file within a larger model repository on the Hub or locally. If a list is passed, it should have the same length as `weight_name`. weight_name (`str` or `List[str]`): The name of the weight file to load. If a list is passed, it should have the same length as `subfolder`. image_encoder_folder (`str`, *optional*, defaults to `image_encoder`): The subfolder location of the image encoder within a larger model repository on the Hub or locally. Pass `None` to not load the image encoder. If the image encoder is located in a folder inside `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g. `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than `subfolder`, you should pass the path to the folder that contains image encoder weights, for example, `image_encoder_folder="different_subfolder/image_encoder"`. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. 
force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. """ # handle the list inputs for multiple IP Adapters if not isinstance(weight_name, list): weight_name = [weight_name] if not isinstance(pretrained_model_name_or_path_or_dict, list): pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict] if len(pretrained_model_name_or_path_or_dict) == 1: pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name) if not isinstance(subfolder, list): subfolder = [subfolder] if len(subfolder) == 1: subfolder = subfolder * len(weight_name) if len(weight_name) != len(pretrained_model_name_or_path_or_dict): raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.") if len(weight_name) != len(subfolder): raise ValueError("`weight_name` and `subfolder` must have the same length.") # Load the main state dict first. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) if low_cpu_mem_usage and not is_accelerate_available(): low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." 
            )

        user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}
        state_dicts = []
        for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
            pretrained_model_name_or_path_or_dict, weight_name, subfolder
        ):
            if not isinstance(pretrained_model_name_or_path_or_dict, dict):
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                if weight_name.endswith(".safetensors"):
                    state_dict = {"image_proj": {}, "ip_adapter": {}}
                    with safe_open(model_file, framework="pt", device="cpu") as f:
                        for key in f.keys():
                            if key.startswith("image_proj."):
                                state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
                            elif key.startswith("ip_adapter."):
                                state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
                else:
                    state_dict = load_state_dict(model_file)
            else:
                state_dict = pretrained_model_name_or_path_or_dict

            keys = list(state_dict.keys())
            if "image_proj" not in keys and "ip_adapter" not in keys:
                raise ValueError("Required keys (`image_proj` and `ip_adapter`) are missing from the state dict.")

            state_dicts.append(state_dict)

        # load CLIP image encoder here if it has not been registered to the pipeline yet
        if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
            if image_encoder_folder is not None:
                if not isinstance(pretrained_model_name_or_path_or_dict, dict):
                    logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
                    if image_encoder_folder.count("/") == 0:
                        image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix()
                    else:
                        image_encoder_subfolder = Path(image_encoder_folder).as_posix()

                    image_encoder = CLIPVisionModelWithProjection.from_pretrained(
                        pretrained_model_name_or_path_or_dict,
                        subfolder=image_encoder_subfolder,
                        low_cpu_mem_usage=low_cpu_mem_usage,
                        cache_dir=cache_dir,
                        local_files_only=local_files_only,
                        torch_dtype=self.dtype,
                    ).to(self.device)
                    self.register_modules(image_encoder=image_encoder)
                else:
                    raise ValueError(
                        "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict."
                    )
            else:
                logger.warning(
                    "image_encoder is not loaded since `image_encoder_folder=None` was passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter. "
                    "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead."
) # create feature extractor if it has not been registered to the pipeline yet if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None: # FaceID IP adapters don't need the image encoder so it's not present, in this case we default to 224 default_clip_size = 224 clip_image_size = ( self.image_encoder.config.image_size if self.image_encoder is not None else default_clip_size ) feature_extractor = CLIPImageProcessor(size=clip_image_size, crop_size=clip_image_size) self.register_modules(feature_extractor=feature_extractor) # load ip-adapter into unet unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage) extra_loras = unet._load_ip_adapter_loras(state_dicts) if extra_loras != {}: if not USE_PEFT_BACKEND: logger.warning("PEFT backend is required to load these weights.") else: # apply the IP Adapter Face ID LoRA weights peft_config = getattr(unet, "peft_config", {}) for k, lora in extra_loras.items(): if f"faceid_{k}" not in peft_config: self.load_lora_weights(lora, adapter_name=f"faceid_{k}") self.set_adapters([f"faceid_{k}"], adapter_weights=[1.0]) def set_ip_adapter_scale(self, scale): """ Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for granular control over each IP-Adapter behavior. A config can be a float or a dictionary. Example: ```py # To use original IP-Adapter scale = 1.0 pipeline.set_ip_adapter_scale(scale) # To use style block only scale = { "up": {"block_0": [0.0, 1.0, 0.0]}, } pipeline.set_ip_adapter_scale(scale) # To use style+layout blocks scale = { "down": {"block_2": [0.0, 1.0]}, "up": {"block_0": [0.0, 1.0, 0.0]}, } pipeline.set_ip_adapter_scale(scale) # To use style and layout from 2 reference images scales = [{"down": {"block_2": [0.0, 1.0]}}, {"up": {"block_0": [0.0, 1.0, 0.0]}}] pipeline.set_ip_adapter_scale(scales) ``` """ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet if not isinstance(scale, list): scale = [scale] scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0) for attn_name, attn_processor in unet.attn_processors.items(): if isinstance( attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor) ): if len(scale_configs) != len(attn_processor.scale): raise ValueError( f"Cannot assign {len(scale_configs)} scale_configs to {len(attn_processor.scale)} IP-Adapter." ) elif len(scale_configs) == 1: scale_configs = scale_configs * len(attn_processor.scale) for i, scale_config in enumerate(scale_configs): if isinstance(scale_config, dict): for k, s in scale_config.items(): if attn_name.startswith(k): attn_processor.scale[i] = s else: attn_processor.scale[i] = scale_config def unload_ip_adapter(self): """ Unloads the IP Adapter weights Examples: ```python >>> # Assuming `pipeline` is already loaded with the IP Adapter weights. >>> pipeline.unload_ip_adapter() >>> ... 
``` """ # remove CLIP image encoder if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None: self.image_encoder = None self.register_to_config(image_encoder=[None, None]) # remove feature extractor only when safety_checker is None as safety_checker uses # the feature_extractor later if not hasattr(self, "safety_checker"): if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None: self.feature_extractor = None self.register_to_config(feature_extractor=[None, None]) # remove hidden encoder self.unet.encoder_hid_proj = None self.unet.config.encoder_hid_dim_type = None # Kolors: restore `encoder_hid_proj` with `text_encoder_hid_proj` if hasattr(self.unet, "text_encoder_hid_proj") and self.unet.text_encoder_hid_proj is not None: self.unet.encoder_hid_proj = self.unet.text_encoder_hid_proj self.unet.text_encoder_hid_proj = None self.unet.config.encoder_hid_dim_type = "text_proj" # restore original Unet attention processors layers attn_procs = {} for name, value in self.unet.attn_processors.items(): attn_processor_class = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnProcessor() ) attn_procs[name] = ( attn_processor_class if isinstance( value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor) ) else value.__class__() ) self.unet.set_attn_processor(attn_procs) class ModularIPAdapterMixin: """Mixin for handling IP Adapters.""" @validate_hf_hub_args def load_ip_adapter( self, pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]], subfolder: Union[str, List[str]], weight_name: Union[str, List[str]], **kwargs, ): """ Parameters: pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). subfolder (`str` or `List[str]`): The subfolder location of a model file within a larger model repository on the Hub or locally. If a list is passed, it should have the same length as `weight_name`. weight_name (`str` or `List[str]`): The name of the weight file to load. If a list is passed, it should have the same length as `subfolder`. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. 
revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. """ # handle the list inputs for multiple IP Adapters if not isinstance(weight_name, list): weight_name = [weight_name] if not isinstance(pretrained_model_name_or_path_or_dict, list): pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict] if len(pretrained_model_name_or_path_or_dict) == 1: pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name) if not isinstance(subfolder, list): subfolder = [subfolder] if len(subfolder) == 1: subfolder = subfolder * len(weight_name) if len(weight_name) != len(pretrained_model_name_or_path_or_dict): raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.") if len(weight_name) != len(subfolder): raise ValueError("`weight_name` and `subfolder` must have the same length.") # Load the main state dict first. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) if low_cpu_mem_usage and not is_accelerate_available(): low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." 
            )

        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }
        state_dicts = []
        for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
            pretrained_model_name_or_path_or_dict, weight_name, subfolder
        ):
            if not isinstance(pretrained_model_name_or_path_or_dict, dict):
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                if weight_name.endswith(".safetensors"):
                    state_dict = {"image_proj": {}, "ip_adapter": {}}
                    with safe_open(model_file, framework="pt", device="cpu") as f:
                        for key in f.keys():
                            if key.startswith("image_proj."):
                                state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
                            elif key.startswith("ip_adapter."):
                                state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
                else:
                    state_dict = load_state_dict(model_file)
            else:
                state_dict = pretrained_model_name_or_path_or_dict

            keys = list(state_dict.keys())
            if "image_proj" not in keys and "ip_adapter" not in keys:
                raise ValueError("Required keys (`image_proj` and `ip_adapter`) are missing from the state dict.")

            state_dicts.append(state_dict)

        unet_name = getattr(self, "unet_name", "unet")
        unet = getattr(self, unet_name)
        unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)

        extra_loras = unet._load_ip_adapter_loras(state_dicts)
        if extra_loras != {}:
            if not USE_PEFT_BACKEND:
                logger.warning("PEFT backend is required to load these weights.")
            else:
                # apply the IP Adapter Face ID LoRA weights
                peft_config = getattr(unet, "peft_config", {})
                for k, lora in extra_loras.items():
                    if f"faceid_{k}" not in peft_config:
                        self.load_lora_weights(lora, adapter_name=f"faceid_{k}")
                        self.set_adapters([f"faceid_{k}"], adapter_weights=[1.0])

    def set_ip_adapter_scale(self, scale):
        """
        Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for
        granular control over each IP-Adapter behavior. A config can be a float or a dictionary.

        Example:

        ```py
        # To use original IP-Adapter
        scale = 1.0
        pipeline.set_ip_adapter_scale(scale)

        # To use style block only
        scale = {
            "up": {"block_0": [0.0, 1.0, 0.0]},
        }
        pipeline.set_ip_adapter_scale(scale)

        # To use style+layout blocks
        scale = {
            "down": {"block_2": [0.0, 1.0]},
            "up": {"block_0": [0.0, 1.0, 0.0]},
        }
        pipeline.set_ip_adapter_scale(scale)

        # To use style and layout from 2 reference images
        scales = [{"down": {"block_2": [0.0, 1.0]}}, {"up": {"block_0": [0.0, 1.0, 0.0]}}]
        pipeline.set_ip_adapter_scale(scales)
        ```
        """
        unet_name = getattr(self, "unet_name", "unet")
        unet = getattr(self, unet_name)
        if not isinstance(scale, list):
            scale = [scale]
        scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0)

        for attn_name, attn_processor in unet.attn_processors.items():
            if isinstance(
                attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor)
            ):
                if len(scale_configs) != len(attn_processor.scale):
                    raise ValueError(
                        f"Cannot assign {len(scale_configs)} scale_configs to {len(attn_processor.scale)} IP-Adapter."
                    )
                elif len(scale_configs) == 1:
                    scale_configs = scale_configs * len(attn_processor.scale)
                for i, scale_config in enumerate(scale_configs):
                    if isinstance(scale_config, dict):
                        for k, s in scale_config.items():
                            if attn_name.startswith(k):
                                attn_processor.scale[i] = s
                    else:
                        attn_processor.scale[i] = scale_config

    def unload_ip_adapter(self):
        """
        Unloads the IP Adapter weights

        Examples:

        ```python
        >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
        >>> pipeline.unload_ip_adapter()
        >>> ...
        ```
        """
        # remove hidden encoder
        if self.unet is None:
            return

        self.unet.encoder_hid_proj = None
        self.unet.config.encoder_hid_dim_type = None

        # Kolors: restore `encoder_hid_proj` with `text_encoder_hid_proj`
        if hasattr(self.unet, "text_encoder_hid_proj") and self.unet.text_encoder_hid_proj is not None:
            self.unet.encoder_hid_proj = self.unet.text_encoder_hid_proj
            self.unet.text_encoder_hid_proj = None
            self.unet.config.encoder_hid_dim_type = "text_proj"

        # restore original Unet attention processors layers
        attn_procs = {}
        for name, value in self.unet.attn_processors.items():
            attn_processor_class = (
                AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnProcessor()
            )
            attn_procs[name] = (
                attn_processor_class
                if isinstance(
                    value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor)
                )
                else value.__class__()
            )
        self.unet.set_attn_processor(attn_procs)


class FluxIPAdapterMixin:
    """Mixin for handling Flux IP Adapters."""

    @validate_hf_hub_args
    def load_ip_adapter(
        self,
        pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]],
        weight_name: Union[str, List[str]],
        subfolder: Optional[Union[str, List[str]]] = "",
        image_encoder_pretrained_model_name_or_path: Optional[str] = "image_encoder",
        image_encoder_subfolder: Optional[str] = "",
        image_encoder_dtype: torch.dtype = torch.float16,
        **kwargs,
    ):
        """
        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted
                      on the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            subfolder (`str` or `List[str]`):
                The subfolder location of a model file within a larger model repository on the Hub or locally. If a
                list is passed, it should have the same length as `weight_name`.
            weight_name (`str` or `List[str]`):
                The name of the weight file to load. If a list is passed, it should have the same length as
                `subfolder`.
            image_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `image_encoder`):
                Can be either:

                    - A string, the *model id* (for example `openai/clip-vit-large-patch14`) of a pretrained model
                      hosted on the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. """ # handle the list inputs for multiple IP Adapters if not isinstance(weight_name, list): weight_name = [weight_name] if not isinstance(pretrained_model_name_or_path_or_dict, list): pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict] if len(pretrained_model_name_or_path_or_dict) == 1: pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name) if not isinstance(subfolder, list): subfolder = [subfolder] if len(subfolder) == 1: subfolder = subfolder * len(weight_name) if len(weight_name) != len(pretrained_model_name_or_path_or_dict): raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.") if len(weight_name) != len(subfolder): raise ValueError("`weight_name` and `subfolder` must have the same length.") # Load the main state dict first. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) if low_cpu_mem_usage and not is_accelerate_available(): low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." 
            )

        user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}
        state_dicts = []
        for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
            pretrained_model_name_or_path_or_dict, weight_name, subfolder
        ):
            if not isinstance(pretrained_model_name_or_path_or_dict, dict):
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                if weight_name.endswith(".safetensors"):
                    state_dict = {"image_proj": {}, "ip_adapter": {}}
                    with safe_open(model_file, framework="pt", device="cpu") as f:
                        image_proj_keys = ["ip_adapter_proj_model.", "image_proj."]
                        ip_adapter_keys = ["double_blocks.", "ip_adapter."]
                        for key in f.keys():
                            if any(key.startswith(prefix) for prefix in image_proj_keys):
                                diffusers_name = ".".join(key.split(".")[1:])
                                state_dict["image_proj"][diffusers_name] = f.get_tensor(key)
                            elif any(key.startswith(prefix) for prefix in ip_adapter_keys):
                                diffusers_name = (
                                    ".".join(key.split(".")[1:])
                                    .replace("ip_adapter_double_stream_k_proj", "to_k_ip")
                                    .replace("ip_adapter_double_stream_v_proj", "to_v_ip")
                                    .replace("processor.", "")
                                )
                                state_dict["ip_adapter"][diffusers_name] = f.get_tensor(key)
                else:
                    state_dict = load_state_dict(model_file)
            else:
                state_dict = pretrained_model_name_or_path_or_dict

            keys = list(state_dict.keys())
            if keys != ["image_proj", "ip_adapter"]:
                raise ValueError("Required keys (`image_proj` and `ip_adapter`) are missing from the state dict.")

            state_dicts.append(state_dict)

        # load CLIP image encoder here if it has not been registered to the pipeline yet
        if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
            if image_encoder_pretrained_model_name_or_path is not None:
                if not isinstance(pretrained_model_name_or_path_or_dict, dict):
                    logger.info(f"loading image_encoder from {image_encoder_pretrained_model_name_or_path}")
                    image_encoder = (
                        CLIPVisionModelWithProjection.from_pretrained(
                            image_encoder_pretrained_model_name_or_path,
                            subfolder=image_encoder_subfolder,
                            low_cpu_mem_usage=low_cpu_mem_usage,
                            cache_dir=cache_dir,
                            local_files_only=local_files_only,
                            torch_dtype=image_encoder_dtype,
                        )
                        .to(self.device)
                        .eval()
                    )
                    self.register_modules(image_encoder=image_encoder)
                else:
                    raise ValueError(
                        "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict."
                    )
            else:
                logger.warning(
                    "image_encoder is not loaded since `image_encoder_pretrained_model_name_or_path=None` was passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter. "
                    "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead."
)

        # Create a feature extractor if it has not been registered to the pipeline yet.
        if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
            # FaceID IP-Adapters don't need the image encoder, so it may not be present; in this case default to 224.
            default_clip_size = 224
            clip_image_size = (
                self.image_encoder.config.image_size if self.image_encoder is not None else default_clip_size
            )
            feature_extractor = CLIPImageProcessor(size=clip_image_size, crop_size=clip_image_size)
            self.register_modules(feature_extractor=feature_extractor)

        # Load the IP-Adapter weights into the transformer.
        self.transformer._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)

    def set_ip_adapter_scale(self, scale: Union[float, List[float], List[List[float]]]):
        """
        Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for
        granular control over each IP-Adapter behavior. A config can be a float or a list.

        A `float` is converted to a list and repeated for the number of blocks and the number of IP-Adapters. A
        `List[float]` must match the number of blocks; it is repeated for each IP-Adapter. A `List[List[float]]` must
        match the number of IP-Adapters, and each inner list must match the number of blocks.

        Example:

        ```py
        # To use original IP-Adapter
        scale = 1.0
        pipeline.set_ip_adapter_scale(scale)


        # To use a per-block schedule of scales that ramps up linearly
        def LinearStrengthModel(start, finish, size):
            return [(start + (finish - start) * (i / (size - 1))) for i in range(size)]


        ip_strengths = LinearStrengthModel(0.3, 0.92, 19)
        pipeline.set_ip_adapter_scale(ip_strengths)
        ```
        """
        scale_type = Union[int, float]
        num_ip_adapters = self.transformer.encoder_hid_proj.num_ip_adapters
        num_layers = self.transformer.config.num_layers

        # Single value for all layers of all IP-Adapters
        if isinstance(scale, scale_type):
            scale = [scale for _ in range(num_ip_adapters)]
        # List of per-layer scales for a single IP-Adapter
        elif _is_valid_type(scale, List[scale_type]) and num_ip_adapters == 1:
            scale = [scale]
        # Invalid scale type
        elif not _is_valid_type(scale, List[Union[scale_type, List[scale_type]]]):
            raise TypeError(f"Unexpected type {_get_detailed_type(scale)} for scale.")

        if len(scale) != num_ip_adapters:
            raise ValueError(f"Cannot assign {len(scale)} scales to {num_ip_adapters} IP-Adapters.")

        if any(len(s) != num_layers for s in scale if isinstance(s, list)):
            invalid_scale_sizes = {len(s) for s in scale if isinstance(s, list)} - {num_layers}
            raise ValueError(
                f"Expected list of {num_layers} scales, got {', '.join(str(x) for x in invalid_scale_sizes)}."
            )

        # Scalars are transformed to lists with length num_layers
        scale_configs = [[s] * num_layers if isinstance(s, scale_type) else s for s in scale]

        # Set scales. Zipping over scale_configs prevents iterating into single transformer layers.
        for attn_processor, *scale in zip(self.transformer.attn_processors.values(), *scale_configs):
            attn_processor.scale = scale

    def unload_ip_adapter(self):
        """
        Unloads the IP-Adapter weights.

        Examples:

        ```python
        >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
        >>> pipeline.unload_ip_adapter()
        >>> ...
``` """ # TODO: once the 1.0.0 deprecations are in, we can move the imports to top-level from ..models.transformers.transformer_flux import FluxAttnProcessor, FluxIPAdapterAttnProcessor # remove CLIP image encoder if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None: self.image_encoder = None self.register_to_config(image_encoder=[None, None]) # remove feature extractor only when safety_checker is None as safety_checker uses # the feature_extractor later if not hasattr(self, "safety_checker"): if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None: self.feature_extractor = None self.register_to_config(feature_extractor=[None, None]) # remove hidden encoder self.transformer.encoder_hid_proj = None self.transformer.config.encoder_hid_dim_type = None # restore original Transformer attention processors layers attn_procs = {} for name, value in self.transformer.attn_processors.items(): attn_processor_class = FluxAttnProcessor() attn_procs[name] = ( attn_processor_class if isinstance(value, FluxIPAdapterAttnProcessor) else value.__class__() ) self.transformer.set_attn_processor(attn_procs) class SD3IPAdapterMixin: """Mixin for handling StableDiffusion 3 IP Adapters.""" @property def is_ip_adapter_active(self) -> bool: """Checks if IP-Adapter is loaded and scale > 0. IP-Adapter scale controls the influence of the image prompt versus text prompt. When this value is set to 0, the image context is irrelevant. Returns: `bool`: True when IP-Adapter is loaded and any layer has scale > 0. """ scales = [ attn_proc.scale for attn_proc in self.transformer.attn_processors.values() if isinstance(attn_proc, SD3IPAdapterJointAttnProcessor2_0) ] return len(scales) > 0 and any(scale > 0 for scale in scales) @validate_hf_hub_args def load_ip_adapter( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], weight_name: str = "ip-adapter.safetensors", subfolder: Optional[str] = None, image_encoder_folder: Optional[str] = "image_encoder", **kwargs, ) -> None: """ Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). weight_name (`str`, defaults to "ip-adapter.safetensors"): The name of the weight file to load. If a list is passed, it should have the same length as `subfolder`. subfolder (`str`, *optional*): The subfolder location of a model file within a larger model repository on the Hub or locally. If a list is passed, it should have the same length as `weight_name`. image_encoder_folder (`str`, *optional*, defaults to `image_encoder`): The subfolder location of the image encoder within a larger model repository on the Hub or locally. Pass `None` to not load the image encoder. If the image encoder is located in a folder inside `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g. `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than `subfolder`, you should pass the path to the folder that contains image encoder weights, for example, `image_encoder_folder="different_subfolder/image_encoder"`. 
cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This
                also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
                model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
        """
        # Load the main state dict first.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", None)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)

        if low_cpu_mem_usage and not is_accelerate_available():
            low_cpu_mem_usage = False
            logger.warning(
                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
                " `accelerate` for faster and less memory-intensive model loading. You can do so with: \n```\npip"
                " install accelerate\n```\n."
            )

        if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
            raise NotImplementedError(
                "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
                " `low_cpu_mem_usage=False`."
)

        user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}

        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
            model_file = _get_model_file(
                pretrained_model_name_or_path_or_dict,
                weights_name=weight_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
                subfolder=subfolder,
                user_agent=user_agent,
            )
            if weight_name.endswith(".safetensors"):
                state_dict = {"image_proj": {}, "ip_adapter": {}}
                with safe_open(model_file, framework="pt", device="cpu") as f:
                    for key in f.keys():
                        if key.startswith("image_proj."):
                            state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
                        elif key.startswith("ip_adapter."):
                            state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
            else:
                state_dict = load_state_dict(model_file)
        else:
            state_dict = pretrained_model_name_or_path_or_dict

        keys = list(state_dict.keys())
        # Both keys are required, so raise if either one is absent.
        if "image_proj" not in keys or "ip_adapter" not in keys:
            raise ValueError("Required keys (`image_proj` and `ip_adapter`) are missing from the state dict.")

        # Load image_encoder and feature_extractor here if they haven't been registered to the pipeline yet.
        if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
            if image_encoder_folder is not None:
                if not isinstance(pretrained_model_name_or_path_or_dict, dict):
                    logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
                    if image_encoder_folder.count("/") == 0:
                        image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix()
                    else:
                        image_encoder_subfolder = Path(image_encoder_folder).as_posix()

                    # Common args for loading the image encoder and image processor.
                    kwargs = {
                        "low_cpu_mem_usage": low_cpu_mem_usage,
                        "cache_dir": cache_dir,
                        "local_files_only": local_files_only,
                    }
                    self.register_modules(
                        feature_extractor=SiglipImageProcessor.from_pretrained(image_encoder_subfolder, **kwargs),
                        image_encoder=SiglipVisionModel.from_pretrained(
                            image_encoder_subfolder, torch_dtype=self.dtype, **kwargs
                        ).to(self.device),
                    )
                else:
                    raise ValueError(
                        "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict."
                    )
            else:
                logger.warning(
                    "image_encoder is not loaded since `image_encoder_folder=None` was passed. You will not be able"
                    " to use `ip_adapter_image` when calling the pipeline with IP-Adapter."
                    " Use `ip_adapter_image_embeds` to pass pre-generated image embeddings instead."
                )

        # Load the IP-Adapter weights into the transformer.
        self.transformer._load_ip_adapter_weights(state_dict, low_cpu_mem_usage=low_cpu_mem_usage)

    def set_ip_adapter_scale(self, scale: float) -> None:
        """
        Set the IP-Adapter scale, which controls image prompt conditioning. A value of 1.0 means the model is only
        conditioned on the image prompt, and a value of 0.0 means it is only conditioned on the text prompt. Lowering
        this value encourages the model to produce more diverse images, but they may not be as aligned with the image
        prompt.

        Example:

        ```python
        >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
        >>> pipeline.set_ip_adapter_scale(0.6)
        >>> ...
        ```

        Args:
            scale (float):
                IP-Adapter scale to be set.
        """
        for attn_processor in self.transformer.attn_processors.values():
            if isinstance(attn_processor, SD3IPAdapterJointAttnProcessor2_0):
                attn_processor.scale = scale

    def unload_ip_adapter(self) -> None:
        """
        Unloads the IP-Adapter weights.

        Example:

        ```python
        >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
        >>> pipeline.unload_ip_adapter()
        >>> ...
``` """ # Remove image encoder if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None: self.image_encoder = None self.register_to_config(image_encoder=None) # Remove feature extractor if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None: self.feature_extractor = None self.register_to_config(feature_extractor=None) # Remove image projection self.transformer.image_proj = None # Restore original attention processors layers attn_procs = { name: ( JointAttnProcessor2_0() if isinstance(value, SD3IPAdapterJointAttnProcessor2_0) else value.__class__() ) for name, value in self.transformer.attn_processors.items() } self.transformer.set_attn_processor(attn_procs)
diffusers/src/diffusers/loaders/ip_adapter.py/0
{ "file_path": "diffusers/src/diffusers/loaders/ip_adapter.py", "repo_id": "diffusers", "token_count": 26696 }
160
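For orientation, here is a minimal sketch of how the two mixins above are typically driven from user code, assuming a Flux pipeline class that inherits `FluxIPAdapterMixin`. The repo ids and weight file name below are illustrative placeholders rather than guaranteed artifacts, and the keyword names mirror the signatures visible in the code above.

```py
# Minimal usage sketch for the IP-Adapter mixins above. The repo ids and weight
# file name are placeholders; substitute checkpoints that actually exist on the Hub.
import torch
from diffusers import FluxPipeline

pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # placeholder base-model repo
    torch_dtype=torch.bfloat16,
)

# `load_ip_adapter` also accepts lists for multiple IP-Adapters; single entries
# are broadcast to the length of `weight_name`, as the list handling above shows.
pipeline.load_ip_adapter(
    "XLabs-AI/flux-ip-adapter",  # placeholder adapter repo
    weight_name="ip_adapter.safetensors",
    image_encoder_pretrained_model_name_or_path="openai/clip-vit-large-patch14",
)

# A float applies one scale to every transformer block; a per-block list
# (or a list of such lists, one per adapter) is also accepted.
pipeline.set_ip_adapter_scale(0.7)

# ... run the pipeline with `ip_adapter_image=...` ...

# Restore the plain attention processors and drop the image encoder.
pipeline.unload_ip_adapter()
```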
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn.functional as F from torch import nn from ..utils import deprecate from ..utils.import_utils import is_torch_npu_available, is_torch_version if is_torch_npu_available(): import torch_npu ACT2CLS = { "swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU, "relu": nn.ReLU, } def get_activation(act_fn: str) -> nn.Module: """Helper function to get activation function from string. Args: act_fn (str): Name of activation function. Returns: nn.Module: Activation function. """ act_fn = act_fn.lower() if act_fn in ACT2CLS: return ACT2CLS[act_fn]() else: raise ValueError(f"activation function {act_fn} not found in ACT2FN mapping {list(ACT2CLS.keys())}") class FP32SiLU(nn.Module): r""" SiLU activation function with input upcasted to torch.float32. """ def __init__(self): super().__init__() def forward(self, inputs: torch.Tensor) -> torch.Tensor: return F.silu(inputs.float(), inplace=False).to(inputs.dtype) class GELU(nn.Module): r""" GELU activation function with tanh approximation support with `approximate="tanh"`. Parameters: dim_in (`int`): The number of channels in the input. dim_out (`int`): The number of channels in the output. approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation. bias (`bool`, defaults to True): Whether to use a bias in the linear layer. """ def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True): super().__init__() self.proj = nn.Linear(dim_in, dim_out, bias=bias) self.approximate = approximate def gelu(self, gate: torch.Tensor) -> torch.Tensor: if gate.device.type == "mps" and is_torch_version("<", "2.0.0"): # fp16 gelu not supported on mps before torch 2.0 return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype) return F.gelu(gate, approximate=self.approximate) def forward(self, hidden_states): hidden_states = self.proj(hidden_states) hidden_states = self.gelu(hidden_states) return hidden_states class GEGLU(nn.Module): r""" A [variant](https://huggingface.co/papers/2002.05202) of the gated linear unit activation function. Parameters: dim_in (`int`): The number of channels in the input. dim_out (`int`): The number of channels in the output. bias (`bool`, defaults to True): Whether to use a bias in the linear layer. """ def __init__(self, dim_in: int, dim_out: int, bias: bool = True): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias) def gelu(self, gate: torch.Tensor) -> torch.Tensor: if gate.device.type == "mps" and is_torch_version("<", "2.0.0"): # fp16 gelu not supported on mps before torch 2.0 return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype) return F.gelu(gate) def forward(self, hidden_states, *args, **kwargs): if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. 
Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) hidden_states = self.proj(hidden_states) if is_torch_npu_available(): # using torch_npu.npu_geglu can run faster and save memory on NPU. return torch_npu.npu_geglu(hidden_states, dim=-1, approximate=1)[0] else: hidden_states, gate = hidden_states.chunk(2, dim=-1) return hidden_states * self.gelu(gate) class SwiGLU(nn.Module): r""" A [variant](https://huggingface.co/papers/2002.05202) of the gated linear unit activation function. It's similar to `GEGLU` but uses SiLU / Swish instead of GeLU. Parameters: dim_in (`int`): The number of channels in the input. dim_out (`int`): The number of channels in the output. bias (`bool`, defaults to True): Whether to use a bias in the linear layer. """ def __init__(self, dim_in: int, dim_out: int, bias: bool = True): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias) self.activation = nn.SiLU() def forward(self, hidden_states): hidden_states = self.proj(hidden_states) hidden_states, gate = hidden_states.chunk(2, dim=-1) return hidden_states * self.activation(gate) class ApproximateGELU(nn.Module): r""" The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this [paper](https://huggingface.co/papers/1606.08415). Parameters: dim_in (`int`): The number of channels in the input. dim_out (`int`): The number of channels in the output. bias (`bool`, defaults to True): Whether to use a bias in the linear layer. """ def __init__(self, dim_in: int, dim_out: int, bias: bool = True): super().__init__() self.proj = nn.Linear(dim_in, dim_out, bias=bias) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj(x) return x * torch.sigmoid(1.702 * x) class LinearActivation(nn.Module): def __init__(self, dim_in: int, dim_out: int, bias: bool = True, activation: str = "silu"): super().__init__() self.proj = nn.Linear(dim_in, dim_out, bias=bias) self.activation = get_activation(activation) def forward(self, hidden_states): hidden_states = self.proj(hidden_states) return self.activation(hidden_states)
diffusers/src/diffusers/models/activations.py/0
{ "file_path": "diffusers/src/diffusers/models/activations.py", "repo_id": "diffusers", "token_count": 2593 }
161
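A quick sketch of how the activation helpers defined above compose. The tensor shapes are arbitrary and chosen only to illustrate the input/output contract; on a machine without an NPU, `GEGLU` takes the chunk-and-gate code path shown above.

```python
# Minimal sketch exercising the activation helpers from the file above.
import torch
from diffusers.models.activations import GEGLU, SwiGLU, get_activation

act = get_activation("silu")  # looks up nn.SiLU via the ACT2CLS mapping
x = torch.randn(2, 16, 64)  # (batch, sequence, dim_in)

geglu = GEGLU(dim_in=64, dim_out=128)  # projects to 2 * dim_out, then gates with GELU
swiglu = SwiGLU(dim_in=64, dim_out=128)  # same shape contract, but gates with SiLU

print(act(x).shape)  # torch.Size([2, 16, 64])
print(geglu(x).shape)  # torch.Size([2, 16, 128])
print(swiglu(x).shape)  # torch.Size([2, 16, 128])
```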
# Copyright 2025 The EasyAnimate team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ...utils.accelerate_utils import apply_forward_hook from ..activations import get_activation from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import DecoderOutput, DiagonalGaussianDistribution logger = logging.get_logger(__name__) # pylint: disable=invalid-name class EasyAnimateCausalConv3d(nn.Conv3d): def __init__( self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, ...]] = 3, stride: Union[int, Tuple[int, ...]] = 1, padding: Union[int, Tuple[int, ...]] = 1, dilation: Union[int, Tuple[int, ...]] = 1, groups: int = 1, bias: bool = True, padding_mode: str = "zeros", ): # Ensure kernel_size, stride, and dilation are tuples of length 3 kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size,) * 3 assert len(kernel_size) == 3, f"Kernel size must be a 3-tuple, got {kernel_size} instead." stride = stride if isinstance(stride, tuple) else (stride,) * 3 assert len(stride) == 3, f"Stride must be a 3-tuple, got {stride} instead." dilation = dilation if isinstance(dilation, tuple) else (dilation,) * 3 assert len(dilation) == 3, f"Dilation must be a 3-tuple, got {dilation} instead." 
# Unpack kernel size, stride, and dilation for temporal, height, and width dimensions
        t_ks, h_ks, w_ks = kernel_size
        self.t_stride, h_stride, w_stride = stride
        t_dilation, h_dilation, w_dilation = dilation

        # Calculate padding for temporal dimension to maintain causality
        t_pad = (t_ks - 1) * t_dilation
        # Calculate padding for height and width dimensions based on the padding parameter
        if padding is None:
            h_pad = math.ceil(((h_ks - 1) * h_dilation + (1 - h_stride)) / 2)
            w_pad = math.ceil(((w_ks - 1) * w_dilation + (1 - w_stride)) / 2)
        elif isinstance(padding, int):
            h_pad = w_pad = padding
        else:
            # `assert NotImplementedError` in the original never fails (the exception class is
            # truthy), so unsupported padding types were silently accepted; raise instead.
            raise NotImplementedError(f"Unsupported `padding` value {padding!r}; expected `None` or an `int`.")

        # Store temporal padding and initialize flags and previous features cache
        self.temporal_padding = t_pad
        self.temporal_padding_origin = math.ceil(((t_ks - 1) * w_dilation + (1 - w_stride)) / 2)
        self.prev_features = None

        # Initialize the parent class with modified padding
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=(0, h_pad, w_pad),
            groups=groups,
            bias=bias,
            padding_mode=padding_mode,
        )

    def _clear_conv_cache(self):
        del self.prev_features
        self.prev_features = None

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Record the input dtype so padded tensors can be cast back to it
        dtype = hidden_states.dtype
        if self.prev_features is None:
            # Pad the input tensor in the temporal dimension to maintain causality
            hidden_states = F.pad(
                hidden_states,
                pad=(0, 0, 0, 0, self.temporal_padding, 0),
                mode="replicate",  # TODO: check if this is necessary
            )
            hidden_states = hidden_states.to(dtype=dtype)

            # Clear cache before processing and store previous features for causality
            self._clear_conv_cache()
            self.prev_features = hidden_states[:, :, -self.temporal_padding :].clone()

            # Process the input tensor in chunks along the temporal dimension
            num_frames = hidden_states.size(2)
            outputs = []
            i = 0
            while i + self.temporal_padding + 1 <= num_frames:
                out = super().forward(hidden_states[:, :, i : i + self.temporal_padding + 1])
                i += self.t_stride
                outputs.append(out)
            return torch.concat(outputs, 2)
        else:
            # Concatenate previous features with the input tensor for continuous temporal processing
            if self.t_stride == 2:
                hidden_states = torch.concat(
                    [self.prev_features[:, :, -(self.temporal_padding - 1) :], hidden_states], dim=2
                )
            else:
                hidden_states = torch.concat([self.prev_features, hidden_states], dim=2)
            hidden_states = hidden_states.to(dtype=dtype)

            # Clear cache and update previous features
            self._clear_conv_cache()
            self.prev_features = hidden_states[:, :, -self.temporal_padding :].clone()

            # Process the concatenated tensor in chunks along the temporal dimension
            num_frames = hidden_states.size(2)
            outputs = []
            i = 0
            while i + self.temporal_padding + 1 <= num_frames:
                out = super().forward(hidden_states[:, :, i : i + self.temporal_padding + 1])
                i += self.t_stride
                outputs.append(out)
            return torch.concat(outputs, 2)


class EasyAnimateResidualBlock3D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        non_linearity: str = "silu",
        norm_num_groups: int = 32,
        norm_eps: float = 1e-6,
        spatial_group_norm: bool = True,
        dropout: float = 0.0,
        output_scale_factor: float = 1.0,
    ):
        super().__init__()

        self.output_scale_factor = output_scale_factor

        # Group normalization for input tensor
        self.norm1 = nn.GroupNorm(
            num_groups=norm_num_groups,
            num_channels=in_channels,
            eps=norm_eps,
            affine=True,
        )

        self.nonlinearity = get_activation(non_linearity)

        self.conv1 = EasyAnimateCausalConv3d(in_channels, out_channels, kernel_size=3)
self.norm2 = nn.GroupNorm(num_groups=norm_num_groups, num_channels=out_channels, eps=norm_eps, affine=True) self.dropout = nn.Dropout(dropout) self.conv2 = EasyAnimateCausalConv3d(out_channels, out_channels, kernel_size=3) if in_channels != out_channels: self.shortcut = nn.Conv3d(in_channels, out_channels, kernel_size=1) else: self.shortcut = nn.Identity() self.spatial_group_norm = spatial_group_norm def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: shortcut = self.shortcut(hidden_states) if self.spatial_group_norm: batch_size = hidden_states.size(0) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W] hidden_states = self.norm1(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute( 0, 2, 1, 3, 4 ) # [B * T, C, H, W] -> [B, C, T, H, W] else: hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if self.spatial_group_norm: batch_size = hidden_states.size(0) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W] hidden_states = self.norm2(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute( 0, 2, 1, 3, 4 ) # [B * T, C, H, W] -> [B, C, T, H, W] else: hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) return (hidden_states + shortcut) / self.output_scale_factor class EasyAnimateDownsampler3D(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: tuple = (2, 2, 2)): super().__init__() self.conv = EasyAnimateCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=0 ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = F.pad(hidden_states, (0, 1, 0, 1)) hidden_states = self.conv(hidden_states) return hidden_states class EasyAnimateUpsampler3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, temporal_upsample: bool = False, spatial_group_norm: bool = True, ): super().__init__() out_channels = out_channels or in_channels self.temporal_upsample = temporal_upsample self.spatial_group_norm = spatial_group_norm self.conv = EasyAnimateCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size ) self.prev_features = None def _clear_conv_cache(self): del self.prev_features self.prev_features = None def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = F.interpolate(hidden_states, scale_factor=(1, 2, 2), mode="nearest") hidden_states = self.conv(hidden_states) if self.temporal_upsample: if self.prev_features is None: self.prev_features = hidden_states else: hidden_states = F.interpolate( hidden_states, scale_factor=(2, 1, 1), mode="trilinear" if not self.spatial_group_norm else "nearest", ) return hidden_states class EasyAnimateDownBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-6, spatial_group_norm: bool = True, dropout: float = 0.0, output_scale_factor: float = 1.0, add_downsample: bool = True, add_temporal_downsample: bool = True, ): super().__init__() self.convs = nn.ModuleList([]) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.convs.append( 
EasyAnimateResidualBlock3D( in_channels=in_channels, out_channels=out_channels, non_linearity=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, spatial_group_norm=spatial_group_norm, dropout=dropout, output_scale_factor=output_scale_factor, ) ) if add_downsample and add_temporal_downsample: self.downsampler = EasyAnimateDownsampler3D(out_channels, out_channels, kernel_size=3, stride=(2, 2, 2)) self.spatial_downsample_factor = 2 self.temporal_downsample_factor = 2 elif add_downsample and not add_temporal_downsample: self.downsampler = EasyAnimateDownsampler3D(out_channels, out_channels, kernel_size=3, stride=(1, 2, 2)) self.spatial_downsample_factor = 2 self.temporal_downsample_factor = 1 else: self.downsampler = None self.spatial_downsample_factor = 1 self.temporal_downsample_factor = 1 def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for conv in self.convs: hidden_states = conv(hidden_states) if self.downsampler is not None: hidden_states = self.downsampler(hidden_states) return hidden_states class EasyAnimateUpBlock3d(nn.Module): def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-6, spatial_group_norm: bool = False, dropout: float = 0.0, output_scale_factor: float = 1.0, add_upsample: bool = True, add_temporal_upsample: bool = True, ): super().__init__() self.convs = nn.ModuleList([]) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.convs.append( EasyAnimateResidualBlock3D( in_channels=in_channels, out_channels=out_channels, non_linearity=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, spatial_group_norm=spatial_group_norm, dropout=dropout, output_scale_factor=output_scale_factor, ) ) if add_upsample: self.upsampler = EasyAnimateUpsampler3D( in_channels, in_channels, temporal_upsample=add_temporal_upsample, spatial_group_norm=spatial_group_norm, ) else: self.upsampler = None def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for conv in self.convs: hidden_states = conv(hidden_states) if self.upsampler is not None: hidden_states = self.upsampler(hidden_states) return hidden_states class EasyAnimateMidBlock3d(nn.Module): def __init__( self, in_channels: int, num_layers: int = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-6, spatial_group_norm: bool = True, dropout: float = 0.0, output_scale_factor: float = 1.0, ): super().__init__() norm_num_groups = norm_num_groups if norm_num_groups is not None else min(in_channels // 4, 32) self.convs = nn.ModuleList( [ EasyAnimateResidualBlock3D( in_channels=in_channels, out_channels=in_channels, non_linearity=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, spatial_group_norm=spatial_group_norm, dropout=dropout, output_scale_factor=output_scale_factor, ) ] ) for _ in range(num_layers - 1): self.convs.append( EasyAnimateResidualBlock3D( in_channels=in_channels, out_channels=in_channels, non_linearity=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, spatial_group_norm=spatial_group_norm, dropout=dropout, output_scale_factor=output_scale_factor, ) ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.convs[0](hidden_states) for resnet in self.convs[1:]: hidden_states = resnet(hidden_states) return hidden_states class EasyAnimateEncoder(nn.Module): r""" Causal encoder for 3D video-like data used in [EasyAnimate](https://huggingface.co/papers/2405.18991). 
""" _supports_gradient_checkpointing = True def __init__( self, in_channels: int = 3, out_channels: int = 8, down_block_types: Tuple[str, ...] = ( "SpatialDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", ), block_out_channels: Tuple[int, ...] = [128, 256, 512, 512], layers_per_block: int = 2, norm_num_groups: int = 32, act_fn: str = "silu", double_z: bool = True, spatial_group_norm: bool = False, ): super().__init__() # 1. Input convolution self.conv_in = EasyAnimateCausalConv3d(in_channels, block_out_channels[0], kernel_size=3) # 2. Down blocks self.down_blocks = nn.ModuleList([]) output_channels = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channels = output_channels output_channels = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == "SpatialDownBlock3D": down_block = EasyAnimateDownBlock3D( in_channels=input_channels, out_channels=output_channels, num_layers=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, spatial_group_norm=spatial_group_norm, add_downsample=not is_final_block, add_temporal_downsample=False, ) elif down_block_type == "SpatialTemporalDownBlock3D": down_block = EasyAnimateDownBlock3D( in_channels=input_channels, out_channels=output_channels, num_layers=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, spatial_group_norm=spatial_group_norm, add_downsample=not is_final_block, add_temporal_downsample=True, ) else: raise ValueError(f"Unknown up block type: {down_block_type}") self.down_blocks.append(down_block) # 3. Middle block self.mid_block = EasyAnimateMidBlock3d( in_channels=block_out_channels[-1], num_layers=layers_per_block, act_fn=act_fn, spatial_group_norm=spatial_group_norm, norm_num_groups=norm_num_groups, norm_eps=1e-6, dropout=0, output_scale_factor=1, ) # 4. Output normalization & convolution self.spatial_group_norm = spatial_group_norm self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6, ) self.conv_act = get_activation(act_fn) # Initialize the output convolution layer conv_out_channels = 2 * out_channels if double_z else out_channels self.conv_out = EasyAnimateCausalConv3d(block_out_channels[-1], conv_out_channels, kernel_size=3) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # hidden_states: (B, C, T, H, W) hidden_states = self.conv_in(hidden_states) for down_block in self.down_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(down_block, hidden_states) else: hidden_states = down_block(hidden_states) hidden_states = self.mid_block(hidden_states) if self.spatial_group_norm: batch_size = hidden_states.size(0) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) hidden_states = self.conv_norm_out(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3, 4) else: hidden_states = self.conv_norm_out(hidden_states) hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class EasyAnimateDecoder(nn.Module): r""" Causal decoder for 3D video-like data used in [EasyAnimate](https://huggingface.co/papers/2405.18991). """ _supports_gradient_checkpointing = True def __init__( self, in_channels: int = 8, out_channels: int = 3, up_block_types: Tuple[str, ...] 
= ( "SpatialUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", ), block_out_channels: Tuple[int, ...] = [128, 256, 512, 512], layers_per_block: int = 2, norm_num_groups: int = 32, act_fn: str = "silu", spatial_group_norm: bool = False, ): super().__init__() # 1. Input convolution self.conv_in = EasyAnimateCausalConv3d(in_channels, block_out_channels[-1], kernel_size=3) # 2. Middle block self.mid_block = EasyAnimateMidBlock3d( in_channels=block_out_channels[-1], num_layers=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, dropout=0, output_scale_factor=1, ) # 3. Up blocks self.up_blocks = nn.ModuleList([]) reversed_block_out_channels = list(reversed(block_out_channels)) output_channels = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): input_channels = output_channels output_channels = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 # Create and append up block to up_blocks if up_block_type == "SpatialUpBlock3D": up_block = EasyAnimateUpBlock3d( in_channels=input_channels, out_channels=output_channels, num_layers=layers_per_block + 1, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, spatial_group_norm=spatial_group_norm, add_upsample=not is_final_block, add_temporal_upsample=False, ) elif up_block_type == "SpatialTemporalUpBlock3D": up_block = EasyAnimateUpBlock3d( in_channels=input_channels, out_channels=output_channels, num_layers=layers_per_block + 1, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, spatial_group_norm=spatial_group_norm, add_upsample=not is_final_block, add_temporal_upsample=True, ) else: raise ValueError(f"Unknown up block type: {up_block_type}") self.up_blocks.append(up_block) # Output normalization and activation self.spatial_group_norm = spatial_group_norm self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6, ) self.conv_act = get_activation(act_fn) # Output convolution layer self.conv_out = EasyAnimateCausalConv3d(block_out_channels[0], out_channels, kernel_size=3) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # hidden_states: (B, C, T, H, W) hidden_states = self.conv_in(hidden_states) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states) else: hidden_states = self.mid_block(hidden_states) for up_block in self.up_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(up_block, hidden_states) else: hidden_states = up_block(hidden_states) if self.spatial_group_norm: batch_size = hidden_states.size(0) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W] hidden_states = self.conv_norm_out(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute( 0, 2, 1, 3, 4 ) # [B * T, C, H, W] -> [B, C, T, H, W] else: hidden_states = self.conv_norm_out(hidden_states) hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class AutoencoderKLMagvit(ModelMixin, ConfigMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model is used in [EasyAnimate](https://huggingface.co/papers/2405.18991). This model inherits from [`ModelMixin`]. 
Check the superclass documentation for its generic methods implemented for all models (such as downloading or
    saving).
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        latent_channels: int = 16,
        out_channels: int = 3,
        block_out_channels: Tuple[int, ...] = [128, 256, 512, 512],
        down_block_types: Tuple[str, ...] = [
            "SpatialDownBlock3D",
            "SpatialTemporalDownBlock3D",
            "SpatialTemporalDownBlock3D",
            "SpatialTemporalDownBlock3D",
        ],
        up_block_types: Tuple[str, ...] = [
            "SpatialUpBlock3D",
            "SpatialTemporalUpBlock3D",
            "SpatialTemporalUpBlock3D",
            "SpatialTemporalUpBlock3D",
        ],
        layers_per_block: int = 2,
        act_fn: str = "silu",
        norm_num_groups: int = 32,
        scaling_factor: float = 0.7125,
        spatial_group_norm: bool = True,
    ):
        super().__init__()

        # Initialize the encoder
        self.encoder = EasyAnimateEncoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
            double_z=True,
            spatial_group_norm=spatial_group_norm,
        )

        # Initialize the decoder
        self.decoder = EasyAnimateDecoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
            spatial_group_norm=spatial_group_norm,
        )

        # Initialize convolution layers for quantization and post-quantization
        self.quant_conv = nn.Conv3d(2 * latent_channels, 2 * latent_channels, kernel_size=1)
        self.post_quant_conv = nn.Conv3d(latent_channels, latent_channels, kernel_size=1)

        self.spatial_compression_ratio = 2 ** (len(block_out_channels) - 1)
        self.temporal_compression_ratio = 2 ** (len(block_out_channels) - 2)

        # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
        # to perform decoding of a single video latent at a time.
        self.use_slicing = False

        # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
        # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
        # intermediate tiles together, the memory requirement can be lowered.
        self.use_tiling = False

        # When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames
        # at a fixed frame batch size (based on `self.num_latent_frames_batch_size`), the memory requirement can be
        # lowered.
self.use_framewise_encoding = False self.use_framewise_decoding = False # Assign mini-batch sizes for encoder and decoder self.num_sample_frames_batch_size = 4 self.num_latent_frames_batch_size = 1 # The minimal tile height and width for spatial tiling to be used self.tile_sample_min_height = 512 self.tile_sample_min_width = 512 self.tile_sample_min_num_frames = 4 # The minimal distance between two spatial tiles self.tile_sample_stride_height = 448 self.tile_sample_stride_width = 448 self.tile_sample_stride_num_frames = 8 def _clear_conv_cache(self): # Clear cache for convolutional layers if needed for name, module in self.named_modules(): if isinstance(module, EasyAnimateCausalConv3d): module._clear_conv_cache() if isinstance(module, EasyAnimateUpsampler3D): module._clear_conv_cache() def enable_tiling( self, tile_sample_min_height: Optional[int] = None, tile_sample_min_width: Optional[int] = None, tile_sample_min_num_frames: Optional[int] = None, tile_sample_stride_height: Optional[float] = None, tile_sample_stride_width: Optional[float] = None, tile_sample_stride_num_frames: Optional[float] = None, ) -> None: r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. Args: tile_sample_min_height (`int`, *optional*): The minimum height required for a sample to be separated into tiles across the height dimension. tile_sample_min_width (`int`, *optional*): The minimum width required for a sample to be separated into tiles across the width dimension. tile_sample_stride_height (`int`, *optional*): The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are no tiling artifacts produced across the height dimension. tile_sample_stride_width (`int`, *optional*): The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling artifacts produced across the width dimension. """ self.use_tiling = True self.use_framewise_decoding = True self.use_framewise_encoding = True self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width self.tile_sample_min_num_frames = tile_sample_min_num_frames or self.tile_sample_min_num_frames self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width self.tile_sample_stride_num_frames = tile_sample_stride_num_frames or self.tile_sample_stride_num_frames def disable_tiling(self) -> None: r""" Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.use_tiling = False def enable_slicing(self) -> None: r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.use_slicing = True def disable_slicing(self) -> None: r""" Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing decoding in one step. 
""" self.use_slicing = False @apply_forward_hook def _encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_tiling and (x.shape[-1] > self.tile_sample_min_height or x.shape[-2] > self.tile_sample_min_width): return self.tiled_encode(x, return_dict=return_dict) first_frames = self.encoder(x[:, :, :1, :, :]) h = [first_frames] for i in range(1, x.shape[2], self.num_sample_frames_batch_size): next_frames = self.encoder(x[:, :, i : i + self.num_sample_frames_batch_size, :, :]) h.append(next_frames) h = torch.cat(h, dim=2) moments = self.quant_conv(h) self._clear_conv_cache() return moments @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded videos. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: batch_size, num_channels, num_frames, height, width = z.shape tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio if self.use_tiling and (z.shape[-1] > tile_latent_min_height or z.shape[-2] > tile_latent_min_width): return self.tiled_decode(z, return_dict=return_dict) z = self.post_quant_conv(z) # Process the first frame and save the result first_frames = self.decoder(z[:, :, :1, :, :]) # Initialize the list to store the processed frames, starting with the first frame dec = [first_frames] # Process the remaining frames, with the number of frames processed at a time determined by mini_batch_decoder for i in range(1, z.shape[2], self.num_latent_frames_batch_size): next_frames = self.decoder(z[:, :, i : i + self.num_latent_frames_batch_size, :, :]) dec.append(next_frames) # Concatenate all processed frames along the channel dimension dec = torch.cat(dec, dim=2) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: """ Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. 
Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample self._clear_conv_cache() if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for y in range(blend_extent): b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * ( y / blend_extent ) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[4], b.shape[4], blend_extent) for x in range(blend_extent): b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * ( x / blend_extent ) return b def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> AutoencoderKLOutput: batch_size, num_channels, num_frames, height, width = x.shape latent_height = height // self.spatial_compression_ratio latent_width = width // self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = tile_latent_min_height - tile_latent_stride_height blend_width = tile_latent_min_width - tile_latent_stride_width # Split the image into 512x512 tiles and encode them separately. 
rows = [] for i in range(0, height, self.tile_sample_stride_height): row = [] for j in range(0, width, self.tile_sample_stride_width): tile = x[ :, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width, ] first_frames = self.encoder(tile[:, :, 0:1, :, :]) tile_h = [first_frames] for k in range(1, num_frames, self.num_sample_frames_batch_size): next_frames = self.encoder(tile[:, :, k : k + self.num_sample_frames_batch_size, :, :]) tile_h.append(next_frames) tile = torch.cat(tile_h, dim=2) tile = self.quant_conv(tile) self._clear_conv_cache() row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :latent_height, :latent_width]) result_rows.append(torch.cat(result_row, dim=4)) moments = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width] return moments def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: batch_size, num_channels, num_frames, height, width = z.shape sample_height = height * self.spatial_compression_ratio sample_width = width * self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = self.tile_sample_min_height - self.tile_sample_stride_height blend_width = self.tile_sample_min_width - self.tile_sample_stride_width # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
rows = [] for i in range(0, height, tile_latent_stride_height): row = [] for j in range(0, width, tile_latent_stride_width): tile = z[ :, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width, ] tile = self.post_quant_conv(tile) # Process the first frame and save the result first_frames = self.decoder(tile[:, :, :1, :, :]) # Initialize the list to store the processed frames, starting with the first frame tile_dec = [first_frames] # Process the remaining frames, with the number of frames processed at a time determined by mini_batch_decoder for k in range(1, num_frames, self.num_latent_frames_batch_size): next_frames = self.decoder(tile[:, :, k : k + self.num_latent_frames_batch_size, :, :]) tile_dec.append(next_frames) # Concatenate all processed frames along the channel dimension decoded = torch.cat(tile_dec, dim=2) self._clear_conv_cache() row.append(decoded) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width]) result_rows.append(torch.cat(result_row, dim=4)) dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width] if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, torch.Tensor]: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
diffusers/src/diffusers/models/autoencoders/autoencoder_kl_magvit.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl_magvit.py", "repo_id": "diffusers", "token_count": 21371 }
162
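As a rough usage sketch of the autoencoder above: the temporal axis of the input follows the causal `1 + k * num_sample_frames_batch_size` pattern the encoder loop expects, and the spatial size is divisible by the 8x compression ratio. A randomly initialized model is used here for illustration; a real checkpoint would be loaded with `from_pretrained`, and the printed shapes are expected values rather than guarantees.

```python
# Illustrative round-trip through the causal video VAE defined above,
# using the default (randomly initialized) configuration.
import torch
from diffusers import AutoencoderKLMagvit

vae = AutoencoderKLMagvit()  # real use: AutoencoderKLMagvit.from_pretrained(...)
vae.eval()

# 9 frames = 1 leading frame + 2 batches of `num_sample_frames_batch_size` (4),
# matching the chunked loop in `_encode`; 64x64 divides by the 8x spatial ratio.
video = torch.randn(1, 3, 9, 64, 64)  # (batch, channels, frames, height, width)

with torch.no_grad():
    posterior = vae.encode(video).latent_dist
    latents = posterior.sample()
    reconstruction = vae.decode(latents).sample

print(latents.shape)  # expected: torch.Size([1, 16, 3, 8, 8])
print(reconstruction.shape)  # back to roughly the input resolution

# For large inputs, tiled and sliced decoding trade compute for memory:
vae.enable_tiling()
vae.enable_slicing()
```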
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import functional as F

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders.single_file_model import FromOriginalModelMixin
from ...utils import BaseOutput, logging
from ..attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnProcessor,
)
from ..embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
from ..modeling_utils import ModelMixin
from ..unets.unet_2d_blocks import (
    UNetMidBlock2D,
    UNetMidBlock2DCrossAttn,
    get_down_block,
)
from ..unets.unet_2d_condition import UNet2DConditionModel


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class ControlNetOutput(BaseOutput):
    """
    The output of [`ControlNetModel`].

    Args:
        down_block_res_samples (`tuple[torch.Tensor]`):
            A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
            be of shape `(batch_size, channel * resolution, height // resolution, width // resolution)`. Output can be
            used to condition the original UNet's downsampling activations.
        mid_block_res_sample (`torch.Tensor`):
            The activation of the middle block (the lowest sample resolution). Each tensor should be of shape
            `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
            Output can be used to condition the original UNet's middle block activation.
    """

    down_block_res_samples: Tuple[torch.Tensor]
    mid_block_res_sample: torch.Tensor


class ControlNetConditioningEmbedding(nn.Module):
    """
    Quoting from https://huggingface.co/papers/2302.05543: "Stable Diffusion uses a pre-processing method similar to
    VQ-GAN [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
    training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
    convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
    (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
    model) to encode image-space conditions ... into feature maps ..."
    """

    def __init__(
        self,
        conditioning_embedding_channels: int,
        conditioning_channels: int = 3,
        block_out_channels: Tuple[int, ...]
= (16, 32, 96, 256),
    ):
        super().__init__()

        self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)

        self.blocks = nn.ModuleList([])

        for i in range(len(block_out_channels) - 1):
            channel_in = block_out_channels[i]
            channel_out = block_out_channels[i + 1]
            self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
            self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))

        self.conv_out = zero_module(
            nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
        )

    def forward(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = F.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = F.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding


class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
    """
    A ControlNet model.

    Args:
        in_channels (`int`, defaults to 4):
            The number of channels in the input sample.
        flip_sin_to_cos (`bool`, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, defaults to 0):
            The frequency shift to apply to the time embedding.
        down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
            The tuple of downsample blocks to use.
        only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
        block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, defaults to 2):
            The number of layers per block.
        downsample_padding (`int`, defaults to 1):
            The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, defaults to 1):
            The scale factor to use for the mid block.
        act_fn (`str`, defaults to "silu"):
            The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups to use for the normalization. If None, normalization and activation layers are
            skipped in post-processing.
        norm_eps (`float`, defaults to 1e-5):
            The epsilon to use for the normalization.
        cross_attention_dim (`int`, defaults to 1280):
            The dimension of the cross attention features.
        transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
            [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
            [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        encoder_hid_dim (`int`, *optional*, defaults to None):
            If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
            dimension to `cross_attention_dim`.
        encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
            If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
        attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
            The dimension of the attention heads.
        use_linear_projection (`bool`, defaults to `False`):
        class_embed_type (`str`, *optional*, defaults to `None`):
            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
            `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
        addition_embed_type (`str`, *optional*, defaults to `None`):
            Configures an optional embedding which will be summed with the time embeddings.
Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. num_class_embeds (`int`, *optional*, defaults to 0): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. upcast_attention (`bool`, defaults to `False`): resnet_time_scale_shift (`str`, defaults to `"default"`): Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`. projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`): The channel order of conditional image. Will convert to `rgb` if it's `bgr`. conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`): The tuple of output channel for each block in the `conditioning_embedding` layer. global_pool_conditions (`bool`, defaults to `False`): TODO(Patrick) - unused parameter. addition_embed_type_num_heads (`int`, defaults to 64): The number of heads to use for the `TextTimeEmbedding` layer. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 4, conditioning_channels: int = 3, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 1280, transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int, ...]] = 8, num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", projection_class_embeddings_input_dim: Optional[int] = None, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), global_pool_conditions: bool = False, addition_embed_type_num_heads: int = 64, ): super().__init__() # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) # input conv_in_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. 
            #
            # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
            # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
            # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
            self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        else:
            self.class_embedding = None

        if addition_embed_type == "text":
            if encoder_hid_dim is not None:
                text_time_embedding_from_dim = encoder_hid_dim
            else:
                text_time_embedding_from_dim = cross_attention_dim

            self.add_embedding = TextTimeEmbedding(
                text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
            )
        elif addition_embed_type == "text_image":
            # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
            # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
            # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
            self.add_embedding = TextImageTimeEmbedding(
                text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
            )
        elif addition_embed_type == "text_time":
            self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
            self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        elif addition_embed_type is not None:
            raise ValueError(
                f"addition_embed_type: {addition_embed_type} must be None, 'text', 'text_image' or 'text_time'."
            )

        # control net conditioning embedding
        self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=conditioning_embedding_out_channels,
            conditioning_channels=conditioning_channels,
        )

        self.down_blocks = nn.ModuleList([])
        self.controlnet_down_blocks = nn.ModuleList([])

        if isinstance(only_cross_attention, bool):
            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(down_block_types)

        # down
        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
        controlnet_block = zero_module(controlnet_block)
        self.controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                transformer_layers_per_block=transformer_layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                num_attention_heads=num_attention_heads[i],
                attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
                downsample_padding=downsample_padding,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.down_blocks.append(down_block)

            for _ in range(layers_per_block):
                controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
                controlnet_block = zero_module(controlnet_block)
self.controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) # mid mid_block_channel = block_out_channels[-1] controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_mid_block = controlnet_block if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=mid_block_channel, temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) elif mid_block_type == "UNetMidBlock2D": self.mid_block = UNetMidBlock2D( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, num_layers=0, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False, ) else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") @classmethod def from_unet( cls, unet: UNet2DConditionModel, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), load_weights_from_unet: bool = True, conditioning_channels: int = 3, ): r""" Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`]. Parameters: unet (`UNet2DConditionModel`): The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied where applicable. 
""" transformer_layers_per_block = ( unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1 ) encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None addition_time_embed_dim = ( unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None ) controlnet = cls( encoder_hid_dim=encoder_hid_dim, encoder_hid_dim_type=encoder_hid_dim_type, addition_embed_type=addition_embed_type, addition_time_embed_dim=addition_time_embed_dim, transformer_layers_per_block=transformer_layers_per_block, in_channels=unet.config.in_channels, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, downsample_padding=unet.config.downsample_padding, mid_block_scale_factor=unet.config.mid_block_scale_factor, act_fn=unet.config.act_fn, norm_num_groups=unet.config.norm_num_groups, norm_eps=unet.config.norm_eps, cross_attention_dim=unet.config.cross_attention_dim, attention_head_dim=unet.config.attention_head_dim, num_attention_heads=unet.config.num_attention_heads, use_linear_projection=unet.config.use_linear_projection, class_embed_type=unet.config.class_embed_type, num_class_embeds=unet.config.num_class_embeds, upcast_attention=unet.config.upcast_attention, resnet_time_scale_shift=unet.config.resnet_time_scale_shift, projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, mid_block_type=unet.config.mid_block_type, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, conditioning_embedding_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels, ) if load_weights_from_unet: controlnet.conv_in.load_state_dict(unet.conv_in.state_dict()) controlnet.time_proj.load_state_dict(unet.time_proj.state_dict()) controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict()) if controlnet.class_embedding: controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict()) if hasattr(controlnet, "add_embedding"): controlnet.add_embedding.load_state_dict(unet.add_embedding.state_dict()) controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict()) controlnet.mid_block.load_state_dict(unet.mid_block.state_dict()) return controlnet @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, conditioning_scale: float = 1.0, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True, ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]: """ The [`ControlNetModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor. timestep (`Union[torch.Tensor, float, int]`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states. controlnet_cond (`torch.Tensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. 
                Mask will be converted into a bias, which adds large negative values to the attention scores
                corresponding to "discard" tokens.
            added_cond_kwargs (`dict`):
                Additional conditions for the Stable Diffusion XL UNet.
            cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
            guess_mode (`bool`, defaults to `False`):
                In this mode, the ControlNet encoder tries its best to recognize the content of the input even if you
                remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
            return_dict (`bool`, defaults to `True`):
                Whether or not to return a [`~models.controlnets.controlnet.ControlNetOutput`] instead of a plain
                tuple.

        Returns:
            [`~models.controlnets.controlnet.ControlNetOutput`] **or** `tuple`:
                If `return_dict` is `True`, a [`~models.controlnets.controlnet.ControlNetOutput`] is returned,
                otherwise a tuple is returned where the first element is the sample tensor.
        """
        # check channel order
        channel_order = self.config.controlnet_conditioning_channel_order

        if channel_order == "rgb":
            # in rgb order by default
            ...
        elif channel_order == "bgr":
            controlnet_cond = torch.flip(controlnet_cond, dims=[1])
        else:
            raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")

        # prepare attention_mask
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
            # This would be a good case for the `match` statement (Python 3.10+)
            is_mps = sample.device.type == "mps"
            is_npu = sample.device.type == "npu"
            if isinstance(timestep, float):
                dtype = torch.float32 if (is_mps or is_npu) else torch.float64
            else:
                dtype = torch.int32 if (is_mps or is_npu) else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb if self.config.addition_embed_type is not None: if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_time": if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) emb = emb + aug_emb if aug_emb is not None else emb # 2. pre-process sample = self.conv_in(sample) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) sample = sample + controlnet_cond # 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = self.mid_block(sample, emb) # 5. Control net blocks controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) # 6. 
scaling if guess_mode and not self.config.global_pool_conditions: scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 scales = scales * conditioning_scale down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] mid_block_res_sample = mid_block_res_sample * scales[-1] # last one else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples ] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return ControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample ) def zero_module(module): for p in module.parameters(): nn.init.zeros_(p) return module
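For orientation, here is a hedged sketch of how the two main entry points above, `from_unet` and `forward`, fit into a denoising step. The checkpoint id is a placeholder, and the shapes follow the Stable Diffusion v1.5 convention assumed here (768-dim text embeddings, 64 × 64 latents for 512 × 512 images); treat this as a sketch rather than a canonical recipe.

```python
# Hedged sketch: wiring a ControlNet into a UNet denoising step.
# The model id is a placeholder; shapes assume the SD v1.5 convention.
import torch

from diffusers import ControlNetModel, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("path/to/sd15", subfolder="unet")  # placeholder id
controlnet = ControlNetModel.from_unet(unet)  # copies matching weights from the UNet

sample = torch.randn(1, 4, 64, 64)               # noisy latents
timestep = torch.tensor([10])
encoder_hidden_states = torch.randn(1, 77, 768)  # text encoder output
controlnet_cond = torch.randn(1, 3, 512, 512)    # e.g. a Canny edge map in [0, 1]

down_res, mid_res = controlnet(
    sample, timestep, encoder_hidden_states, controlnet_cond, return_dict=False
)
# The residuals condition the UNet's own down and mid blocks:
noise_pred = unet(
    sample,
    timestep,
    encoder_hidden_states,
    down_block_additional_residuals=down_res,
    mid_block_additional_residual=mid_res,
).sample
```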
diffusers/src/diffusers/models/controlnets/controlnet.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnets/controlnet.py", "repo_id": "diffusers", "token_count": 18811 }
163
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import importlib import inspect import os from array import array from collections import OrderedDict, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path from typing import Dict, List, Optional, Union from zipfile import is_zipfile import safetensors import torch from huggingface_hub import DDUFEntry from huggingface_hub.utils import EntryNotFoundError from ..quantizers import DiffusersQuantizer from ..utils import ( DEFAULT_HF_PARALLEL_LOADING_WORKERS, GGUF_FILE_EXTENSION, SAFE_WEIGHTS_INDEX_NAME, SAFETENSORS_FILE_EXTENSION, WEIGHTS_INDEX_NAME, _add_variant, _get_model_file, deprecate, is_accelerate_available, is_accelerate_version, is_gguf_available, is_torch_available, is_torch_version, logging, ) logger = logging.get_logger(__name__) _CLASS_REMAPPING_DICT = { "Transformer2DModel": { "ada_norm_zero": "DiTTransformer2DModel", "ada_norm_single": "PixArtTransformer2DModel", } } if is_accelerate_available(): from accelerate import infer_auto_device_map from accelerate.utils import get_balanced_memory, get_max_memory, offload_weight, set_module_tensor_to_device # Adapted from `transformers` (see modeling_utils.py) def _determine_device_map( model: torch.nn.Module, device_map, max_memory, torch_dtype, keep_in_fp32_modules=[], hf_quantizer=None ): if isinstance(device_map, str): special_dtypes = {} if hf_quantizer is not None: special_dtypes.update(hf_quantizer.get_special_dtypes_update(model, torch_dtype)) special_dtypes.update( { name: torch.float32 for name, _ in model.named_parameters() if any(m in name for m in keep_in_fp32_modules) } ) target_dtype = torch_dtype if hf_quantizer is not None: target_dtype = hf_quantizer.adjust_target_dtype(target_dtype) no_split_modules = model._get_no_split_modules(device_map) device_map_kwargs = {"no_split_module_classes": no_split_modules} if "special_dtypes" in inspect.signature(infer_auto_device_map).parameters: device_map_kwargs["special_dtypes"] = special_dtypes elif len(special_dtypes) > 0: logger.warning( "This model has some weights that should be kept in higher precision, you need to upgrade " "`accelerate` to properly deal with them (`pip install --upgrade accelerate`)." 
) if device_map != "sequential": max_memory = get_balanced_memory( model, dtype=torch_dtype, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **device_map_kwargs, ) else: max_memory = get_max_memory(max_memory) if hf_quantizer is not None: max_memory = hf_quantizer.adjust_max_memory(max_memory) device_map_kwargs["max_memory"] = max_memory device_map = infer_auto_device_map(model, dtype=target_dtype, **device_map_kwargs) return device_map def _fetch_remapped_cls_from_config(config, old_class): previous_class_name = old_class.__name__ remapped_class_name = _CLASS_REMAPPING_DICT.get(previous_class_name).get(config["norm_type"], None) # Details: # https://github.com/huggingface/diffusers/pull/7647#discussion_r1621344818 if remapped_class_name: # load diffusers library to import compatible and original scheduler diffusers_library = importlib.import_module(__name__.split(".")[0]) remapped_class = getattr(diffusers_library, remapped_class_name) logger.info( f"Changing class object to be of `{remapped_class_name}` type from `{previous_class_name}` type." f"This is because `{previous_class_name}` is scheduled to be deprecated in a future version. Note that this" " DOESN'T affect the final results." ) return remapped_class else: return old_class def _determine_param_device(param_name: str, device_map: Optional[Dict[str, Union[int, str, torch.device]]]): """ Find the device of param_name from the device_map. """ if device_map is None: return "cpu" else: module_name = param_name # find next higher level module that is defined in device_map: # bert.lm_head.weight -> bert.lm_head -> bert -> '' while len(module_name) > 0 and module_name not in device_map: module_name = ".".join(module_name.split(".")[:-1]) if module_name == "" and "" not in device_map: raise ValueError(f"{param_name} doesn't have any device set.") return device_map[module_name] def load_state_dict( checkpoint_file: Union[str, os.PathLike], dduf_entries: Optional[Dict[str, DDUFEntry]] = None, disable_mmap: bool = False, map_location: Union[str, torch.device] = "cpu", ): """ Reads a checkpoint file, returning properly formatted errors if they arise. """ # TODO: maybe refactor a bit this part where we pass a dict here if isinstance(checkpoint_file, dict): return checkpoint_file try: file_extension = os.path.basename(checkpoint_file).split(".")[-1] if file_extension == SAFETENSORS_FILE_EXTENSION: if dduf_entries: # tensors are loaded on cpu with dduf_entries[checkpoint_file].as_mmap() as mm: return safetensors.torch.load(mm) if disable_mmap: return safetensors.torch.load(open(checkpoint_file, "rb").read()) else: return safetensors.torch.load_file(checkpoint_file, device=map_location) elif file_extension == GGUF_FILE_EXTENSION: return load_gguf_checkpoint(checkpoint_file) else: extra_args = {} weights_only_kwarg = {"weights_only": True} if is_torch_version(">=", "1.13") else {} # mmap can only be used with files serialized with zipfile-based format. if ( isinstance(checkpoint_file, str) and map_location != "meta" and is_torch_version(">=", "2.1.0") and is_zipfile(checkpoint_file) and not disable_mmap ): extra_args = {"mmap": True} return torch.load(checkpoint_file, map_location=map_location, **weights_only_kwarg, **extra_args) except Exception as e: try: with open(checkpoint_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." 
) else: raise ValueError( f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained " "model. Make sure you have saved the model properly." ) from e except (UnicodeDecodeError, ValueError): raise OSError( f"Unable to load weights from checkpoint file for '{checkpoint_file}' at '{checkpoint_file}'. " ) def load_model_dict_into_meta( model, state_dict: OrderedDict, dtype: Optional[Union[str, torch.dtype]] = None, model_name_or_path: Optional[str] = None, hf_quantizer: Optional[DiffusersQuantizer] = None, keep_in_fp32_modules: Optional[List] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, unexpected_keys: Optional[List[str]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_index: Optional[Dict] = None, state_dict_index: Optional[Dict] = None, state_dict_folder: Optional[Union[str, os.PathLike]] = None, ) -> List[str]: """ This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its params on a `meta` device. It replaces the model params with the data from the `state_dict` """ is_quantized = hf_quantizer is not None empty_state_dict = model.state_dict() for param_name, param in state_dict.items(): if param_name not in empty_state_dict: continue set_module_kwargs = {} # We convert floating dtypes to the `dtype` passed. We also want to keep the buffers/params # in int/uint/bool and not cast them. # TODO: revisit cases when param.dtype == torch.float8_e4m3fn if dtype is not None and torch.is_floating_point(param): if keep_in_fp32_modules is not None and any( module_to_keep_in_fp32 in param_name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules ): param = param.to(torch.float32) set_module_kwargs["dtype"] = torch.float32 # For quantizers have save weights using torch.float8_e4m3fn elif hf_quantizer is not None and param.dtype == getattr(torch, "float8_e4m3fn", None): pass else: param = param.to(dtype) set_module_kwargs["dtype"] = dtype if is_accelerate_version(">", "1.8.1"): set_module_kwargs["non_blocking"] = True set_module_kwargs["clear_cache"] = False # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model, and which # uses `param.copy_(input_param)` that preserves the contiguity of the parameter in the model. # Reference: https://github.com/pytorch/pytorch/blob/db79ceb110f6646523019a59bbd7b838f43d4a86/torch/nn/modules/module.py#L2040C29-L2040C29 old_param = model splits = param_name.split(".") for split in splits: old_param = getattr(old_param, split) if not isinstance(old_param, (torch.nn.Parameter, torch.Tensor)): old_param = None if old_param is not None: if dtype is None: param = param.to(old_param.dtype) if old_param.is_contiguous(): param = param.contiguous() param_device = _determine_param_device(param_name, device_map) # bnb params are flattened. # gguf quants have a different shape based on the type of quantization applied if empty_state_dict[param_name].shape != param.shape: if ( is_quantized and hf_quantizer.pre_quantized and hf_quantizer.check_if_quantized_param( model, param, param_name, state_dict, param_device=param_device ) ): hf_quantizer.check_quantized_param_shape(param_name, empty_state_dict[param_name], param) else: model_name_or_path_str = f"{model_name_or_path} " if model_name_or_path is not None else "" raise ValueError( f"Cannot load {model_name_or_path_str} because {param_name} expected shape {empty_state_dict[param_name].shape}, but got {param.shape}. 
If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example." ) if param_device == "disk": offload_index = offload_weight(param, param_name, offload_folder, offload_index) elif param_device == "cpu" and state_dict_index is not None: state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index) elif is_quantized and ( hf_quantizer.check_if_quantized_param(model, param, param_name, state_dict, param_device=param_device) ): hf_quantizer.create_quantized_param( model, param, param_name, param_device, state_dict, unexpected_keys, dtype=dtype ) else: set_module_tensor_to_device(model, param_name, param_device, value=param, **set_module_kwargs) return offload_index, state_dict_index def check_support_param_buffer_assignment(model_to_load, state_dict, start_prefix=""): """ Checks if `model_to_load` supports param buffer assignment (such as when loading in empty weights) by first checking if the model explicitly disables it, then by ensuring that the state dict keys are a subset of the model's parameters. """ if model_to_load.device.type == "meta": return False if len([key for key in state_dict if key.startswith(start_prefix)]) == 0: return False # Some models explicitly do not support param buffer assignment if not getattr(model_to_load, "_supports_param_buffer_assignment", True): logger.debug( f"{model_to_load.__class__.__name__} does not support param buffer assignment, loading will be slower" ) return False # If the model does, the incoming `state_dict` and the `model_to_load` must be the same dtype first_key = next(iter(model_to_load.state_dict().keys())) if start_prefix + first_key in state_dict: return state_dict[start_prefix + first_key].dtype == model_to_load.state_dict()[first_key].dtype return False def _load_shard_file( shard_file, model, model_state_dict, device_map=None, dtype=None, hf_quantizer=None, keep_in_fp32_modules=None, dduf_entries=None, loaded_keys=None, unexpected_keys=None, offload_index=None, offload_folder=None, state_dict_index=None, state_dict_folder=None, ignore_mismatched_sizes=False, low_cpu_mem_usage=False, ): state_dict = load_state_dict(shard_file, dduf_entries=dduf_entries) mismatched_keys = _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, ignore_mismatched_sizes, ) error_msgs = [] if low_cpu_mem_usage: offload_index, state_dict_index = load_model_dict_into_meta( model, state_dict, device_map=device_map, dtype=dtype, hf_quantizer=hf_quantizer, keep_in_fp32_modules=keep_in_fp32_modules, unexpected_keys=unexpected_keys, offload_folder=offload_folder, offload_index=offload_index, state_dict_index=state_dict_index, state_dict_folder=state_dict_folder, ) else: assign_to_params_buffers = check_support_param_buffer_assignment(model, state_dict) error_msgs += _load_state_dict_into_model(model, state_dict, assign_to_params_buffers) return offload_index, state_dict_index, mismatched_keys, error_msgs def _load_shard_files_with_threadpool( shard_files, model, model_state_dict, device_map=None, dtype=None, hf_quantizer=None, keep_in_fp32_modules=None, dduf_entries=None, loaded_keys=None, unexpected_keys=None, offload_index=None, offload_folder=None, state_dict_index=None, state_dict_folder=None, ignore_mismatched_sizes=False, low_cpu_mem_usage=False, ): # Do not spawn anymore workers than you need num_workers = 
min(len(shard_files), DEFAULT_HF_PARALLEL_LOADING_WORKERS) logger.info(f"Loading model weights in parallel with {num_workers} workers...") error_msgs = [] mismatched_keys = [] load_one = functools.partial( _load_shard_file, model=model, model_state_dict=model_state_dict, device_map=device_map, dtype=dtype, hf_quantizer=hf_quantizer, keep_in_fp32_modules=keep_in_fp32_modules, dduf_entries=dduf_entries, loaded_keys=loaded_keys, unexpected_keys=unexpected_keys, offload_index=offload_index, offload_folder=offload_folder, state_dict_index=state_dict_index, state_dict_folder=state_dict_folder, ignore_mismatched_sizes=ignore_mismatched_sizes, low_cpu_mem_usage=low_cpu_mem_usage, ) with ThreadPoolExecutor(max_workers=num_workers) as executor: with logging.tqdm(total=len(shard_files), desc="Loading checkpoint shards") as pbar: futures = [executor.submit(load_one, shard_file) for shard_file in shard_files] for future in as_completed(futures): result = future.result() offload_index, state_dict_index, _mismatched_keys, _error_msgs = result error_msgs += _error_msgs mismatched_keys += _mismatched_keys pbar.update(1) return offload_index, state_dict_index, mismatched_keys, error_msgs def _find_mismatched_keys( state_dict, model_state_dict, loaded_keys, ignore_mismatched_sizes, ): mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key in loaded_keys: model_key = checkpoint_key # If the checkpoint is sharded, we may not have the key here. if checkpoint_key not in state_dict: continue if model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape: mismatched_keys.append( (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) ) del state_dict[checkpoint_key] return mismatched_keys def _load_state_dict_into_model( model_to_load, state_dict: OrderedDict, assign_to_params_buffers: bool = False ) -> List[str]: # Convert old format to new format if needed from a PyTorch state_dict # copy state_dict so _load_from_state_dict can modify it state_dict = state_dict.copy() error_msgs = [] # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. 
def load(module: torch.nn.Module, prefix: str = "", assign_to_params_buffers: bool = False): local_metadata = {} local_metadata["assign_to_params_buffers"] = assign_to_params_buffers if assign_to_params_buffers and not is_torch_version(">=", "2.1"): logger.info("You need to have torch>=2.1 in order to load the model with assign_to_params_buffers=True") args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) module._load_from_state_dict(*args) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".", assign_to_params_buffers) load(model_to_load, assign_to_params_buffers=assign_to_params_buffers) return error_msgs def _fetch_index_file( is_local, pretrained_model_name_or_path, subfolder, use_safetensors, cache_dir, variant, force_download, proxies, local_files_only, token, revision, user_agent, commit_hash, dduf_entries: Optional[Dict[str, DDUFEntry]] = None, ): if is_local: index_file = Path( pretrained_model_name_or_path, subfolder or "", _add_variant(SAFE_WEIGHTS_INDEX_NAME if use_safetensors else WEIGHTS_INDEX_NAME, variant), ) else: index_file_in_repo = Path( subfolder or "", _add_variant(SAFE_WEIGHTS_INDEX_NAME if use_safetensors else WEIGHTS_INDEX_NAME, variant), ).as_posix() try: index_file = _get_model_file( pretrained_model_name_or_path, weights_name=index_file_in_repo, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=None, user_agent=user_agent, commit_hash=commit_hash, dduf_entries=dduf_entries, ) if not dduf_entries: index_file = Path(index_file) except (EntryNotFoundError, EnvironmentError): index_file = None return index_file def _fetch_index_file_legacy( is_local, pretrained_model_name_or_path, subfolder, use_safetensors, cache_dir, variant, force_download, proxies, local_files_only, token, revision, user_agent, commit_hash, dduf_entries: Optional[Dict[str, DDUFEntry]] = None, ): if is_local: index_file = Path( pretrained_model_name_or_path, subfolder or "", SAFE_WEIGHTS_INDEX_NAME if use_safetensors else WEIGHTS_INDEX_NAME, ).as_posix() splits = index_file.split(".") split_index = -3 if ".cache" in index_file else -2 splits = splits[:-split_index] + [variant] + splits[-split_index:] index_file = ".".join(splits) if os.path.exists(index_file): deprecation_message = f"This serialization format is now deprecated to standardize the serialization format between `transformers` and `diffusers`. We recommend you to remove the existing files associated with the current variant ({variant}) and re-obtain them by running a `save_pretrained()`." 
            deprecate("legacy_sharded_ckpts_with_variant", "1.0.0", deprecation_message, standard_warn=False)
            index_file = Path(index_file)
        else:
            index_file = None
    else:
        if variant is not None:
            index_file_in_repo = Path(
                subfolder or "",
                SAFE_WEIGHTS_INDEX_NAME if use_safetensors else WEIGHTS_INDEX_NAME,
            ).as_posix()
            splits = index_file_in_repo.split(".")
            split_index = -2
            splits = splits[:-split_index] + [variant] + splits[-split_index:]
            index_file_in_repo = ".".join(splits)
            try:
                index_file = _get_model_file(
                    pretrained_model_name_or_path,
                    weights_name=index_file_in_repo,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    subfolder=None,
                    user_agent=user_agent,
                    commit_hash=commit_hash,
                    dduf_entries=dduf_entries,
                )
                index_file = Path(index_file)
                deprecation_message = f"This serialization format is now deprecated to standardize the serialization format between `transformers` and `diffusers`. We recommend you to remove the existing files associated with the current variant ({variant}) and re-obtain them by running a `save_pretrained()`."
                deprecate("legacy_sharded_ckpts_with_variant", "1.0.0", deprecation_message, standard_warn=False)
            except (EntryNotFoundError, EnvironmentError):
                index_file = None

    return index_file


def _gguf_parse_value(_value, data_type):
    if not isinstance(data_type, list):
        data_type = [data_type]
    if len(data_type) == 1:
        data_type = data_type[0]
        array_data_type = None
    else:
        if data_type[0] != 9:
            raise ValueError("Received multiple types, therefore expected the first type to indicate an array.")
        data_type, array_data_type = data_type

    if data_type in [0, 1, 2, 3, 4, 5, 10, 11]:
        _value = int(_value[0])
    elif data_type in [6, 12]:
        _value = float(_value[0])
    elif data_type in [7]:
        _value = bool(_value[0])
    elif data_type in [8]:
        _value = array("B", list(_value)).tobytes().decode()
    elif data_type in [9]:
        _value = _gguf_parse_value(_value, array_data_type)
    return _value


def load_gguf_checkpoint(gguf_checkpoint_path, return_tensors=False):
    """
    Load a GGUF file and return a dictionary of parsed parameters containing tensors, the parsed tokenizer and config
    attributes.

    Args:
        gguf_checkpoint_path (`str`):
            The path to the GGUF file to load.
        return_tensors (`bool`, defaults to `False`):
            Whether to read the tensors from the file and return them. Not doing so is faster and only loads the
            metadata in memory.
    """

    if is_gguf_available() and is_torch_available():
        import gguf
        from gguf import GGUFReader

        from ..quantizers.gguf.utils import SUPPORTED_GGUF_QUANT_TYPES, GGUFParameter
    else:
        logger.error(
            "Loading a GGUF checkpoint in PyTorch requires both PyTorch and GGUF>=0.10.0 to be installed. Please see "
            "https://pytorch.org/ and https://github.com/ggerganov/llama.cpp/tree/master/gguf-py for installation instructions."
        )
        raise ImportError("Please install torch and gguf>=0.10.0 to load a GGUF checkpoint in PyTorch.")

    reader = GGUFReader(gguf_checkpoint_path)

    parsed_parameters = {}
    for tensor in reader.tensors:
        name = tensor.name
        quant_type = tensor.tensor_type

        # if the tensor is a torch supported dtype do not use GGUFParameter
        is_gguf_quant = quant_type not in [gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16]
        if is_gguf_quant and quant_type not in SUPPORTED_GGUF_QUANT_TYPES:
            _supported_quants_str = "\n".join([str(type) for type in SUPPORTED_GGUF_QUANT_TYPES])
            raise ValueError(
                (
                    f"{name} has a quantization type: {str(quant_type)} which is unsupported."
"\n\nCurrently the following quantization types are supported: \n\n" f"{_supported_quants_str}" "\n\nTo request support for this quantization type please open an issue here: https://github.com/huggingface/diffusers" ) ) weights = torch.from_numpy(tensor.data.copy()) parsed_parameters[name] = GGUFParameter(weights, quant_type=quant_type) if is_gguf_quant else weights return parsed_parameters def _find_mismatched_keys(state_dict, model_state_dict, loaded_keys, ignore_mismatched_sizes): mismatched_keys = [] if not ignore_mismatched_sizes: return mismatched_keys for checkpoint_key in loaded_keys: model_key = checkpoint_key # If the checkpoint is sharded, we may not have the key here. if checkpoint_key not in state_dict: continue if model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape: mismatched_keys.append( (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) ) del state_dict[checkpoint_key] return mismatched_keys def _expand_device_map(device_map, param_names): """ Expand a device map to return the correspondence parameter name to device. """ new_device_map = {} for module, device in device_map.items(): new_device_map.update( {p: device for p in param_names if p == module or p.startswith(f"{module}.") or module == ""} ) return new_device_map # Adapted from: https://github.com/huggingface/transformers/blob/0687d481e2c71544501ef9cb3eef795a6e79b1de/src/transformers/modeling_utils.py#L5859 def _caching_allocator_warmup( model, expanded_device_map: Dict[str, torch.device], dtype: torch.dtype, hf_quantizer: Optional[DiffusersQuantizer] ) -> None: """ This function warm-ups the caching allocator based on the size of the model tensors that will reside on each device. It allows to have one large call to Malloc, instead of recursively calling it later when loading the model, which is actually the loading speed bottleneck. Calling this function allows to cut the model loading time by a very large margin. """ factor = 2 if hf_quantizer is None else hf_quantizer.get_cuda_warm_up_factor() # Keep only accelerator devices accelerator_device_map = { param: torch.device(device) for param, device in expanded_device_map.items() if str(device) not in ["cpu", "disk"] } if not accelerator_device_map: return elements_per_device = defaultdict(int) for param_name, device in accelerator_device_map.items(): try: p = model.get_parameter(param_name) except AttributeError: try: p = model.get_buffer(param_name) except AttributeError: raise AttributeError(f"Parameter or buffer with name={param_name} not found in model") # TODO: account for TP when needed. elements_per_device[device] += p.numel() # This will kick off the caching allocator to avoid having to Malloc afterwards for device, elem_count in elements_per_device.items(): warmup_elems = max(1, elem_count // factor) _ = torch.empty(warmup_elems, dtype=dtype, device=device, requires_grad=False)
diffusers/src/diffusers/models/model_loading_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/model_loading_utils.py", "repo_id": "diffusers", "token_count": 13197 }
164
# Copyright 2025 the Latte Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ..attention import BasicTransformerBlock from ..cache_utils import CacheMixin from ..embeddings import PatchEmbed, PixArtAlphaTextProjection, get_1d_sincos_pos_embed_from_grid from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormSingle class LatteTransformer3DModel(ModelMixin, ConfigMixin, CacheMixin): _supports_gradient_checkpointing = True """ A 3D Transformer model for video-like data, paper: https://huggingface.co/papers/2401.03048, official code: https://github.com/Vchitect/Latte Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input. out_channels (`int`, *optional*): The number of channels in the output. num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. attention_bias (`bool`, *optional*): Configure if the `TransformerBlocks` attention should contain a bias parameter. sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). This is fixed during training since it is used to learn a number of position embeddings. patch_size (`int`, *optional*): The size of the patches to use in the patch embedding layer. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. num_embeds_ada_norm ( `int`, *optional*): The number of diffusion steps used during training. Pass if at least one of the norm_layers is `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The type of normalization to use. Options are `"layer_norm"` or `"ada_layer_norm"`. norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether or not to use elementwise affine in normalization layers. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon value to use in normalization layers. caption_channels (`int`, *optional*): The number of channels in the caption embeddings. video_length (`int`, *optional*): The number of frames in the video-like data. 
""" _skip_layerwise_casting_patterns = ["pos_embed", "norm"] @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: int = 64, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, norm_type: str = "layer_norm", norm_elementwise_affine: bool = True, norm_eps: float = 1e-5, caption_channels: int = None, video_length: int = 16, ): super().__init__() inner_dim = num_attention_heads * attention_head_dim # 1. Define input layers self.height = sample_size self.width = sample_size interpolation_scale = self.config.sample_size // 64 interpolation_scale = max(interpolation_scale, 1) self.pos_embed = PatchEmbed( height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, interpolation_scale=interpolation_scale, ) # 2. Define spatial transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, norm_type=norm_type, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, ) for d in range(num_layers) ] ) # 3. Define temporal transformers blocks self.temporal_transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=None, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, norm_type=norm_type, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, ) for d in range(num_layers) ] ) # 4. Define output layers self.out_channels = in_channels if out_channels is None else out_channels self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6) self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5) self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels) # 5. Latte other blocks. self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=False) self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim) # define temporal positional embedding temp_pos_embed = get_1d_sincos_pos_embed_from_grid( inner_dim, torch.arange(0, video_length).unsqueeze(1), output_type="pt" ) # 1152 hidden size self.register_buffer("temp_pos_embed", temp_pos_embed.float().unsqueeze(0), persistent=False) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, timestep: Optional[torch.LongTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, enable_temporal_attentions: bool = True, return_dict: bool = True, ): """ The [`LatteTransformer3DModel`] forward method. Args: hidden_states shape `(batch size, channel, num_frame, height, width)`: Input `hidden_states`. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. 
                If not given, cross-attention defaults to self-attention.
            encoder_attention_mask ( `torch.Tensor`, *optional*):
                Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:

                    * Mask `(batch, sequence_length)` True = keep, False = discard.
                    * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.

                If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
                above. This bias will be added to the cross-attention scores.
            enable_temporal_attentions:
                (`bool`, *optional*, defaults to `True`): Whether to enable temporal attentions.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
                tuple.

        Returns:
            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
            `tuple` where the first element is the sample tensor.
        """

        # Reshape hidden states
        batch_size, channels, num_frame, height, width = hidden_states.shape
        # batch_size channels num_frame height width -> (batch_size * num_frame) channels height width
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(-1, channels, height, width)

        # Input
        height, width = (
            hidden_states.shape[-2] // self.config.patch_size,
            hidden_states.shape[-1] // self.config.patch_size,
        )
        num_patches = height * width

        hidden_states = self.pos_embed(hidden_states)  # already add positional embeddings

        added_cond_kwargs = {"resolution": None, "aspect_ratio": None}
        timestep, embedded_timestep = self.adaln_single(
            timestep, added_cond_kwargs=added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
        )

        # Prepare text embeddings for spatial block
        # batch_size num_tokens hidden_size -> (batch_size * num_frame) num_tokens hidden_size
        encoder_hidden_states = self.caption_projection(encoder_hidden_states)  # 3 120 1152
        encoder_hidden_states_spatial = encoder_hidden_states.repeat_interleave(
            num_frame, dim=0, output_size=encoder_hidden_states.shape[0] * num_frame
        ).view(-1, encoder_hidden_states.shape[-2], encoder_hidden_states.shape[-1])

        # Prepare timesteps for spatial and temporal block
        timestep_spatial = timestep.repeat_interleave(
            num_frame, dim=0, output_size=timestep.shape[0] * num_frame
        ).view(-1, timestep.shape[-1])
        timestep_temp = timestep.repeat_interleave(
            num_patches, dim=0, output_size=timestep.shape[0] * num_patches
        ).view(-1, timestep.shape[-1])

        # Spatial and temporal transformer blocks
        for i, (spatial_block, temp_block) in enumerate(
            zip(self.transformer_blocks, self.temporal_transformer_blocks)
        ):
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                hidden_states = self._gradient_checkpointing_func(
                    spatial_block,
                    hidden_states,
                    None,  # attention_mask
                    encoder_hidden_states_spatial,
                    encoder_attention_mask,
                    timestep_spatial,
                    None,  # cross_attention_kwargs
                    None,  # class_labels
                )
            else:
                hidden_states = spatial_block(
                    hidden_states,
                    None,  # attention_mask
                    encoder_hidden_states_spatial,
                    encoder_attention_mask,
                    timestep_spatial,
                    None,  # cross_attention_kwargs
                    None,  # class_labels
                )

            if enable_temporal_attentions:
                # (batch_size * num_frame) num_tokens hidden_size -> (batch_size * num_tokens) num_frame hidden_size
                hidden_states = hidden_states.reshape(
                    batch_size, -1, hidden_states.shape[-2], hidden_states.shape[-1]
                ).permute(0, 2, 1, 3)
                hidden_states = hidden_states.reshape(-1, hidden_states.shape[-2], hidden_states.shape[-1])

                if i == 0 and num_frame > 1:
                    hidden_states = hidden_states + self.temp_pos_embed.to(hidden_states.dtype)

                if torch.is_grad_enabled() and self.gradient_checkpointing:
                    hidden_states = self._gradient_checkpointing_func(
                        temp_block,
                        hidden_states,
                        None,  # attention_mask
                        None,  # encoder_hidden_states
                        None,  # encoder_attention_mask
                        timestep_temp,
                        None,  # cross_attention_kwargs
                        None,  # class_labels
                    )
                else:
                    hidden_states = temp_block(
                        hidden_states,
                        None,  # attention_mask
                        None,  # encoder_hidden_states
                        None,  # encoder_attention_mask
                        timestep_temp,
                        None,  # cross_attention_kwargs
                        None,  # class_labels
                    )

                # (batch_size * num_tokens) num_frame hidden_size -> (batch_size * num_frame) num_tokens hidden_size
                hidden_states = hidden_states.reshape(
                    batch_size, -1, hidden_states.shape[-2], hidden_states.shape[-1]
                ).permute(0, 2, 1, 3)
                hidden_states = hidden_states.reshape(-1, hidden_states.shape[-2], hidden_states.shape[-1])

        embedded_timestep = embedded_timestep.repeat_interleave(
            num_frame, dim=0, output_size=embedded_timestep.shape[0] * num_frame
        ).view(-1, embedded_timestep.shape[-1])
        shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
        hidden_states = self.norm_out(hidden_states)
        # Modulation
        hidden_states = hidden_states * (1 + scale) + shift
        hidden_states = self.proj_out(hidden_states)

        # unpatchify
        if self.adaln_single is None:
            height = width = int(hidden_states.shape[1] ** 0.5)
        hidden_states = hidden_states.reshape(
            shape=(-1, height, width, self.config.patch_size, self.config.patch_size, self.out_channels)
        )
        hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(-1, self.out_channels, height * self.config.patch_size, width * self.config.patch_size)
        )
        output = output.reshape(batch_size, -1, output.shape[-3], output.shape[-2], output.shape[-1]).permute(
            0, 2, 1, 3, 4
        )

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
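# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Latte alternates spatial and temporal attention by folding either the frame
# axis or the token axis into the batch axis, as in the `forward` above. A
# minimal shape walk-through with plain tensors (all sizes are arbitrary):
if __name__ == "__main__":
    batch_size, channels, num_frame, height, width = 2, 4, 3, 8, 8
    video = torch.randn(batch_size, channels, num_frame, height, width)

    # Spatial blocks treat every frame as an independent image:
    spatial = video.permute(0, 2, 1, 3, 4).reshape(-1, channels, height, width)
    assert spatial.shape == (batch_size * num_frame, channels, height, width)

    # After patch embedding, temporal blocks treat every spatial token as an
    # independent sequence over frames:
    num_tokens, hidden_size = 16, 32
    tokens = torch.randn(batch_size * num_frame, num_tokens, hidden_size)
    temporal = tokens.reshape(batch_size, num_frame, num_tokens, hidden_size).permute(0, 2, 1, 3)
    temporal = temporal.reshape(-1, num_frame, hidden_size)
    assert temporal.shape == (batch_size * num_tokens, num_frame, hidden_size)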
diffusers/src/diffusers/models/transformers/latte_transformer_3d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/latte_transformer_3d.py", "repo_id": "diffusers", "token_count": 7180 }
165
from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...models.modeling_outputs import Transformer2DModelOutput from ...models.modeling_utils import ModelMixin from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import maybe_allow_in_graph from ..attention import Attention from ..embeddings import TimestepEmbedding, Timesteps logger = logging.get_logger(__name__) # pylint: disable=invalid-name class HiDreamImageFeedForwardSwiGLU(nn.Module): def __init__( self, dim: int, hidden_dim: int, multiple_of: int = 256, ffn_dim_multiplier: Optional[float] = None, ): super().__init__() hidden_dim = int(2 * hidden_dim / 3) # custom dim factor multiplier if ffn_dim_multiplier is not None: hidden_dim = int(ffn_dim_multiplier * hidden_dim) hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) self.w1 = nn.Linear(dim, hidden_dim, bias=False) self.w2 = nn.Linear(hidden_dim, dim, bias=False) self.w3 = nn.Linear(dim, hidden_dim, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.w2(torch.nn.functional.silu(self.w1(x)) * self.w3(x)) class HiDreamImagePooledEmbed(nn.Module): def __init__(self, text_emb_dim, hidden_size): super().__init__() self.pooled_embedder = TimestepEmbedding(in_channels=text_emb_dim, time_embed_dim=hidden_size) def forward(self, pooled_embed: torch.Tensor) -> torch.Tensor: return self.pooled_embedder(pooled_embed) class HiDreamImageTimestepEmbed(nn.Module): def __init__(self, hidden_size, frequency_embedding_size=256): super().__init__() self.time_proj = Timesteps(num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=frequency_embedding_size, time_embed_dim=hidden_size) def forward(self, timesteps: torch.Tensor, wdtype: Optional[torch.dtype] = None): t_emb = self.time_proj(timesteps).to(dtype=wdtype) t_emb = self.timestep_embedder(t_emb) return t_emb class HiDreamImageOutEmbed(nn.Module): def __init__(self, hidden_size, patch_size, out_channels): super().__init__() self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True) self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True)) def forward(self, hidden_states: torch.Tensor, temb: torch.Tensor) -> torch.Tensor: shift, scale = self.adaLN_modulation(temb).chunk(2, dim=1) hidden_states = self.norm_final(hidden_states) * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) hidden_states = self.linear(hidden_states) return hidden_states class HiDreamImagePatchEmbed(nn.Module): def __init__( self, patch_size=2, in_channels=4, out_channels=1024, ): super().__init__() self.patch_size = patch_size self.out_channels = out_channels self.proj = nn.Linear(in_channels * patch_size * patch_size, out_channels, bias=True) def forward(self, latent): latent = self.proj(latent) return latent def rope(pos: torch.Tensor, dim: int, theta: int) -> torch.Tensor: assert dim % 2 == 0, "The dimension must be even." 
is_mps = pos.device.type == "mps" is_npu = pos.device.type == "npu" dtype = torch.float32 if (is_mps or is_npu) else torch.float64 scale = torch.arange(0, dim, 2, dtype=dtype, device=pos.device) / dim omega = 1.0 / (theta**scale) batch_size, seq_length = pos.shape out = torch.einsum("...n,d->...nd", pos, omega) cos_out = torch.cos(out) sin_out = torch.sin(out) stacked_out = torch.stack([cos_out, -sin_out, sin_out, cos_out], dim=-1) out = stacked_out.view(batch_size, -1, dim // 2, 2, 2) return out.float() class HiDreamImageEmbedND(nn.Module): def __init__(self, theta: int, axes_dim: List[int]): super().__init__() self.theta = theta self.axes_dim = axes_dim def forward(self, ids: torch.Tensor) -> torch.Tensor: n_axes = ids.shape[-1] emb = torch.cat( [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)], dim=-3, ) return emb.unsqueeze(2) def apply_rope(xq: torch.Tensor, xk: torch.Tensor, freqs_cis: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2) xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2) xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1] xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1] return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk) @maybe_allow_in_graph class HiDreamAttention(Attention): def __init__( self, query_dim: int, heads: int = 8, dim_head: int = 64, upcast_attention: bool = False, upcast_softmax: bool = False, scale_qk: bool = True, eps: float = 1e-5, processor=None, out_dim: int = None, single: bool = False, ): super(Attention, self).__init__() self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.upcast_attention = upcast_attention self.upcast_softmax = upcast_softmax self.out_dim = out_dim if out_dim is not None else query_dim self.scale_qk = scale_qk self.scale = dim_head**-0.5 if self.scale_qk else 1.0 self.heads = out_dim // dim_head if out_dim is not None else heads self.sliceable_head_dim = heads self.single = single self.to_q = nn.Linear(query_dim, self.inner_dim) self.to_k = nn.Linear(self.inner_dim, self.inner_dim) self.to_v = nn.Linear(self.inner_dim, self.inner_dim) self.to_out = nn.Linear(self.inner_dim, self.out_dim) self.q_rms_norm = nn.RMSNorm(self.inner_dim, eps) self.k_rms_norm = nn.RMSNorm(self.inner_dim, eps) if not single: self.to_q_t = nn.Linear(query_dim, self.inner_dim) self.to_k_t = nn.Linear(self.inner_dim, self.inner_dim) self.to_v_t = nn.Linear(self.inner_dim, self.inner_dim) self.to_out_t = nn.Linear(self.inner_dim, self.out_dim) self.q_rms_norm_t = nn.RMSNorm(self.inner_dim, eps) self.k_rms_norm_t = nn.RMSNorm(self.inner_dim, eps) self.set_processor(processor) def forward( self, norm_hidden_states: torch.Tensor, hidden_states_masks: torch.Tensor = None, norm_encoder_hidden_states: torch.Tensor = None, image_rotary_emb: torch.Tensor = None, ) -> torch.Tensor: return self.processor( self, hidden_states=norm_hidden_states, hidden_states_masks=hidden_states_masks, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, ) class HiDreamAttnProcessor: """Attention processor used typically in processing the SD3-like self-attention projections.""" def __call__( self, attn: HiDreamAttention, hidden_states: torch.Tensor, hidden_states_masks: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, image_rotary_emb: torch.Tensor = None, *args, **kwargs, ) -> torch.Tensor: dtype = 
hidden_states.dtype
        batch_size = hidden_states.shape[0]

        query_i = attn.q_rms_norm(attn.to_q(hidden_states)).to(dtype=dtype)
        key_i = attn.k_rms_norm(attn.to_k(hidden_states)).to(dtype=dtype)
        value_i = attn.to_v(hidden_states)

        inner_dim = key_i.shape[-1]
        head_dim = inner_dim // attn.heads

        query_i = query_i.view(batch_size, -1, attn.heads, head_dim)
        key_i = key_i.view(batch_size, -1, attn.heads, head_dim)
        value_i = value_i.view(batch_size, -1, attn.heads, head_dim)
        if hidden_states_masks is not None:
            key_i = key_i * hidden_states_masks.view(batch_size, -1, 1, 1)

        if not attn.single:
            query_t = attn.q_rms_norm_t(attn.to_q_t(encoder_hidden_states)).to(dtype=dtype)
            key_t = attn.k_rms_norm_t(attn.to_k_t(encoder_hidden_states)).to(dtype=dtype)
            value_t = attn.to_v_t(encoder_hidden_states)

            query_t = query_t.view(batch_size, -1, attn.heads, head_dim)
            key_t = key_t.view(batch_size, -1, attn.heads, head_dim)
            value_t = value_t.view(batch_size, -1, attn.heads, head_dim)

            num_image_tokens = query_i.shape[1]
            num_text_tokens = query_t.shape[1]

            query = torch.cat([query_i, query_t], dim=1)
            key = torch.cat([key_i, key_t], dim=1)
            value = torch.cat([value_i, value_t], dim=1)
        else:
            query = query_i
            key = key_i
            value = value_i

        if query.shape[-1] == image_rotary_emb.shape[-3] * 2:
            query, key = apply_rope(query, key, image_rotary_emb)
        else:
            query_1, query_2 = query.chunk(2, dim=-1)
            key_1, key_2 = key.chunk(2, dim=-1)
            query_1, key_1 = apply_rope(query_1, key_1, image_rotary_emb)
            query = torch.cat([query_1, query_2], dim=-1)
            key = torch.cat([key_1, key_2], dim=-1)

        hidden_states = F.scaled_dot_product_attention(
            query.transpose(1, 2), key.transpose(1, 2), value.transpose(1, 2), dropout_p=0.0, is_causal=False
        )
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        if not attn.single:
            hidden_states_i, hidden_states_t = torch.split(hidden_states, [num_image_tokens, num_text_tokens], dim=1)
            hidden_states_i = attn.to_out(hidden_states_i)
            hidden_states_t = attn.to_out_t(hidden_states_t)
            return hidden_states_i, hidden_states_t
        else:
            hidden_states = attn.to_out(hidden_states)
            return hidden_states


# Modified from https://github.com/deepseek-ai/DeepSeek-V3/blob/main/inference/model.py
class MoEGate(nn.Module):
    def __init__(
        self,
        embed_dim,
        num_routed_experts=4,
        num_activated_experts=2,
        aux_loss_alpha=0.01,
        _force_inference_output=False,
    ):
        super().__init__()
        self.top_k = num_activated_experts
        self.n_routed_experts = num_routed_experts
        self.scoring_func = "softmax"
        self.alpha = aux_loss_alpha
        self.seq_aux = False

        # topk selection algorithm
        self.norm_topk_prob = False
        self.gating_dim = embed_dim
        self.weight = nn.Parameter(torch.randn(self.n_routed_experts, self.gating_dim) / embed_dim**0.5)
        self._force_inference_output = _force_inference_output

    def forward(self, hidden_states):
        bsz, seq_len, h = hidden_states.shape
        ### compute gating score
        hidden_states = hidden_states.view(-1, h)
        logits = F.linear(hidden_states, self.weight, None)
        if self.scoring_func == "softmax":
            scores = logits.softmax(dim=-1)
        else:
            raise NotImplementedError(f"unsupported scoring function for MoE gating: {self.scoring_func}")

        ### select top-k experts
        topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False)

        ### norm gate to sum 1
        if self.top_k > 1 and self.norm_topk_prob:
            denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
            topk_weight = topk_weight / denominator

        ### expert-level computation auxiliary loss
        if self.training and self.alpha > 0.0 and
not self._force_inference_output: scores_for_aux = scores aux_topk = self.top_k # always compute aux loss based on the naive greedy topk method topk_idx_for_aux_loss = topk_idx.view(bsz, -1) if self.seq_aux: scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1) ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device) ce.scatter_add_( 1, topk_idx_for_aux_loss, torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device) ).div_(seq_len * aux_topk / self.n_routed_experts) aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha else: mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts) ce = mask_ce.float().mean(0) Pi = scores_for_aux.mean(0) fi = ce * self.n_routed_experts aux_loss = (Pi * fi).sum() * self.alpha else: aux_loss = None return topk_idx, topk_weight, aux_loss # Modified from https://github.com/deepseek-ai/DeepSeek-V3/blob/main/inference/model.py class MOEFeedForwardSwiGLU(nn.Module): def __init__( self, dim: int, hidden_dim: int, num_routed_experts: int, num_activated_experts: int, _force_inference_output: bool = False, ): super().__init__() self.shared_experts = HiDreamImageFeedForwardSwiGLU(dim, hidden_dim // 2) self.experts = nn.ModuleList( [HiDreamImageFeedForwardSwiGLU(dim, hidden_dim) for i in range(num_routed_experts)] ) self._force_inference_output = _force_inference_output self.gate = MoEGate( embed_dim=dim, num_routed_experts=num_routed_experts, num_activated_experts=num_activated_experts, _force_inference_output=_force_inference_output, ) self.num_activated_experts = num_activated_experts def forward(self, x): wtype = x.dtype identity = x orig_shape = x.shape topk_idx, topk_weight, aux_loss = self.gate(x) x = x.view(-1, x.shape[-1]) flat_topk_idx = topk_idx.view(-1) if self.training and not self._force_inference_output: x = x.repeat_interleave(self.num_activated_experts, dim=0) y = torch.empty_like(x, dtype=wtype) for i, expert in enumerate(self.experts): y[flat_topk_idx == i] = expert(x[flat_topk_idx == i]).to(dtype=wtype) y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1) y = y.view(*orig_shape).to(dtype=wtype) # y = AddAuxiliaryLoss.apply(y, aux_loss) else: y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape) y = y + self.shared_experts(identity) return y @torch.no_grad() def moe_infer(self, x, flat_expert_indices, flat_expert_weights): expert_cache = torch.zeros_like(x) idxs = flat_expert_indices.argsort() tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0) token_idxs = idxs // self.num_activated_experts for i, end_idx in enumerate(tokens_per_expert): start_idx = 0 if i == 0 else tokens_per_expert[i - 1] if start_idx == end_idx: continue expert = self.experts[i] exp_token_idx = token_idxs[start_idx:end_idx] expert_tokens = x[exp_token_idx] expert_out = expert(expert_tokens) expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]]) # for fp16 and other dtype expert_cache = expert_cache.to(expert_out.dtype) expert_cache.scatter_reduce_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out, reduce="sum") return expert_cache class TextProjection(nn.Module): def __init__(self, in_features, hidden_size): super().__init__() self.linear = nn.Linear(in_features=in_features, out_features=hidden_size, bias=False) def forward(self, caption): hidden_states = self.linear(caption) return hidden_states @maybe_allow_in_graph class HiDreamImageSingleTransformerBlock(nn.Module): def __init__( self, dim: int, 
num_attention_heads: int, attention_head_dim: int, num_routed_experts: int = 4, num_activated_experts: int = 2, _force_inference_output: bool = False, ): super().__init__() self.num_attention_heads = num_attention_heads self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 6 * dim, bias=True)) # 1. Attention self.norm1_i = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False) self.attn1 = HiDreamAttention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, processor=HiDreamAttnProcessor(), single=True, ) # 3. Feed-forward self.norm3_i = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False) if num_routed_experts > 0: self.ff_i = MOEFeedForwardSwiGLU( dim=dim, hidden_dim=4 * dim, num_routed_experts=num_routed_experts, num_activated_experts=num_activated_experts, _force_inference_output=_force_inference_output, ) else: self.ff_i = HiDreamImageFeedForwardSwiGLU(dim=dim, hidden_dim=4 * dim) def forward( self, hidden_states: torch.Tensor, hidden_states_masks: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, image_rotary_emb: torch.Tensor = None, ) -> torch.Tensor: wtype = hidden_states.dtype shift_msa_i, scale_msa_i, gate_msa_i, shift_mlp_i, scale_mlp_i, gate_mlp_i = self.adaLN_modulation(temb)[ :, None ].chunk(6, dim=-1) # 1. MM-Attention norm_hidden_states = self.norm1_i(hidden_states).to(dtype=wtype) norm_hidden_states = norm_hidden_states * (1 + scale_msa_i) + shift_msa_i attn_output_i = self.attn1( norm_hidden_states, hidden_states_masks, image_rotary_emb=image_rotary_emb, ) hidden_states = gate_msa_i * attn_output_i + hidden_states # 2. Feed-forward norm_hidden_states = self.norm3_i(hidden_states).to(dtype=wtype) norm_hidden_states = norm_hidden_states * (1 + scale_mlp_i) + shift_mlp_i ff_output_i = gate_mlp_i * self.ff_i(norm_hidden_states.to(dtype=wtype)) hidden_states = ff_output_i + hidden_states return hidden_states @maybe_allow_in_graph class HiDreamImageTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, num_routed_experts: int = 4, num_activated_experts: int = 2, _force_inference_output: bool = False, ): super().__init__() self.num_attention_heads = num_attention_heads self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 12 * dim, bias=True)) # 1. Attention self.norm1_i = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False) self.norm1_t = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False) self.attn1 = HiDreamAttention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, processor=HiDreamAttnProcessor(), single=False, ) # 3. 
Feed-forward self.norm3_i = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False) if num_routed_experts > 0: self.ff_i = MOEFeedForwardSwiGLU( dim=dim, hidden_dim=4 * dim, num_routed_experts=num_routed_experts, num_activated_experts=num_activated_experts, _force_inference_output=_force_inference_output, ) else: self.ff_i = HiDreamImageFeedForwardSwiGLU(dim=dim, hidden_dim=4 * dim) self.norm3_t = nn.LayerNorm(dim, eps=1e-06, elementwise_affine=False) self.ff_t = HiDreamImageFeedForwardSwiGLU(dim=dim, hidden_dim=4 * dim) def forward( self, hidden_states: torch.Tensor, hidden_states_masks: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, image_rotary_emb: torch.Tensor = None, ) -> torch.Tensor: wtype = hidden_states.dtype ( shift_msa_i, scale_msa_i, gate_msa_i, shift_mlp_i, scale_mlp_i, gate_mlp_i, shift_msa_t, scale_msa_t, gate_msa_t, shift_mlp_t, scale_mlp_t, gate_mlp_t, ) = self.adaLN_modulation(temb)[:, None].chunk(12, dim=-1) # 1. MM-Attention norm_hidden_states = self.norm1_i(hidden_states).to(dtype=wtype) norm_hidden_states = norm_hidden_states * (1 + scale_msa_i) + shift_msa_i norm_encoder_hidden_states = self.norm1_t(encoder_hidden_states).to(dtype=wtype) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + scale_msa_t) + shift_msa_t attn_output_i, attn_output_t = self.attn1( norm_hidden_states, hidden_states_masks, norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, ) hidden_states = gate_msa_i * attn_output_i + hidden_states encoder_hidden_states = gate_msa_t * attn_output_t + encoder_hidden_states # 2. Feed-forward norm_hidden_states = self.norm3_i(hidden_states).to(dtype=wtype) norm_hidden_states = norm_hidden_states * (1 + scale_mlp_i) + shift_mlp_i norm_encoder_hidden_states = self.norm3_t(encoder_hidden_states).to(dtype=wtype) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + scale_mlp_t) + shift_mlp_t ff_output_i = gate_mlp_i * self.ff_i(norm_hidden_states) ff_output_t = gate_mlp_t * self.ff_t(norm_encoder_hidden_states) hidden_states = ff_output_i + hidden_states encoder_hidden_states = ff_output_t + encoder_hidden_states return hidden_states, encoder_hidden_states class HiDreamBlock(nn.Module): def __init__(self, block: Union[HiDreamImageTransformerBlock, HiDreamImageSingleTransformerBlock]): super().__init__() self.block = block def forward( self, hidden_states: torch.Tensor, hidden_states_masks: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None, image_rotary_emb: torch.Tensor = None, ) -> torch.Tensor: return self.block( hidden_states=hidden_states, hidden_states_masks=hidden_states_masks, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) class HiDreamImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True _no_split_modules = ["HiDreamImageTransformerBlock", "HiDreamImageSingleTransformerBlock"] @register_to_config def __init__( self, patch_size: Optional[int] = None, in_channels: int = 64, out_channels: Optional[int] = None, num_layers: int = 16, num_single_layers: int = 32, attention_head_dim: int = 128, num_attention_heads: int = 20, caption_channels: List[int] = None, text_emb_dim: int = 2048, num_routed_experts: int = 4, num_activated_experts: int = 2, axes_dims_rope: Tuple[int, int] = (32, 32), max_resolution: Tuple[int, int] = (128, 128), llama_layers: List[int] = None, 
force_inference_output: bool = False, ): super().__init__() self.out_channels = out_channels or in_channels self.inner_dim = num_attention_heads * attention_head_dim self.t_embedder = HiDreamImageTimestepEmbed(self.inner_dim) self.p_embedder = HiDreamImagePooledEmbed(text_emb_dim, self.inner_dim) self.x_embedder = HiDreamImagePatchEmbed( patch_size=patch_size, in_channels=in_channels, out_channels=self.inner_dim, ) self.pe_embedder = HiDreamImageEmbedND(theta=10000, axes_dim=axes_dims_rope) self.double_stream_blocks = nn.ModuleList( [ HiDreamBlock( HiDreamImageTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, num_routed_experts=num_routed_experts, num_activated_experts=num_activated_experts, _force_inference_output=force_inference_output, ) ) for _ in range(num_layers) ] ) self.single_stream_blocks = nn.ModuleList( [ HiDreamBlock( HiDreamImageSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, num_routed_experts=num_routed_experts, num_activated_experts=num_activated_experts, _force_inference_output=force_inference_output, ) ) for _ in range(num_single_layers) ] ) self.final_layer = HiDreamImageOutEmbed(self.inner_dim, patch_size, self.out_channels) caption_channels = [caption_channels[1]] * (num_layers + num_single_layers) + [caption_channels[0]] caption_projection = [] for caption_channel in caption_channels: caption_projection.append(TextProjection(in_features=caption_channel, hidden_size=self.inner_dim)) self.caption_projection = nn.ModuleList(caption_projection) self.max_seq = max_resolution[0] * max_resolution[1] // (patch_size * patch_size) self.gradient_checkpointing = False def unpatchify(self, x: torch.Tensor, img_sizes: List[Tuple[int, int]], is_training: bool) -> List[torch.Tensor]: if is_training and not self.config.force_inference_output: B, S, F = x.shape C = F // (self.config.patch_size * self.config.patch_size) x = ( x.reshape(B, S, self.config.patch_size, self.config.patch_size, C) .permute(0, 4, 1, 2, 3) .reshape(B, C, S, self.config.patch_size * self.config.patch_size) ) else: x_arr = [] p1 = self.config.patch_size p2 = self.config.patch_size for i, img_size in enumerate(img_sizes): pH, pW = img_size t = x[i, : pH * pW].reshape(1, pH, pW, -1) F_token = t.shape[-1] C = F_token // (p1 * p2) t = t.reshape(1, pH, pW, p1, p2, C) t = t.permute(0, 5, 1, 3, 2, 4) t = t.reshape(1, C, pH * p1, pW * p2) x_arr.append(t) x = torch.cat(x_arr, dim=0) return x def patchify(self, hidden_states): batch_size, channels, height, width = hidden_states.shape patch_size = self.config.patch_size patch_height, patch_width = height // patch_size, width // patch_size device = hidden_states.device dtype = hidden_states.dtype # create img_sizes img_sizes = torch.tensor([patch_height, patch_width], dtype=torch.int64, device=device).reshape(-1) img_sizes = img_sizes.unsqueeze(0).repeat(batch_size, 1) # create hidden_states_masks if hidden_states.shape[-2] != hidden_states.shape[-1]: hidden_states_masks = torch.zeros((batch_size, self.max_seq), dtype=dtype, device=device) hidden_states_masks[:, : patch_height * patch_width] = 1.0 else: hidden_states_masks = None # create img_ids img_ids = torch.zeros(patch_height, patch_width, 3, device=device) row_indices = torch.arange(patch_height, device=device)[:, None] col_indices = torch.arange(patch_width, device=device)[None, :] img_ids[..., 1] = img_ids[..., 1] + row_indices img_ids[..., 2] = img_ids[..., 2] + col_indices 
        img_ids = img_ids.reshape(patch_height * patch_width, -1)
        if hidden_states.shape[-2] != hidden_states.shape[-1]:
            # Handle non-square latents
            img_ids_pad = torch.zeros(self.max_seq, 3, device=device)
            img_ids_pad[: patch_height * patch_width, :] = img_ids
            img_ids = img_ids_pad.unsqueeze(0).repeat(batch_size, 1, 1)
        else:
            img_ids = img_ids.unsqueeze(0).repeat(batch_size, 1, 1)

        # patchify hidden_states
        if hidden_states.shape[-2] != hidden_states.shape[-1]:
            # Handle non-square latents
            out = torch.zeros(
                (batch_size, channels, self.max_seq, patch_size * patch_size),
                dtype=dtype,
                device=device,
            )
            hidden_states = hidden_states.reshape(
                batch_size, channels, patch_height, patch_size, patch_width, patch_size
            )
            hidden_states = hidden_states.permute(0, 1, 2, 4, 3, 5)
            hidden_states = hidden_states.reshape(
                batch_size, channels, patch_height * patch_width, patch_size * patch_size
            )
            out[:, :, 0 : patch_height * patch_width] = hidden_states
            hidden_states = out
            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
                batch_size, self.max_seq, patch_size * patch_size * channels
            )
        else:
            # Handle square latents
            hidden_states = hidden_states.reshape(
                batch_size, channels, patch_height, patch_size, patch_width, patch_size
            )
            hidden_states = hidden_states.permute(0, 2, 4, 3, 5, 1)
            hidden_states = hidden_states.reshape(
                batch_size, patch_height * patch_width, patch_size * patch_size * channels
            )

        return hidden_states, hidden_states_masks, img_sizes, img_ids

    def forward(
        self,
        hidden_states: torch.Tensor,
        timesteps: torch.LongTensor = None,
        encoder_hidden_states_t5: torch.Tensor = None,
        encoder_hidden_states_llama3: torch.Tensor = None,
        pooled_embeds: torch.Tensor = None,
        img_ids: Optional[torch.Tensor] = None,
        img_sizes: Optional[List[Tuple[int, int]]] = None,
        hidden_states_masks: Optional[torch.Tensor] = None,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
        **kwargs,
    ):
        encoder_hidden_states = kwargs.get("encoder_hidden_states", None)

        if encoder_hidden_states is not None:
            deprecation_message = "The `encoder_hidden_states` argument is deprecated. Please use `encoder_hidden_states_t5` and `encoder_hidden_states_llama3` instead."
            deprecate("encoder_hidden_states", "0.35.0", deprecation_message)
            encoder_hidden_states_t5 = encoder_hidden_states[0]
            encoder_hidden_states_llama3 = encoder_hidden_states[1]

        if img_ids is not None and img_sizes is not None and hidden_states_masks is None:
            deprecation_message = (
                "Passing `img_ids` and `img_sizes` with unpatchified `hidden_states` is deprecated and will be ignored."
            )
            deprecate("img_ids", "0.35.0", deprecation_message)

        if hidden_states_masks is not None and (img_ids is None or img_sizes is None):
            raise ValueError("if `hidden_states_masks` is passed, `img_ids` and `img_sizes` must also be passed.")
        elif hidden_states_masks is not None and hidden_states.ndim != 3:
            raise ValueError(
                "if `hidden_states_masks` is passed, `hidden_states` must be a 3D tensor with shape (batch_size, patch_height * patch_width, patch_size * patch_size * channels)"
            )

        if attention_kwargs is not None:
            attention_kwargs = attention_kwargs.copy()
            lora_scale = attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        if USE_PEFT_BACKEND:
            # weight the lora layers by setting `lora_scale` for each PEFT layer
            scale_lora_layers(self, lora_scale)
        else:
            if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
                logger.warning(
                    "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
) # spatial forward batch_size = hidden_states.shape[0] hidden_states_type = hidden_states.dtype # Patchify the input if hidden_states_masks is None: hidden_states, hidden_states_masks, img_sizes, img_ids = self.patchify(hidden_states) # Embed the hidden states hidden_states = self.x_embedder(hidden_states) # 0. time timesteps = self.t_embedder(timesteps, hidden_states_type) p_embedder = self.p_embedder(pooled_embeds) temb = timesteps + p_embedder encoder_hidden_states = [encoder_hidden_states_llama3[k] for k in self.config.llama_layers] if self.caption_projection is not None: new_encoder_hidden_states = [] for i, enc_hidden_state in enumerate(encoder_hidden_states): enc_hidden_state = self.caption_projection[i](enc_hidden_state) enc_hidden_state = enc_hidden_state.view(batch_size, -1, hidden_states.shape[-1]) new_encoder_hidden_states.append(enc_hidden_state) encoder_hidden_states = new_encoder_hidden_states encoder_hidden_states_t5 = self.caption_projection[-1](encoder_hidden_states_t5) encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, -1, hidden_states.shape[-1]) encoder_hidden_states.append(encoder_hidden_states_t5) txt_ids = torch.zeros( batch_size, encoder_hidden_states[-1].shape[1] + encoder_hidden_states[-2].shape[1] + encoder_hidden_states[0].shape[1], 3, device=img_ids.device, dtype=img_ids.dtype, ) ids = torch.cat((img_ids, txt_ids), dim=1) image_rotary_emb = self.pe_embedder(ids) # 2. Blocks block_id = 0 initial_encoder_hidden_states = torch.cat([encoder_hidden_states[-1], encoder_hidden_states[-2]], dim=1) initial_encoder_hidden_states_seq_len = initial_encoder_hidden_states.shape[1] for bid, block in enumerate(self.double_stream_blocks): cur_llama31_encoder_hidden_states = encoder_hidden_states[block_id] cur_encoder_hidden_states = torch.cat( [initial_encoder_hidden_states, cur_llama31_encoder_hidden_states], dim=1 ) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states, initial_encoder_hidden_states = self._gradient_checkpointing_func( block, hidden_states, hidden_states_masks, cur_encoder_hidden_states, temb, image_rotary_emb, ) else: hidden_states, initial_encoder_hidden_states = block( hidden_states=hidden_states, hidden_states_masks=hidden_states_masks, encoder_hidden_states=cur_encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) initial_encoder_hidden_states = initial_encoder_hidden_states[:, :initial_encoder_hidden_states_seq_len] block_id += 1 image_tokens_seq_len = hidden_states.shape[1] hidden_states = torch.cat([hidden_states, initial_encoder_hidden_states], dim=1) hidden_states_seq_len = hidden_states.shape[1] if hidden_states_masks is not None: encoder_attention_mask_ones = torch.ones( (batch_size, initial_encoder_hidden_states.shape[1] + cur_llama31_encoder_hidden_states.shape[1]), device=hidden_states_masks.device, dtype=hidden_states_masks.dtype, ) hidden_states_masks = torch.cat([hidden_states_masks, encoder_attention_mask_ones], dim=1) for bid, block in enumerate(self.single_stream_blocks): cur_llama31_encoder_hidden_states = encoder_hidden_states[block_id] hidden_states = torch.cat([hidden_states, cur_llama31_encoder_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, hidden_states_masks, None, temb, image_rotary_emb, ) else: hidden_states = block( hidden_states=hidden_states, hidden_states_masks=hidden_states_masks, encoder_hidden_states=None, temb=temb, image_rotary_emb=image_rotary_emb, ) 
hidden_states = hidden_states[:, :hidden_states_seq_len] block_id += 1 hidden_states = hidden_states[:, :image_tokens_seq_len, ...] output = self.final_layer(hidden_states, temb) output = self.unpatchify(output, img_sizes, self.training) if hidden_states_masks is not None: hidden_states_masks = hidden_states_masks[:, :image_tokens_seq_len] if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
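# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Shape walk-through for the rotary embedding helpers above: `rope` builds a
# 2x2 rotation matrix per (position, frequency) pair, `HiDreamImageEmbedND`
# concatenates one such table per positional axis, and `apply_rope` rotates
# query/key heads with the result. All sizes below are arbitrary examples:
if __name__ == "__main__":
    _ids = torch.zeros(1, 6, 2)  # (batch, seq, n_axes)
    _ids[..., 0] = torch.arange(6)  # positions along the first axis

    _pe = HiDreamImageEmbedND(theta=10_000, axes_dim=[4, 4])
    _freqs = _pe(_ids)  # (batch, seq, 1, sum(axes_dim) // 2, 2, 2)
    assert _freqs.shape == (1, 6, 1, 4, 2, 2)

    _q = torch.randn(1, 6, 2, 8)  # (batch, seq, heads, head_dim == sum(axes_dim))
    _k = torch.randn(1, 6, 2, 8)
    _q_rot, _k_rot = apply_rope(_q, _k, _freqs)
    assert _q_rot.shape == _q.shape and _k_rot.shape == _k.shape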
diffusers/src/diffusers/models/transformers/transformer_hidream_image.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_hidream_image.py", "repo_id": "diffusers", "token_count": 19295 }
166
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import BaseOutput
from ..embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from ..modeling_utils import ModelMixin
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class UNet2DOutput(BaseOutput):
    """
    The output of [`UNet2DModel`].

    Args:
        sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.Tensor


class UNet2DModel(ModelMixin, ConfigMixin):
    r"""
    A 2D UNet model that takes a noisy sample and a timestep and returns a sample shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample. Dimensions must be a multiple of
            `2 ** (len(block_out_channels) - 1)`.
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
        freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip sin to cos for Fourier time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`):
            Tuple of downsample block types.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2D"`):
            Block type for middle of UNet, it can be either `UNetMidBlock2D` or `None`.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
        mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
        downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution.
        downsample_type (`str`, *optional*, defaults to `conv`):
            The downsample type for downsampling layers. Choose between "conv" and "resnet"
        upsample_type (`str`, *optional*, defaults to `conv`):
            The upsample type for upsampling layers. Choose between "conv" and "resnet"
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension. norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for normalization. attn_norm_num_groups (`int`, *optional*, defaults to `None`): If set to an integer, a group norm layer will be created in the mid block's [`Attention`] layer with the given number of groups. If left as `None`, the group norm layer will only be created if `resnet_time_scale_shift` is set to `default`, and if created will have `norm_num_groups` groups. norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization. resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, or `"identity"`. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim` when performing class conditioning with `class_embed_type` equal to `None`. """ _supports_gradient_checkpointing = True _skip_layerwise_casting_patterns = ["norm"] @register_to_config def __init__( self, sample_size: Optional[Union[int, Tuple[int, int]]] = None, in_channels: int = 3, out_channels: int = 3, center_input_sample: bool = False, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, freq_shift: int = 0, flip_sin_to_cos: bool = True, down_block_types: Tuple[str, ...] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"), mid_block_type: Optional[str] = "UNetMidBlock2D", up_block_types: Tuple[str, ...] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"), block_out_channels: Tuple[int, ...] = (224, 448, 672, 896), layers_per_block: int = 2, mid_block_scale_factor: float = 1, downsample_padding: int = 1, downsample_type: str = "conv", upsample_type: str = "conv", dropout: float = 0.0, act_fn: str = "silu", attention_head_dim: Optional[int] = 8, norm_num_groups: int = 32, attn_norm_num_groups: Optional[int] = None, norm_eps: float = 1e-5, resnet_time_scale_shift: str = "default", add_attention: bool = True, class_embed_type: Optional[str] = None, num_class_embeds: Optional[int] = None, num_train_timesteps: Optional[int] = None, ): super().__init__() self.sample_size = sample_size time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." 
) # input self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)) # time if time_embedding_type == "fourier": self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16) timestep_input_dim = 2 * block_out_channels[0] elif time_embedding_type == "positional": self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] elif time_embedding_type == "learned": self.time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0]) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) else: self.class_embedding = None self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = nn.ModuleList([]) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, downsample_type=downsample_type, dropout=dropout, ) self.down_blocks.append(down_block) # mid if mid_block_type is None: self.mid_block = None else: self.mid_block = UNetMidBlock2D( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, attention_head_dim=attention_head_dim if attention_head_dim is not None else block_out_channels[-1], resnet_groups=norm_num_groups, attn_groups=attn_norm_num_groups, add_attention=add_attention, ) # up reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block( up_block_type, num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel, resnet_time_scale_shift=resnet_time_scale_shift, upsample_type=upsample_type, dropout=dropout, ) self.up_blocks.append(up_block) # out num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], 
num_groups=num_groups_out, eps=norm_eps) self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], class_labels: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNet2DOutput, Tuple]: r""" The [`UNet2DModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d.UNet2DOutput`] instead of a plain tuple. Returns: [`~models.unets.unet_2d.UNet2DOutput`] or `tuple`: If `return_dict` is True, an [`~models.unets.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # 0. center input if necessary if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time timesteps = timestep if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=self.dtype) emb = self.time_embedding(t_emb) if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when doing class conditioning") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb elif self.class_embedding is None and class_labels is not None: raise ValueError("class_embedding needs to be initialized in order to use class conditioning") # 2. pre-process skip_sample = sample sample = self.conv_in(sample) # 3. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "skip_conv"): sample, res_samples, skip_sample = downsample_block( hidden_states=sample, temb=emb, skip_sample=skip_sample ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: sample = self.mid_block(sample, emb) # 5. up skip_sample = None for upsample_block in self.up_blocks: res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] if hasattr(upsample_block, "skip_conv"): sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample) else: sample = upsample_block(sample, res_samples, emb) # 6. 
post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if skip_sample is not None: sample += skip_sample if self.config.time_embedding_type == "fourier": timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:])))) sample = sample / timesteps if not return_dict: return (sample,) return UNet2DOutput(sample=sample)
diffusers/src/diffusers/models/unets/unet_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_2d.py", "repo_id": "diffusers", "token_count": 7439 }
167
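As a quick orientation to the `UNet2DModel` record above, here is a minimal usage sketch. The tiny configuration is invented purely for illustration (a real checkpoint such as `google/ddpm-cat-256` would normally be loaded via `from_pretrained`); only the class name, its constructor arguments, and the `forward` signature come from the file itself:

```py
import torch
from diffusers import UNet2DModel

# Hypothetical toy config so the example runs quickly on CPU;
# real models use larger block_out_channels and more blocks.
model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)

noisy_sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    # forward returns a UNet2DOutput; .sample holds the model prediction
    out = model(noisy_sample, timestep=10).sample

print(out.shape)  # torch.Size([1, 3, 32, 32])
```

Note that `timestep` may be passed as a plain `int`: as the `forward` body above shows, scalar timesteps are promoted to a tensor and broadcast across the batch dimension.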
from typing import TYPE_CHECKING from ..utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, logging, ) logger = logging.get_logger(__name__) logger.warning( "Modular Diffusers is currently an experimental feature under active development. The API is subject to breaking changes in future releases." ) # These modules contain pipelines from multiple libraries/frameworks _dummy_objects = {} _import_structure = {} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_pt_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: _import_structure["modular_pipeline"] = [ "ModularPipelineBlocks", "ModularPipeline", "AutoPipelineBlocks", "SequentialPipelineBlocks", "LoopSequentialPipelineBlocks", "PipelineState", "BlockState", ] _import_structure["modular_pipeline_utils"] = [ "ComponentSpec", "ConfigSpec", "InputParam", "OutputParam", "InsertableDict", ] _import_structure["stable_diffusion_xl"] = ["StableDiffusionXLAutoBlocks", "StableDiffusionXLModularPipeline"] _import_structure["wan"] = ["WanAutoBlocks", "WanModularPipeline"] _import_structure["flux"] = ["FluxAutoBlocks", "FluxModularPipeline"] _import_structure["components_manager"] = ["ComponentsManager"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .components_manager import ComponentsManager from .flux import FluxAutoBlocks, FluxModularPipeline from .modular_pipeline import ( AutoPipelineBlocks, BlockState, LoopSequentialPipelineBlocks, ModularPipeline, ModularPipelineBlocks, PipelineState, SequentialPipelineBlocks, ) from .modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, InsertableDict, OutputParam from .stable_diffusion_xl import StableDiffusionXLAutoBlocks, StableDiffusionXLModularPipeline from .wan import WanAutoBlocks, WanModularPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/modular_pipelines/__init__.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/__init__.py", "repo_id": "diffusers", "token_count": 1122 }
168
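Because the `__init__.py` above routes everything through `_LazyModule`, importing the package is cheap and each submodule is only loaded on first attribute access. A small, hypothetical sketch of that behavior (assuming `torch` is installed, so the dummy-object branch is not taken; importing the package also emits the experimental-feature warning from `logger.warning` above):

```py
import diffusers.modular_pipelines as mp

# No modular submodule has been imported yet; _LazyModule defers that work.
blocks_cls = mp.SequentialPipelineBlocks  # first access triggers the real import
print(blocks_cls.__module__)  # diffusers.modular_pipelines.modular_pipeline
```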
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from ...configuration_utils import FrozenDict from ...guiders import ClassifierFreeGuidance from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...utils import ( USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers, ) from ..modular_pipeline import ModularPipelineBlocks, PipelineState from ..modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, OutputParam from .modular_pipeline import StableDiffusionXLModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class StableDiffusionXLIPAdapterStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def description(self) -> str: return ( "IP Adapter step that prepares ip adapter image embeddings.\n" "Note that this step only prepares the embeddings - in order for it to work correctly, " "you need to load ip adapter weights into unet via ModularPipeline.load_ip_adapter() and pipeline.set_ip_adapter_scale().\n" "See [ModularIPAdapterMixin](https://huggingface.co/docs/diffusers/api/loaders/ip_adapter#diffusers.loaders.ModularIPAdapterMixin)" " for more details" ) @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("image_encoder", CLIPVisionModelWithProjection), ComponentSpec( "feature_extractor", CLIPImageProcessor, config=FrozenDict({"size": 224, "crop_size": 224}), default_creation_method="from_config", ), ComponentSpec("unet", UNet2DConditionModel), ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 7.5}), default_creation_method="from_config", ), ] @property def inputs(self) -> List[InputParam]: return [ InputParam( "ip_adapter_image", PipelineImageInput, required=True, description="The image(s) to be used as ip adapter", ) ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam("ip_adapter_embeds", type_hint=torch.Tensor, description="IP adapter 
image embeddings"), OutputParam( "negative_ip_adapter_embeds", type_hint=torch.Tensor, description="Negative IP adapter image embeddings", ), ] @staticmethod # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image with self->components def encode_image(components, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(components.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = components.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = components.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = components.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = components.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, components, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, prepare_unconditional_embeds, ): image_embeds = [] if prepare_unconditional_embeds: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(components.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(components.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
) for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, components.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( components, single_ip_adapter_image, device, 1, output_hidden_state ) image_embeds.append(single_image_embeds[None, :]) if prepare_unconditional_embeds: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if prepare_unconditional_embeds: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for i, single_image_embeds in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if prepare_unconditional_embeds: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.prepare_unconditional_embeds = components.guider.num_conditions > 1 block_state.device = components._execution_device block_state.ip_adapter_embeds = self.prepare_ip_adapter_image_embeds( components, ip_adapter_image=block_state.ip_adapter_image, ip_adapter_image_embeds=None, device=block_state.device, num_images_per_prompt=1, prepare_unconditional_embeds=block_state.prepare_unconditional_embeds, ) if block_state.prepare_unconditional_embeds: block_state.negative_ip_adapter_embeds = [] for i, image_embeds in enumerate(block_state.ip_adapter_embeds): negative_image_embeds, image_embeds = image_embeds.chunk(2) block_state.negative_ip_adapter_embeds.append(negative_image_embeds) block_state.ip_adapter_embeds[i] = image_embeds self.set_block_state(state, block_state) return components, state class StableDiffusionXLTextEncoderStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def description(self) -> str: return "Text Encoder step that generate text_embeddings to guide the image generation" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("text_encoder", CLIPTextModel), ComponentSpec("text_encoder_2", CLIPTextModelWithProjection), ComponentSpec("tokenizer", CLIPTokenizer), ComponentSpec("tokenizer_2", CLIPTokenizer), ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 7.5}), default_creation_method="from_config", ), ] @property def expected_configs(self) -> List[ConfigSpec]: return [ConfigSpec("force_zeros_for_empty_prompt", True)] @property def inputs(self) -> List[InputParam]: return [ InputParam("prompt"), InputParam("prompt_2"), InputParam("negative_prompt"), InputParam("negative_prompt_2"), InputParam("cross_attention_kwargs"), InputParam("clip_skip"), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "prompt_embeds", type_hint=torch.Tensor, kwargs_type="guider_input_fields", description="text embeddings used to guide the image generation", ), OutputParam( "negative_prompt_embeds", type_hint=torch.Tensor, 
kwargs_type="guider_input_fields", description="negative text embeddings used to guide the image generation", ), OutputParam( "pooled_prompt_embeds", type_hint=torch.Tensor, kwargs_type="guider_input_fields", description="pooled text embeddings used to guide the image generation", ), OutputParam( "negative_pooled_prompt_embeds", type_hint=torch.Tensor, kwargs_type="guider_input_fields", description="negative pooled text embeddings used to guide the image generation", ), ] @staticmethod def check_inputs(block_state): if block_state.prompt is not None and ( not isinstance(block_state.prompt, str) and not isinstance(block_state.prompt, list) ): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(block_state.prompt)}") elif block_state.prompt_2 is not None and ( not isinstance(block_state.prompt_2, str) and not isinstance(block_state.prompt_2, list) ): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(block_state.prompt_2)}") @staticmethod def encode_prompt( components, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, prepare_unconditional_embeds: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt prepare_unconditional_embeds (`bool`): whether to use prepare unconditional embeddings or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ device = device or components._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(components, StableDiffusionXLLoraLoaderMixin): components._lora_scale = lora_scale # dynamically adjust the LoRA scale if components.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(components.text_encoder, lora_scale) else: scale_lora_layers(components.text_encoder, lora_scale) if components.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(components.text_encoder_2, lora_scale) else: scale_lora_layers(components.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = ( [components.tokenizer, components.tokenizer_2] if components.tokenizer is not None else [components.tokenizer_2] ) text_encoders = ( [components.text_encoder, components.text_encoder_2] if components.text_encoder is not None else [components.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(components, TextualInversionLoaderMixin): prompt = components.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. 
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and components.config.force_zeros_for_empty_prompt if prepare_unconditional_embeds and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif prepare_unconditional_embeds and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(components, TextualInversionLoaderMixin): negative_prompt = components.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if components.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=components.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=components.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if prepare_unconditional_embeds: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if components.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to( dtype=components.text_encoder_2.dtype, device=device ) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=components.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, 
num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if prepare_unconditional_embeds: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if components.text_encoder is not None: if isinstance(components, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(components.text_encoder, lora_scale) if components.text_encoder_2 is not None: if isinstance(components, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(components.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: # Get inputs and intermediates block_state = self.get_block_state(state) self.check_inputs(block_state) block_state.prepare_unconditional_embeds = components.guider.num_conditions > 1 block_state.device = components._execution_device # Encode input prompt block_state.text_encoder_lora_scale = ( block_state.cross_attention_kwargs.get("scale", None) if block_state.cross_attention_kwargs is not None else None ) ( block_state.prompt_embeds, block_state.negative_prompt_embeds, block_state.pooled_prompt_embeds, block_state.negative_pooled_prompt_embeds, ) = self.encode_prompt( components, block_state.prompt, block_state.prompt_2, block_state.device, 1, block_state.prepare_unconditional_embeds, block_state.negative_prompt, block_state.negative_prompt_2, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, lora_scale=block_state.text_encoder_lora_scale, clip_skip=block_state.clip_skip, ) # Add outputs self.set_block_state(state, block_state) return components, state class StableDiffusionXLVaeEncoderStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def description(self) -> str: return "Vae Encoder step that encodes the input image into a latent representation" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("vae", AutoencoderKL), ComponentSpec( "image_processor", VaeImageProcessor, config=FrozenDict({"vae_scale_factor": 8}), default_creation_method="from_config", ), ] @property def inputs(self) -> List[InputParam]: return [ InputParam("image", required=True), InputParam("height"), InputParam("width"), InputParam("generator"), InputParam("dtype", type_hint=torch.dtype, description="Data type of model tensor inputs"), InputParam( "preprocess_kwargs", type_hint=Optional[dict], description="A kwargs dictionary that if specified is passed along to the `ImageProcessor` as defined under `self.image_processor` in [diffusers.image_processor.VaeImageProcessor]", ), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "image_latents", type_hint=torch.Tensor, description="The latents representing the reference image for image-to-image/inpainting generation", ) ] # Modified from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline._encode_vae_image with self -> components # YiYi TODO: update the _encode_vae_image so that we can use #Copied from def _encode_vae_image(self, components, image: torch.Tensor, generator: torch.Generator): latents_mean = latents_std = None if
hasattr(components.vae.config, "latents_mean") and components.vae.config.latents_mean is not None: latents_mean = torch.tensor(components.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(components.vae.config, "latents_std") and components.vae.config.latents_std is not None: latents_std = torch.tensor(components.vae.config.latents_std).view(1, 4, 1, 1) dtype = image.dtype if components.vae.config.force_upcast: image = image.float() components.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(components.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(components.vae.encode(image), generator=generator) if components.vae.config.force_upcast: components.vae.to(dtype) image_latents = image_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=image_latents.device, dtype=dtype) latents_std = latents_std.to(device=image_latents.device, dtype=dtype) image_latents = (image_latents - latents_mean) * components.vae.config.scaling_factor / latents_std else: image_latents = components.vae.config.scaling_factor * image_latents return image_latents @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.preprocess_kwargs = block_state.preprocess_kwargs or {} block_state.device = components._execution_device block_state.dtype = block_state.dtype if block_state.dtype is not None else components.vae.dtype image = components.image_processor.preprocess( block_state.image, height=block_state.height, width=block_state.width, **block_state.preprocess_kwargs ) image = image.to(device=block_state.device, dtype=block_state.dtype) block_state.batch_size = image.shape[0] # if generator is a list, make sure the length of it matches the length of images (both should be batch_size) if isinstance(block_state.generator, list) and len(block_state.generator) != block_state.batch_size: raise ValueError( f"You have passed a list of generators of length {len(block_state.generator)}, but requested an effective batch" f" size of {block_state.batch_size}. Make sure the batch size matches the length of the generators." 
) block_state.image_latents = self._encode_vae_image(components, image=image, generator=block_state.generator) self.set_block_state(state, block_state) return components, state class StableDiffusionXLInpaintVaeEncoderStep(ModularPipelineBlocks): model_name = "stable-diffusion-xl" @property def expected_components(self) -> List[ComponentSpec]: return [ ComponentSpec("vae", AutoencoderKL), ComponentSpec( "image_processor", VaeImageProcessor, config=FrozenDict({"vae_scale_factor": 8}), default_creation_method="from_config", ), ComponentSpec( "mask_processor", VaeImageProcessor, config=FrozenDict( {"do_normalize": False, "vae_scale_factor": 8, "do_binarize": True, "do_convert_grayscale": True} ), default_creation_method="from_config", ), ] @property def description(self) -> str: return "Vae encoder step that prepares the image and mask for the inpainting process" @property def inputs(self) -> List[InputParam]: return [ InputParam("height"), InputParam("width"), InputParam("image", required=True), InputParam("mask_image", required=True), InputParam("padding_mask_crop"), InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"), InputParam("generator"), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "image_latents", type_hint=torch.Tensor, description="The latents representation of the input image" ), OutputParam("mask", type_hint=torch.Tensor, description="The mask to use for the inpainting process"), OutputParam( "masked_image_latents", type_hint=torch.Tensor, description="The masked image latents to use for the inpainting process (only for inpainting-specific unet)", ), OutputParam( "crops_coords", type_hint=Optional[Tuple[int, int]], description="The crop coordinates to use for the preprocess/postprocess of the image and mask", ), ] # Modified from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline._encode_vae_image with self -> components # YiYi TODO: update the _encode_vae_image so that we can use #Copied from def _encode_vae_image(self, components, image: torch.Tensor, generator: torch.Generator): latents_mean = latents_std = None if hasattr(components.vae.config, "latents_mean") and components.vae.config.latents_mean is not None: latents_mean = torch.tensor(components.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(components.vae.config, "latents_std") and components.vae.config.latents_std is not None: latents_std = torch.tensor(components.vae.config.latents_std).view(1, 4, 1, 1) dtype = image.dtype if components.vae.config.force_upcast: image = image.float() components.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(components.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(components.vae.encode(image), generator=generator) if components.vae.config.force_upcast: components.vae.to(dtype) image_latents = image_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=image_latents.device, dtype=dtype) latents_std = latents_std.to(device=image_latents.device, dtype=dtype) image_latents = (image_latents - latents_mean) * components.vae.config.scaling_factor / latents_std else: image_latents = components.vae.config.scaling_factor * image_latents return image_latents # modified from
diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline.prepare_mask_latents # do not accept do_classifier_free_guidance def prepare_mask_latents( self, components, mask, masked_image, batch_size, height, width, dtype, device, generator ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // components.vae_scale_factor, width // components.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(components, masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." 
) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) # aligning device to prevent device errors when concatenating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents @torch.no_grad() def __call__(self, components: StableDiffusionXLModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.dtype = block_state.dtype if block_state.dtype is not None else components.vae.dtype block_state.device = components._execution_device if block_state.height is None: block_state.height = components.default_height if block_state.width is None: block_state.width = components.default_width if block_state.padding_mask_crop is not None: block_state.crops_coords = components.mask_processor.get_crop_region( block_state.mask_image, block_state.width, block_state.height, pad=block_state.padding_mask_crop ) block_state.resize_mode = "fill" else: block_state.crops_coords = None block_state.resize_mode = "default" image = components.image_processor.preprocess( block_state.image, height=block_state.height, width=block_state.width, crops_coords=block_state.crops_coords, resize_mode=block_state.resize_mode, ) image = image.to(dtype=torch.float32) mask = components.mask_processor.preprocess( block_state.mask_image, height=block_state.height, width=block_state.width, resize_mode=block_state.resize_mode, crops_coords=block_state.crops_coords, ) block_state.masked_image = image * (mask < 0.5) block_state.batch_size = image.shape[0] image = image.to(device=block_state.device, dtype=block_state.dtype) block_state.image_latents = self._encode_vae_image(components, image=image, generator=block_state.generator) # 7. Prepare mask latent variables block_state.mask, block_state.masked_image_latents = self.prepare_mask_latents( components, mask, block_state.masked_image, block_state.batch_size, block_state.height, block_state.width, block_state.dtype, block_state.device, block_state.generator, ) self.set_block_state(state, block_state) return components, state
diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/encoders.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/encoders.py", "repo_id": "diffusers", "token_count": 18510 }
169
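The encoder blocks above funnel images through `retrieve_latents` and `_encode_vae_image` to reach the VAE latent space. Here is a minimal sketch of that encode path outside the block framework; it assumes the dotted import path mirrors the file path above, uses a random tensor as a stand-in for a preprocessed image, and `madebyollin/sdxl-vae-fp16-fix` is just one example of an SDXL-compatible VAE checkpoint:

```py
import torch
from diffusers import AutoencoderKL
from diffusers.modular_pipelines.stable_diffusion_xl.encoders import retrieve_latents

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix")
image = torch.rand(1, 3, 512, 512) * 2 - 1  # stand-in for a preprocessed image in [-1, 1]

with torch.no_grad():
    posterior = vae.encode(image)  # output exposes .latent_dist, so retrieve_latents samples from it
    latents = retrieve_latents(posterior, generator=torch.Generator().manual_seed(0))

latents = latents * vae.config.scaling_factor  # same scaling applied in _encode_vae_image above
print(latents.shape)  # torch.Size([1, 4, 64, 64])
```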
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline, ) _dummy_objects.update( { "AmusedPipeline": AmusedPipeline, "AmusedImg2ImgPipeline": AmusedImg2ImgPipeline, "AmusedInpaintPipeline": AmusedInpaintPipeline, } ) else: _import_structure["pipeline_amused"] = ["AmusedPipeline"] _import_structure["pipeline_amused_img2img"] = ["AmusedImg2ImgPipeline"] _import_structure["pipeline_amused_inpaint"] = ["AmusedInpaintPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AmusedPipeline, ) else: from .pipeline_amused import AmusedPipeline from .pipeline_amused_img2img import AmusedImg2ImgPipeline from .pipeline_amused_inpaint import AmusedInpaintPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/amused/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/amused/__init__.py", "repo_id": "diffusers", "token_count": 796 }
170
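The amused `__init__.py` above is an instance of diffusers' optional-dependency pattern: when `torch` or `transformers` is missing, dummy stand-ins are exported that only raise at use time, instead of failing at import time. A self-contained sketch of the same pattern (the guard variable and the fallback class body here are illustrative, not library code):

```py
# Illustrative reimplementation of the dummy-object fallback used above.
try:
    import torch  # noqa: F401
    import transformers  # noqa: F401
    _DEPS_OK = True
except ImportError:
    _DEPS_OK = False

if _DEPS_OK:
    from diffusers import AmusedPipeline  # the real pipeline class
else:
    class AmusedPipeline:  # stand-in mirroring dummy_torch_and_transformers_objects
        def __init__(self, *args, **kwargs):
            raise ImportError("AmusedPipeline requires the `torch` and `transformers` packages.")
```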
# Copyright 2025 CVSSP, ByteDance and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import ( ClapFeatureExtractor, ClapModel, GPT2LMHeadModel, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan, T5EncoderModel, T5Tokenizer, T5TokenizerFast, VitsModel, VitsTokenizer, ) from ...models import AutoencoderKL from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( is_accelerate_available, is_accelerate_version, is_librosa_available, logging, replace_example_docstring, ) from ...utils.import_utils import is_transformers_version from ...utils.torch_utils import empty_device_cache, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel if is_librosa_available(): import librosa from ...utils import is_torch_xla_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import scipy >>> import torch >>> from diffusers import AudioLDM2Pipeline >>> repo_id = "cvssp/audioldm2" >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> # define the prompts >>> prompt = "The sound of a hammer hitting a wooden surface." >>> negative_prompt = "Low quality." >>> # set the seed for generator >>> generator = torch.Generator("cuda").manual_seed(0) >>> # run the generation >>> audio = pipe( ... prompt, ... negative_prompt=negative_prompt, ... num_inference_steps=200, ... audio_length_in_s=10.0, ... num_waveforms_per_prompt=3, ... generator=generator, ... ).audios >>> # save the best audio sample (index 0) as a .wav file >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio[0]) ``` ```py # Using AudioLDM2 for Text To Speech >>> import scipy >>> import torch >>> from diffusers import AudioLDM2Pipeline >>> repo_id = "anhnct/audioldm2_gigaspeech" >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> # define the prompts >>> prompt = "A female reporter is speaking" >>> transcript = "wish you have a good day" >>> # set the seed for generator >>> generator = torch.Generator("cuda").manual_seed(0) >>> # run the generation >>> audio = pipe( ... prompt, ... transcription=transcript, ... num_inference_steps=200, ... audio_length_in_s=10.0, ... num_waveforms_per_prompt=2, ... generator=generator, ... max_new_tokens=512, # Must set max_new_tokens equal to 512 for TTS ...
).audios >>> # save the best audio sample (index 0) as a .wav file >>> scipy.io.wavfile.write("tts.wav", rate=16000, data=audio[0]) ``` """ def prepare_inputs_for_generation( inputs_embeds, attention_mask=None, past_key_values=None, **kwargs, ): if past_key_values is not None: # only last token for inputs_embeds if past is defined in kwargs inputs_embeds = inputs_embeds[:, -1:] return { "inputs_embeds": inputs_embeds, "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), } class AudioLDM2Pipeline(DiffusionPipeline): r""" Pipeline for text-to-audio generation using AudioLDM2. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.ClapModel`]): First frozen text-encoder. AudioLDM2 uses the joint audio-text embedding model [CLAP](https://huggingface.co/docs/transformers/model_doc/clap#transformers.CLAPTextModelWithProjection), specifically the [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. The text branch is used to encode the text prompt to a prompt embedding. The full audio-text model is used to rank generated waveforms against the text prompt by computing similarity scores. text_encoder_2 ([`~transformers.T5EncoderModel`, `~transformers.VitsModel`]): Second frozen text-encoder. AudioLDM2 uses the encoder of [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) variant. The second frozen text-encoder is used for TTS; AudioLDM2 uses the encoder of [Vits](https://huggingface.co/docs/transformers/model_doc/vits#transformers.VitsModel). projection_model ([`AudioLDM2ProjectionModel`]): A trained model used to linearly project the hidden-states from the first and second text encoder models and insert learned SOS and EOS token embeddings. The projected hidden-states from the two text encoders are concatenated to give the input to the language model. A learned position embedding is also applied to the Vits hidden-states. language_model ([`~transformers.GPT2Model`]): An auto-regressive language model used to generate a sequence of hidden-states conditioned on the projected outputs from the two text encoders. tokenizer ([`~transformers.RobertaTokenizer`]): Tokenizer to tokenize text for the first frozen text-encoder. tokenizer_2 ([`~transformers.T5Tokenizer`, `~transformers.VitsTokenizer`]): Tokenizer to tokenize text for the second frozen text-encoder. feature_extractor ([`~transformers.ClapFeatureExtractor`]): Feature extractor to pre-process generated audio waveforms to log-mel spectrograms for automatic scoring. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded audio latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. vocoder ([`~transformers.SpeechT5HifiGan`]): Vocoder of class `SpeechT5HifiGan` to convert the mel-spectrogram latents to the final audio waveform.
""" def __init__( self, vae: AutoencoderKL, text_encoder: ClapModel, text_encoder_2: Union[T5EncoderModel, VitsModel], projection_model: AudioLDM2ProjectionModel, language_model: GPT2LMHeadModel, tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], tokenizer_2: Union[T5Tokenizer, T5TokenizerFast, VitsTokenizer], feature_extractor: ClapFeatureExtractor, unet: AudioLDM2UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, vocoder: SpeechT5HifiGan, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, projection_model=projection_model, language_model=language_model, tokenizer=tokenizer, tokenizer_2=tokenizer_2, feature_extractor=feature_extractor, unet=unet, scheduler=scheduler, vocoder=vocoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") torch_device = torch.device(device) device_index = torch_device.index if gpu_id is not None and device_index is not None: raise ValueError( f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}" f"Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}" ) device_type = torch_device.type device_str = device_type if gpu_id or torch_device.index: device_str = f"{device_str}:{gpu_id or torch_device.index}" device = torch.device(device_str) if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) empty_device_cache(device.type) model_sequence = [ self.text_encoder.text_model, self.text_encoder.text_projection, self.text_encoder_2, self.projection_model, self.language_model, self.unet, self.vae, self.vocoder, self.text_encoder, ] hook = None for cpu_offloaded_model in model_sequence: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) # We'll offload the last model manually. 
self.final_offload_hook = hook def generate_language_model( self, inputs_embeds: torch.Tensor = None, max_new_tokens: int = 8, **model_kwargs, ): """ Generates a sequence of hidden-states from the language model, conditioned on the embedding inputs. Parameters: inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): The sequence used as a prompt for the generation. max_new_tokens (`int`): Number of new tokens to generate. model_kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of additional model-specific kwargs that will be forwarded to the `forward` function of the model. Return: `inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): The sequence of generated hidden-states. """ cache_position_kwargs = {} if is_transformers_version("<", "4.52.1"): cache_position_kwargs["input_ids"] = inputs_embeds else: cache_position_kwargs["seq_length"] = inputs_embeds.shape[0] cache_position_kwargs["device"] = ( self.language_model.device if getattr(self, "language_model", None) is not None else self.device ) cache_position_kwargs["model_kwargs"] = model_kwargs max_new_tokens = max_new_tokens if max_new_tokens is not None else self.language_model.config.max_new_tokens model_kwargs = self.language_model._get_initial_cache_position(**cache_position_kwargs) for _ in range(max_new_tokens): # prepare model inputs model_inputs = prepare_inputs_for_generation(inputs_embeds, **model_kwargs) # forward pass to get next hidden states output = self.language_model(**model_inputs, output_hidden_states=True, return_dict=True) next_hidden_states = output.hidden_states[-1] # Update the model input inputs_embeds = torch.cat([inputs_embeds, next_hidden_states[:, -1:, :]], dim=1) # Update generated hidden states, model inputs, and length for next step model_kwargs = self.language_model._update_model_kwargs_for_generation(output, model_kwargs) return inputs_embeds[:, -max_new_tokens:, :] def encode_prompt( self, prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, transcription=None, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, generated_prompt_embeds: Optional[torch.Tensor] = None, negative_generated_prompt_embeds: Optional[torch.Tensor] = None, attention_mask: Optional[torch.LongTensor] = None, negative_attention_mask: Optional[torch.LongTensor] = None, max_new_tokens: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded transcription (`str` or `List[str]`): transcription of text to speech device (`torch.device`): torch device num_waveforms_per_prompt (`int`): number of waveforms that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the audio generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-computed text embeddings from the Flan T5 model. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be computed from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-computed negative text embeddings from the Flan T5 model. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from `negative_prompt` input argument. generated_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings from the GPT2 language model. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_generated_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings from the GPT2 language model. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from `negative_prompt` input argument. attention_mask (`torch.LongTensor`, *optional*): Pre-computed attention mask to be applied to the `prompt_embeds`. If not provided, attention mask will be computed from `prompt` input argument. negative_attention_mask (`torch.LongTensor`, *optional*): Pre-computed attention mask to be applied to the `negative_prompt_embeds`. If not provided, attention mask will be computed from `negative_prompt` input argument. max_new_tokens (`int`, *optional*, defaults to None): The number of new tokens to generate with the GPT2 language model. Returns: prompt_embeds (`torch.Tensor`): Text embeddings from the Flan T5 model. attention_mask (`torch.LongTensor`): Attention mask to be applied to the `prompt_embeds`. generated_prompt_embeds (`torch.Tensor`): Text embeddings generated from the GPT2 language model. Example: ```python >>> import scipy >>> import torch >>> from diffusers import AudioLDM2Pipeline >>> repo_id = "cvssp/audioldm2" >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> # Get text embedding vectors >>> prompt_embeds, attention_mask, generated_prompt_embeds = pipe.encode_prompt( ... prompt="Techno music with a strong, upbeat tempo and high melodic riffs", ... device="cuda", ... do_classifier_free_guidance=True, ... ) >>> # Pass text embeddings to pipeline for text-conditional audio generation >>> audio = pipe( ... prompt_embeds=prompt_embeds, ... attention_mask=attention_mask, ... generated_prompt_embeds=generated_prompt_embeds, ... num_inference_steps=200, ... audio_length_in_s=10.0, ... 
).audios[0] >>> # save generated audio sample >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) ```""" if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] is_vits_text_encoder = isinstance(self.text_encoder_2, VitsModel) if is_vits_text_encoder: text_encoders = [self.text_encoder, self.text_encoder_2.text_encoder] else: text_encoders = [self.text_encoder, self.text_encoder_2] if prompt_embeds is None: prompt_embeds_list = [] attention_mask_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): use_prompt = isinstance( tokenizer, (RobertaTokenizer, RobertaTokenizerFast, T5Tokenizer, T5TokenizerFast) ) text_inputs = tokenizer( prompt if use_prompt else transcription, padding="max_length" if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, VitsTokenizer)) else True, max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( f"The following part of your input was truncated because {text_encoder.config.model_type} can " f"only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids.to(device) attention_mask = attention_mask.to(device) if text_encoder.config.model_type == "clap": prompt_embeds = text_encoder.get_text_features( text_input_ids, attention_mask=attention_mask, ) # append the seq-len dim: (bs, hidden_size) -> (bs, seq_len, hidden_size) prompt_embeds = prompt_embeds[:, None, :] # make sure that we attend to this single hidden-state attention_mask = attention_mask.new_ones((batch_size, 1)) elif is_vits_text_encoder: # Add end_token_id and attention mask in the end of sequence phonemes for text_input_id, text_attention_mask in zip(text_input_ids, attention_mask): for idx, phoneme_id in enumerate(text_input_id): if phoneme_id == 0: text_input_id[idx] = 182 text_attention_mask[idx] = 1 break prompt_embeds = text_encoder( text_input_ids, attention_mask=attention_mask, padding_mask=attention_mask.unsqueeze(-1) ) prompt_embeds = prompt_embeds[0] else: prompt_embeds = text_encoder( text_input_ids, attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds_list.append(prompt_embeds) attention_mask_list.append(attention_mask) projection_output = self.projection_model( hidden_states=prompt_embeds_list[0], hidden_states_1=prompt_embeds_list[1], attention_mask=attention_mask_list[0], attention_mask_1=attention_mask_list[1], ) projected_prompt_embeds = projection_output.hidden_states projected_attention_mask = projection_output.attention_mask generated_prompt_embeds = self.generate_language_model( projected_prompt_embeds, attention_mask=projected_attention_mask, max_new_tokens=max_new_tokens, ) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) attention_mask = ( attention_mask.to(device=device) if attention_mask is not None else torch.ones(prompt_embeds.shape[:2], dtype=torch.long, device=device) ) generated_prompt_embeds = 
generated_prompt_embeds.to(dtype=self.language_model.dtype, device=device) bs_embed, seq_len, hidden_size = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len, hidden_size) # duplicate attention mask for each generation per prompt attention_mask = attention_mask.repeat(1, num_waveforms_per_prompt) attention_mask = attention_mask.view(bs_embed * num_waveforms_per_prompt, seq_len) bs_embed, seq_len, hidden_size = generated_prompt_embeds.shape # duplicate generated embeddings for each generation per prompt, using mps friendly method generated_prompt_embeds = generated_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) generated_prompt_embeds = generated_prompt_embeds.view( bs_embed * num_waveforms_per_prompt, seq_len, hidden_size ) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt negative_prompt_embeds_list = [] negative_attention_mask_list = [] max_length = prompt_embeds.shape[1] for tokenizer, text_encoder in zip(tokenizers, text_encoders): uncond_input = tokenizer( uncond_tokens, padding="max_length", max_length=tokenizer.model_max_length if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, VitsTokenizer)) else max_length, truncation=True, return_tensors="pt", ) uncond_input_ids = uncond_input.input_ids.to(device) negative_attention_mask = uncond_input.attention_mask.to(device) if text_encoder.config.model_type == "clap": negative_prompt_embeds = text_encoder.get_text_features( uncond_input_ids, attention_mask=negative_attention_mask, ) # append the seq-len dim: (bs, hidden_size) -> (bs, seq_len, hidden_size) negative_prompt_embeds = negative_prompt_embeds[:, None, :] # make sure that we attend to this single hidden-state negative_attention_mask = negative_attention_mask.new_ones((batch_size, 1)) elif is_vits_text_encoder: negative_prompt_embeds = torch.zeros( batch_size, tokenizer.model_max_length, text_encoder.config.hidden_size, ).to(dtype=self.text_encoder_2.dtype, device=device) negative_attention_mask = torch.zeros(batch_size, tokenizer.model_max_length).to( dtype=self.text_encoder_2.dtype, device=device ) else: negative_prompt_embeds = text_encoder( uncond_input_ids, attention_mask=negative_attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_attention_mask_list.append(negative_attention_mask) projection_output = self.projection_model( hidden_states=negative_prompt_embeds_list[0], hidden_states_1=negative_prompt_embeds_list[1], attention_mask=negative_attention_mask_list[0], attention_mask_1=negative_attention_mask_list[1], ) negative_projected_prompt_embeds = 
projection_output.hidden_states negative_projected_attention_mask = projection_output.attention_mask negative_generated_prompt_embeds = self.generate_language_model( negative_projected_prompt_embeds, attention_mask=negative_projected_attention_mask, max_new_tokens=max_new_tokens, ) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) negative_attention_mask = ( negative_attention_mask.to(device=device) if negative_attention_mask is not None else torch.ones(negative_prompt_embeds.shape[:2], dtype=torch.long, device=device) ) negative_generated_prompt_embeds = negative_generated_prompt_embeds.to( dtype=self.language_model.dtype, device=device ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len, -1) # duplicate unconditional attention mask for each generation per prompt negative_attention_mask = negative_attention_mask.repeat(1, num_waveforms_per_prompt) negative_attention_mask = negative_attention_mask.view(batch_size * num_waveforms_per_prompt, seq_len) # duplicate unconditional generated embeddings for each generation per prompt seq_len = negative_generated_prompt_embeds.shape[1] negative_generated_prompt_embeds = negative_generated_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) negative_generated_prompt_embeds = negative_generated_prompt_embeds.view( batch_size * num_waveforms_per_prompt, seq_len, -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) attention_mask = torch.cat([negative_attention_mask, attention_mask]) generated_prompt_embeds = torch.cat([negative_generated_prompt_embeds, generated_prompt_embeds]) return prompt_embeds, attention_mask, generated_prompt_embeds # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform def mel_spectrogram_to_waveform(self, mel_spectrogram): if mel_spectrogram.dim() == 4: mel_spectrogram = mel_spectrogram.squeeze(1) waveform = self.vocoder(mel_spectrogram) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 waveform = waveform.cpu().float() return waveform def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype): if not is_librosa_available(): logger.info( "Automatic scoring of the generated audio waveforms against the input prompt text requires the " "`librosa` package to resample the generated waveforms. Returning the audios in the order they were " "generated. To enable automatic scoring, install `librosa` with: `pip install librosa`." 
) return audio inputs = self.tokenizer(text, return_tensors="pt", padding=True) resampled_audio = librosa.resample( audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate ) inputs["input_features"] = self.feature_extractor( list(resampled_audio), return_tensors="pt", sampling_rate=self.feature_extractor.sampling_rate ).input_features.type(dtype) inputs = inputs.to(device) # compute the audio-text similarity score using the CLAP model logits_per_text = self.text_encoder(**inputs).logits_per_text # sort by the highest matching generations per prompt indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt] audio = torch.index_select(audio, 0, indices.reshape(-1).cpu()) return audio # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, transcription=None, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, generated_prompt_embeds=None, negative_generated_prompt_embeds=None, attention_mask=None, negative_attention_mask=None, ): min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor if audio_length_in_s < min_audio_length_in_s: raise ValueError( f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " f"is {audio_length_in_s}." ) if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: raise ValueError( f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " f"{self.vae_scale_factor}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and (prompt_embeds is None or generated_prompt_embeds is None): raise ValueError( "Provide either `prompt`, or `prompt_embeds` and `generated_prompt_embeds`. Cannot leave " "`prompt` undefined without specifying both `prompt_embeds` and `generated_prompt_embeds`." 
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_embeds is not None and negative_generated_prompt_embeds is None:
            raise ValueError(
                "Cannot forward `negative_prompt_embeds` without `negative_generated_prompt_embeds`. Please make "
                "sure that both arguments are specified."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
            if attention_mask is not None and attention_mask.shape != prompt_embeds.shape[:2]:
                raise ValueError(
                    "`attention_mask` should have the same batch size and sequence length as `prompt_embeds`, but"
                    f" got: `attention_mask` {attention_mask.shape} != `prompt_embeds` {prompt_embeds.shape}"
                )

        if transcription is None:
            if self.text_encoder_2.config.model_type == "vits":
                raise ValueError("Cannot forward without `transcription`, which is required by the VITS text encoder.")
        elif not isinstance(transcription, str) and not isinstance(transcription, list):
            raise ValueError(f"`transcription` has to be of type `str` or `list` but is {type(transcription)}")

        if generated_prompt_embeds is not None and negative_generated_prompt_embeds is not None:
            if generated_prompt_embeds.shape != negative_generated_prompt_embeds.shape:
                raise ValueError(
                    "`generated_prompt_embeds` and `negative_generated_prompt_embeds` must have the same shape when "
                    f"passed directly, but got: `generated_prompt_embeds` {generated_prompt_embeds.shape} != "
                    f"`negative_generated_prompt_embeds` {negative_generated_prompt_embeds.shape}."
                )
            if (
                negative_attention_mask is not None
                and negative_attention_mask.shape != negative_prompt_embeds.shape[:2]
            ):
                raise ValueError(
                    "`negative_attention_mask` should have the same batch size and sequence length as"
                    f" `negative_prompt_embeds`, but got: `negative_attention_mask` {negative_attention_mask.shape} !="
                    f" `negative_prompt_embeds` {negative_prompt_embeds.shape}"
                )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim
    def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(self.vocoder.config.model_in_dim) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        transcription: Union[str, List[str]] = None,
        audio_length_in_s: Optional[float] = None,
        num_inference_steps: int = 200,
        guidance_scale: float = 3.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_waveforms_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        generated_prompt_embeds: Optional[torch.Tensor] = None,
        negative_generated_prompt_embeds: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        negative_attention_mask: Optional[torch.LongTensor] = None,
        max_new_tokens: Optional[int] = None,
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        output_type: Optional[str] = "np",
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`.
            transcription (`str` or `List[str]`, *optional*):
                The transcript for text-to-speech generation. Required when the second text encoder is a VITS model.
            audio_length_in_s (`float`, *optional*, defaults to 10.24):
                The length of the generated audio sample in seconds.
            num_inference_steps (`int`, *optional*, defaults to 200):
                The number of denoising steps. More denoising steps usually lead to a higher quality audio at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 3.5):
                A higher guidance scale value encourages the model to generate audio that is closely linked to the
                text `prompt` at the expense of lower sound quality. Guidance scale is enabled when
                `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in audio generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale <= 1`).
            num_waveforms_per_prompt (`int`, *optional*, defaults to 1):
                The number of waveforms to generate per prompt. If `num_waveforms_per_prompt > 1`, then automatic
                scoring is performed between the generated outputs and the text prompt. This scoring ranks the
                generated waveforms based on their cosine similarity with the text input in the joint text-audio
                embedding space.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only
                applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for spectrogram
                generation. Can be used to tweak the same generation with different prompts.
                If not provided, a latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            generated_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings from the GPT2 language model. Can be used to easily tweak text inputs,
                *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input
                argument.
            negative_generated_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings from the GPT2 language model. Can be used to easily tweak text
                inputs, *e.g.* prompt weighting. If not provided, `negative_generated_prompt_embeds` will be computed
                from the `negative_prompt` input argument.
            attention_mask (`torch.LongTensor`, *optional*):
                Pre-computed attention mask to be applied to the `prompt_embeds`. If not provided, the attention mask
                will be computed from the `prompt` input argument.
            negative_attention_mask (`torch.LongTensor`, *optional*):
                Pre-computed attention mask to be applied to the `negative_prompt_embeds`. If not provided, the
                attention mask will be computed from the `negative_prompt` input argument.
            max_new_tokens (`int`, *optional*, defaults to `None`):
                The number of new tokens to generate with the GPT2 language model. If not provided, the number of
                tokens is taken from the model config.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that is called every `callback_steps` steps during inference, with the following
                arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that, if specified, is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            output_type (`str`, *optional*, defaults to `"np"`):
                The output format of the generated audio. Choose between `"np"` to return a NumPy `np.ndarray` or
                `"pt"` to return a PyTorch `torch.Tensor` object. Set to `"latent"` to return the latent diffusion
                model (LDM) output.

        Examples:

        Returns:
            [`~pipelines.AudioPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated audio.
        """
        # 0.
Convert audio input length from seconds to spectrogram height vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor height = int(audio_length_in_s / vocoder_upsample_factor) original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) if height % self.vae_scale_factor != 0: height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor logger.info( f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " f"denoising process." ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, transcription, negative_prompt, prompt_embeds, negative_prompt_embeds, generated_prompt_embeds, negative_generated_prompt_embeds, attention_mask, negative_attention_mask, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds, attention_mask, generated_prompt_embeds = self.encode_prompt( prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, transcription, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, generated_prompt_embeds=generated_prompt_embeds, negative_generated_prompt_embeds=negative_generated_prompt_embeds, attention_mask=attention_mask, negative_attention_mask=negative_attention_mask, max_new_tokens=max_new_tokens, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_waveforms_per_prompt, num_channels_latents, height, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=generated_prompt_embeds, encoder_hidden_states_1=prompt_embeds, encoder_attention_mask_1=attention_mask, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() self.maybe_free_model_hooks() # 8. Post-processing if not output_type == "latent": latents = 1 / self.vae.config.scaling_factor * latents mel_spectrogram = self.vae.decode(latents).sample else: return AudioPipelineOutput(audios=latents) audio = self.mel_spectrogram_to_waveform(mel_spectrogram) audio = audio[:, :original_waveform_length] # 9. Automatic scoring if num_waveforms_per_prompt > 1 and prompt is not None: audio = self.score_waveforms( text=prompt, audio=audio, num_waveforms_per_prompt=num_waveforms_per_prompt, device=device, dtype=prompt_embeds.dtype, ) if output_type == "np": audio = audio.numpy() if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio)
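A note on the guidance step in the denoising loop above: the negative and positive embeddings are concatenated into one doubled batch before the UNet call, and the two halves of the noise prediction are recombined afterwards. A minimal, self-contained sketch of that recombination follows; the tensor shapes and the use of random data are illustrative assumptions, not values taken from the pipeline.

```python
import torch

# illustrative shapes: (2 * batch, channels, height, width); the factor of 2 is
# the [unconditional, conditional] concatenation done before the UNet call
noise_pred = torch.randn(2 * 4, 8, 64, 16)
guidance_scale = 3.5  # the pipeline's default

# split the doubled batch back into its unconditional and text-conditional halves
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)

# move the prediction away from the unconditional direction, scaled by
# guidance_scale; guidance_scale == 1 recovers the purely conditional prediction
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```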
diffusers/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py", "repo_id": "diffusers", "token_count": 24403 }
171
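The `score_waveforms` method in the pipeline above ranks candidate waveforms by their CLAP text-audio similarity and keeps the best `num_waveforms_per_prompt`. A minimal sketch of the ranking step alone, restricted to a single prompt; the stand-in logits and waveform shapes are assumptions for illustration.

```python
import torch

num_candidates = 4            # waveforms generated for the prompt
num_waveforms_per_prompt = 2  # how many to keep after ranking

# stand-in CLAP similarity logits for one prompt (shape: [1, num_candidates])
logits_per_text = torch.randn(1, num_candidates)
audio = torch.randn(num_candidates, 16000)  # one 1-second waveform per candidate

# sort candidates by similarity, keep the top-k, and gather those waveforms
indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt]
audio = torch.index_select(audio, 0, indices.reshape(-1))
```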
from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_ddim": ["DDIMPipeline"]} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_ddim import DDIMPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/ddim/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ddim/__init__.py", "repo_id": "diffusers", "token_count": 180 }
172
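The `__init__.py` above registers `DDIMPipeline` in `_import_structure` and defers the real import to `_LazyModule`, which replaces the module in `sys.modules` and imports submodules on first attribute access. A sketch of what this buys the caller; the attribute-access-triggers-import behavior reflects how `_LazyModule` works, but the timing contrast shown is an assumption about a cold import.

```python
import importlib
import time

# importing the subpackage is cheap: _LazyModule only records the import structure
start = time.perf_counter()
ddim = importlib.import_module("diffusers.pipelines.ddim")
print(f"package import: {time.perf_counter() - start:.3f}s")

# the first attribute access triggers the actual import of pipeline_ddim
start = time.perf_counter()
DDIMPipeline = ddim.DDIMPipeline
print(f"attribute access (real import): {time.perf_counter() - start:.3f}s")
```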
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_librosa_available, is_note_seq_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_pt_objects _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: _import_structure["latent_diffusion_uncond"] = ["LDMPipeline"] _import_structure["pndm"] = ["PNDMPipeline"] _import_structure["repaint"] = ["RePaintPipeline"] _import_structure["score_sde_ve"] = ["ScoreSdeVePipeline"] _import_structure["stochastic_karras_ve"] = ["KarrasVePipeline"] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["alt_diffusion"] = [ "AltDiffusionImg2ImgPipeline", "AltDiffusionPipeline", "AltDiffusionPipelineOutput", ] _import_structure["versatile_diffusion"] = [ "VersatileDiffusionDualGuidedPipeline", "VersatileDiffusionImageVariationPipeline", "VersatileDiffusionPipeline", "VersatileDiffusionTextToImagePipeline", ] _import_structure["vq_diffusion"] = ["VQDiffusionPipeline"] _import_structure["stable_diffusion_variants"] = [ "CycleDiffusionPipeline", "StableDiffusionInpaintPipelineLegacy", "StableDiffusionPix2PixZeroPipeline", "StableDiffusionParadigmsPipeline", "StableDiffusionModelEditingPipeline", ] try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_librosa_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) else: _import_structure["audio_diffusion"] = ["AudioDiffusionPipeline", "Mel"] try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) else: _import_structure["spectrogram_diffusion"] = ["MidiProcessor", "SpectrogramDiffusionPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_pt_objects import * else: from .latent_diffusion_uncond import LDMPipeline from .pndm import PNDMPipeline from .repaint import RePaintPipeline from .score_sde_ve import ScoreSdeVePipeline from .stochastic_karras_ve import KarrasVePipeline try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AltDiffusionPipelineOutput from .audio_diffusion import AudioDiffusionPipeline, Mel from .spectrogram_diffusion import SpectrogramDiffusionPipeline from .stable_diffusion_variants import ( CycleDiffusionPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionModelEditingPipeline, StableDiffusionParadigmsPipeline, 
StableDiffusionPix2PixZeroPipeline, ) from .stochastic_karras_ve import KarrasVePipeline from .versatile_diffusion import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) from .vq_diffusion import VQDiffusionPipeline try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_librosa_objects import * else: from .audio_diffusion import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .spectrogram_diffusion import ( MidiProcessor, SpectrogramDiffusionPipeline, ) else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/deprecated/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/__init__.py", "repo_id": "diffusers", "token_count": 2227 }
173
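The `deprecated/__init__.py` above repeats one idiom: probe for a soft dependency, export the real objects when it is present, and fall back to dummy placeholders that raise only on use. A generic, self-contained sketch of the same pattern; the `AudioTool` name is hypothetical, and the availability check mirrors the spirit of diffusers' helpers rather than their exact implementation.

```python
import importlib.util


def is_librosa_available() -> bool:
    # probe for the package without importing it
    return importlib.util.find_spec("librosa") is not None


class DummyAudioTool:
    # stands in for the real object so `from pkg import AudioTool` never fails;
    # the error is raised only when someone actually tries to instantiate it
    def __init__(self, *args, **kwargs):
        raise ImportError("AudioTool requires librosa: pip install librosa")


if is_librosa_available():
    import librosa  # safe: the module is known to be installed

    class AudioTool:
        def resample(self, waveform, orig_sr, target_sr):
            return librosa.resample(waveform, orig_sr=orig_sr, target_sr=target_sr)
else:
    AudioTool = DummyAudioTool
```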
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import torch from ....models import UNet2DModel from ....schedulers import ScoreSdeVeScheduler from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class ScoreSdeVePipeline(DiffusionPipeline): r""" Pipeline for unconditional image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image. scheduler ([`ScoreSdeVeScheduler`]): A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image. """ unet: UNet2DModel scheduler: ScoreSdeVeScheduler def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, num_inference_steps: int = 2000, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. generator (`torch.Generator`, `optional`): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. output_type (`str`, `optional`, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. 
""" img_size = self.unet.config.sample_size shape = (batch_size, 3, img_size, img_size) model = self.unet sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma sample = sample.to(self.device) self.scheduler.set_timesteps(num_inference_steps) self.scheduler.set_sigmas(num_inference_steps) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device) # correction step for _ in range(self.scheduler.config.correct_steps): model_output = self.unet(sample, sigma_t).sample sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample # prediction step model_output = model(sample, sigma_t).sample output = self.scheduler.step_pred(model_output, t, sample, generator=generator) sample, sample_mean = output.prev_sample, output.prev_sample_mean sample = sample_mean.clamp(0, 1) sample = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": sample = self.numpy_to_pil(sample) if not return_dict: return (sample,) return ImagePipelineOutput(images=sample)
diffusers/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py", "repo_id": "diffusers", "token_count": 1759 }
174
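The sampling loop in `ScoreSdeVePipeline` above is a predictor-corrector scheme: `config.correct_steps` Langevin correction steps per noise level, followed by one reverse-SDE prediction step. A hedged usage sketch, assuming a diffusers version that still ships this deprecated pipeline; the model id is an assumed NCSN++ checkpoint, not one named in this file.

```python
import torch
from diffusers import ScoreSdeVePipeline

# "google/ncsnpp-celebahq-256" is an assumed compatible checkpoint id
pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# the 2000-step default is slow; fewer steps trade quality for speed
generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe(batch_size=1, num_inference_steps=2000, generator=generator).images[0]
image.save("sde_ve_sample.png")
```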
from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from diffusers.utils import deprecate from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin from ....models.activations import get_activation from ....models.attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnAddedKVProcessor2_0, AttnProcessor, ) from ....models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from ....models.resnet import ResnetBlockCondNorm2D from ....models.transformers.dual_transformer_2d import DualTransformer2DModel from ....models.transformers.transformer_2d import Transformer2DModel from ....models.unets.unet_2d_condition import UNet2DConditionOutput from ....utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import apply_freeu logger = logging.get_logger(__name__) # pylint: disable=invalid-name def get_down_block( down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, num_attention_heads, transformer_layers_per_block, attention_type, attention_head_dim, resnet_groups=None, cross_attention_dim=None, downsample_padding=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", resnet_skip_time_act=False, resnet_out_scale_factor=1.0, cross_attention_norm=None, dropout=0.0, ): down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type if down_block_type == "DownBlockFlat": return DownBlockFlat( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "CrossAttnDownBlockFlat": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockFlat") return CrossAttnDownBlockFlat( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) raise ValueError(f"{down_block_type} is not supported.") def get_up_block( up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, num_attention_heads, transformer_layers_per_block, resolution_idx, attention_type, attention_head_dim, resnet_groups=None, cross_attention_dim=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", resnet_skip_time_act=False, resnet_out_scale_factor=1.0, 
    cross_attention_norm=None,
    dropout=0.0,
):
    up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
    if up_block_type == "UpBlockFlat":
        return UpBlockFlat(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "CrossAttnUpBlockFlat":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockFlat")
        return CrossAttnUpBlockFlat(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    raise ValueError(f"{up_block_type} is not supported.")


class FourierEmbedder(nn.Module):
    def __init__(self, num_freqs=64, temperature=100):
        super().__init__()

        self.num_freqs = num_freqs
        self.temperature = temperature

        freq_bands = temperature ** (torch.arange(num_freqs) / num_freqs)
        freq_bands = freq_bands[None, None, None]
        self.register_buffer("freq_bands", freq_bands, persistent=False)

    def __call__(self, x):
        x = self.freq_bands * x.unsqueeze(-1)
        return torch.stack((x.sin(), x.cos()), dim=-1).permute(0, 1, 3, 4, 2).reshape(*x.shape[:2], -1)


class GLIGENTextBoundingboxProjection(nn.Module):
    def __init__(self, positive_len, out_dim, feature_type, fourier_freqs=8):
        super().__init__()
        self.positive_len = positive_len
        self.out_dim = out_dim

        self.fourier_embedder = FourierEmbedder(num_freqs=fourier_freqs)
        self.position_dim = fourier_freqs * 2 * 4  # 2: sin/cos, 4: xyxy

        if isinstance(out_dim, tuple):
            out_dim = out_dim[0]

        if feature_type == "text-only":
            self.linears = nn.Sequential(
                nn.Linear(self.positive_len + self.position_dim, 512),
                nn.SiLU(),
                nn.Linear(512, 512),
                nn.SiLU(),
                nn.Linear(512, out_dim),
            )
            self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))

        elif feature_type == "text-image":
            self.linears_text = nn.Sequential(
                nn.Linear(self.positive_len + self.position_dim, 512),
                nn.SiLU(),
                nn.Linear(512, 512),
                nn.SiLU(),
                nn.Linear(512, out_dim),
            )
            self.linears_image = nn.Sequential(
                nn.Linear(self.positive_len + self.position_dim, 512),
                nn.SiLU(),
                nn.Linear(512, 512),
                nn.SiLU(),
                nn.Linear(512, out_dim),
            )
            self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
            self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))

        self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim]))

    def forward(
        self,
        boxes,
        masks,
        positive_embeddings=None,
        phrases_masks=None,
        image_masks=None,
        phrases_embeddings=None,
        image_embeddings=None,
    ):
        masks = masks.unsqueeze(-1)

        xyxy_embedding = self.fourier_embedder(boxes)
        xyxy_null = self.null_position_feature.view(1, 1, -1)
        xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null

        # check for None explicitly: `if positive_embeddings:` would raise on a multi-element tensor
        if positive_embeddings is not None:
            positive_null = self.null_positive_feature.view(1, 1, -1)
            positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null

            objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1))
        else:
            phrases_masks = phrases_masks.unsqueeze(-1)
            image_masks = image_masks.unsqueeze(-1)

            text_null = self.null_text_feature.view(1, 1, -1)
            image_null = self.null_image_feature.view(1, 1, -1)

            phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null
            image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null

            objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1))
            objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1))
            objs = torch.cat([objs_text, objs_image], dim=1)

        return objs


class UNetFlatConditionModel(ModelMixin, ConfigMixin):
    r"""
    A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
    shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample.
        in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "DownBlockFlat")`):
            The tuple of downsample blocks to use.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlockFlatCrossAttn"`):
            Block type for middle of UNet, it can be one of `UNetMidBlockFlatCrossAttn`, `UNetMidBlockFlat`, or
            `UNetMidBlockFlatSimpleCrossAttn`. If `None`, the mid block layer is skipped.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat")`):
            The tuple of upsample blocks to use.
        only_cross_attention (`bool` or `Tuple[bool]`, *optional*, defaults to `False`):
            Whether to include self-attention in the basic transformer blocks, see
            [`~models.attention.BasicTransformerBlock`].
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
        downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
            If `None`, normalization and activation layers are skipped in post-processing.
        norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
        cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
            The dimension of the cross attention features.
transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlockFlat`], [`~models.unet_2d_blocks.CrossAttnUpBlockFlat`], [`~models.unet_2d_blocks.UNetMidBlockFlatCrossAttn`]. reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for [`~models.unet_2d_blocks.CrossAttnDownBlockFlat`], [`~models.unet_2d_blocks.CrossAttnUpBlockFlat`], [`~models.unet_2d_blocks.UNetMidBlockFlatCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlockFlat`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. addition_time_embed_dim: (`int`, *optional*, defaults to `None`): Dimension for the timestep embeddings. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. 
Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): Whether to use cross attention with the mid block when using the `UNetMidBlockFlatSimpleCrossAttn`. If `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` otherwise. """ _supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock", "ResnetBlockFlat", "CrossAttnUpBlockFlat"] @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "DownBlockFlat", ), mid_block_type: Optional[str] = "UNetMidBlockFlatCrossAttn", up_block_types: Tuple[str] = ( "UpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat", ), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, dropout: float = 0.0, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, attention_type: str = "default", class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, ): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. 
# The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: for layer_number_per_block in transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = LinearMultiDim( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. 
Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim_type`: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj' or 'image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kandinsky 2.1) self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError( f"addition_embed_type: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image' or 'image_hint'." ) if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings.
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlockFlatCrossAttn": self.mid_block = UNetMidBlockFlatCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) elif mid_block_type == "UNetMidBlockFlatSimpleCrossAttn": self.mid_block = UNetMidBlockFlatSimpleCrossAttn( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type == "UNetMidBlockFlat": self.mid_block = UNetMidBlockFlat( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, num_layers=0, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False, ) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) 
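# The up path mirrors the down path, so every per-block configuration list is
# traversed in reverse order when the up blocks are built below.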
reversed_transformer_layers_per_block = ( list(reversed(transformer_layers_per_block)) if reverse_transformer_layers_per_block is None else reverse_transformer_layers_per_block ) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resolution_idx=i, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = LinearMultiDim( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) if attention_type in ["gated", "gated-text-image"]: positive_len = 768 if isinstance(cross_attention_dim, int): positive_len = cross_attention_dim elif isinstance(cross_attention_dim, (list, tuple)): positive_len = cross_attention_dim[0] feature_type = "text-only" if attention_type == "gated" else "text-image" self.position_net = GLIGENTextBoundingboxProjection( positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model, indexed by their weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention.
Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." 
) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def enable_freeu(self, s1, s2, b1, b2): r"""Enables the FreeU mechanism from https://huggingface.co/papers/2309.11497. The suffixes after the scaling factors represent the stage blocks where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. """ for i, upsample_block in enumerate(self.up_blocks): setattr(upsample_block, "s1", s1) setattr(upsample_block, "s2", s2) setattr(upsample_block, "b1", b1) setattr(upsample_block, "b2", b2) def disable_freeu(self): """Disables the FreeU mechanism.""" freeu_keys = {"s1", "s2", "b1", "b2"} for i, upsample_block in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def unload_lora(self): """Unloads LoRA weights.""" deprecate( "unload_lora", "0.28.0", "Calling `unload_lora()` is deprecated and will be removed in a future version. 
Please install `peft` and then call `disable_adapters()`.", ) for module in self.modules(): if hasattr(module, "set_lora_layer"): module.set_lora_layer(None) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNet2DConditionOutput, Tuple]: r""" The [`UNetFlatConditionModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.Tensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed through the `self.time_embedding` layer to obtain the timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). added_cond_kwargs: (`dict`, *optional*): A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that are passed along to the UNet blocks. down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): A tuple of tensors that if specified are added to the residuals of the down unet blocks (the long skip connections to the up blocks), for example from ControlNet side model(s). mid_block_additional_residual: (`torch.Tensor`, *optional*): A tensor that if specified is added to the residual of the middle unet block, for example from a ControlNet side model. down_intrablock_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): A tuple of tensors that if specified are added within the down unet blocks, for example from T2I-Adapter side model(s). encoder_attention_mask (`torch.Tensor`): A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
Returns: [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be at least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None for dim in sample.shape[-2:]: if dim % default_overall_up_factor != 0: # Forward upsample size to force interpolation output size. forward_upsample_size = True break # ensure attention_mask is a bias, and give it a singleton query_tokens dimension # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 0. center input if necessary if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU.
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" is_npu = sample.device.type == "npu" if isinstance(timestep, float): dtype = torch.float32 if (is_mps or is_npu) else torch.float64 else: dtype = torch.int32 if (is_mps or is_npu) else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) # `Timesteps` does not contain any weights and will always return f32 tensors # there might be better ways to encapsulate this. class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_image": # Kandinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) aug_emb = self.add_embedding(text_embs, image_embs) elif self.config.addition_embed_type == "text_time": # SDXL - style if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) elif self.config.addition_embed_type == "image": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") aug_emb = self.add_embedding(image_embs) elif self.config.addition_embed_type == "image_hint": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs or "hint" not in 
added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") hint = added_cond_kwargs.get("hint") aug_emb, hint = self.add_embedding(image_embs, hint) sample = torch.cat([sample, hint], dim=1) emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": # Kandinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj": if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") image_embeds = self.encoder_hid_proj(image_embeds) encoder_hidden_states = (encoder_hidden_states, image_embeds) # 2. pre-process sample = self.conv_in(sample) # 2.5 GLIGEN position net if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None: cross_attention_kwargs = cross_attention_kwargs.copy() gligen_args = cross_attention_kwargs.pop("gligen") cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)} # 3. down lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets is_adapter = down_intrablock_additional_residuals is not None # maintain backward compatibility for legacy usage, where # T2I-Adapter and ControlNet both use down_block_additional_residuals arg # but can only use one or the other if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None: deprecate( "T2I should not use down_block_additional_residuals", "1.3.0", "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \ and will be removed in diffusers 1.3.0. 
`down_block_additional_residuals` should only be used \ for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ", standard_warn=False, ) down_intrablock_additional_residuals = down_block_additional_residuals is_adapter = True down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlockFlat additional_residuals = {} if is_adapter and len(down_intrablock_additional_residuals) > 0: additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0) sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, **additional_residuals, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) if is_adapter and len(down_intrablock_additional_residuals) > 0: sample += down_intrablock_additional_residuals.pop(0) down_block_res_samples += res_samples if is_controlnet: new_down_block_res_samples = () for down_block_res_sample, down_block_additional_residual in zip( down_block_res_samples, down_block_additional_residuals ): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples # 4. mid if self.mid_block is not None: if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) else: sample = self.mid_block(sample, emb) # To support T2I-Adapter-XL if ( is_adapter and len(down_intrablock_additional_residuals) > 0 and sample.shape == down_intrablock_additional_residuals[0].shape ): sample += down_intrablock_additional_residuals.pop(0) if is_controlnet: sample = sample + mid_block_additional_residual # 5. up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, scale=lora_scale, ) # 6.
post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (sample,) return UNet2DConditionOutput(sample=sample) class LinearMultiDim(nn.Linear): def __init__(self, in_features, out_features=None, second_dim=4, *args, **kwargs): in_features = [in_features, second_dim, 1] if isinstance(in_features, int) else list(in_features) if out_features is None: out_features = in_features out_features = [out_features, second_dim, 1] if isinstance(out_features, int) else list(out_features) self.in_features_multidim = in_features self.out_features_multidim = out_features super().__init__(np.array(in_features).prod(), np.array(out_features).prod()) def forward(self, input_tensor, *args, **kwargs): shape = input_tensor.shape n_dim = len(self.in_features_multidim) input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_features) output_tensor = super().forward(input_tensor) output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_features_multidim) return output_tensor class ResnetBlockFlat(nn.Module): def __init__( self, *, in_channels, out_channels=None, dropout=0.0, temb_channels=512, groups=32, groups_out=None, pre_norm=True, eps=1e-6, time_embedding_norm="default", use_in_shortcut=None, second_dim=4, **kwargs, ): super().__init__() self.pre_norm = pre_norm self.pre_norm = True in_channels = [in_channels, second_dim, 1] if isinstance(in_channels, int) else list(in_channels) self.in_channels_prod = np.array(in_channels).prod() self.channels_multidim = in_channels if out_channels is not None: out_channels = [out_channels, second_dim, 1] if isinstance(out_channels, int) else list(out_channels) out_channels_prod = np.array(out_channels).prod() self.out_channels_multidim = out_channels else: out_channels_prod = self.in_channels_prod self.out_channels_multidim = self.channels_multidim self.time_embedding_norm = time_embedding_norm if groups_out is None: groups_out = groups self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=self.in_channels_prod, eps=eps, affine=True) self.conv1 = torch.nn.Conv2d(self.in_channels_prod, out_channels_prod, kernel_size=1, padding=0) if temb_channels is not None: self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels_prod) else: self.time_emb_proj = None self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels_prod, eps=eps, affine=True) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels_prod, out_channels_prod, kernel_size=1, padding=0) self.nonlinearity = nn.SiLU() self.use_in_shortcut = ( self.in_channels_prod != out_channels_prod if use_in_shortcut is None else use_in_shortcut ) self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = torch.nn.Conv2d( self.in_channels_prod, out_channels_prod, kernel_size=1, stride=1, padding=0 ) def forward(self, input_tensor, temb): shape = input_tensor.shape n_dim = len(self.channels_multidim) input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_channels_prod, 1, 1) input_tensor = input_tensor.view(-1, self.in_channels_prod, 1, 1) hidden_states = input_tensor hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if temb is not None: temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None] hidden_states = hidden_states + temb hidden_states 
= self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor) output_tensor = input_tensor + hidden_states output_tensor = output_tensor.view(*shape[0:-n_dim], -1) output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_channels_multidim) return output_tensor class DownBlockFlat(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_downsample: bool = True, downsample_padding: int = 1, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlockFlat( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ LinearMultiDim( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: output_states = () for resnet in self.resnets: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states class CrossAttnDownBlockFlat(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, downsample_padding: int = 1, add_downsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, attention_type: str = "default", ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlockFlat( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not 
dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ LinearMultiDim( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, additional_residuals: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: output_states = () blocks = list(zip(self.resnets, self.attentions)) for i, (resnet, attn) in enumerate(blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] # apply additional residuals to the output of the last pair of resnet and attention blocks if i == len(blocks) - 1 and additional_residuals is not None: hidden_states = hidden_states + additional_residuals output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states # Copied from diffusers.models.unets.unet_2d_blocks.UpBlock2D with UpBlock2D->UpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim class UpBlockFlat(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_upsample: bool = True, ): super().__init__() resnets = [] for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlockFlat( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, 
dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, upsample_size: Optional[int] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) is_freeu_enabled = ( getattr(self, "s1", None) and getattr(self, "s2", None) and getattr(self, "b1", None) and getattr(self, "b2", None) ) for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] # FreeU: Only operate on the first two stages if is_freeu_enabled: hidden_states, res_hidden_states = apply_freeu( self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2, ) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states # Copied from diffusers.models.unets.unet_2d_blocks.CrossAttnUpBlock2D with CrossAttnUpBlock2D->CrossAttnUpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim class CrossAttnUpBlockFlat(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_upsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, attention_type: str = "default", ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlockFlat( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, 
output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") is_freeu_enabled = ( getattr(self, "s1", None) and getattr(self, "s2", None) and getattr(self, "b1", None) and getattr(self, "b2", None) ) for resnet, attn in zip(self.resnets, self.attentions): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] # FreeU: Only operate on the first two stages if is_freeu_enabled: hidden_states, res_hidden_states = apply_freeu( self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2, ) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states # Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2D with UNetMidBlock2D->UNetMidBlockFlat, ResnetBlock2D->ResnetBlockFlat class UNetMidBlockFlat(nn.Module): """ A 2D UNet mid-block [`UNetMidBlockFlat`] with multiple residual blocks and optional attention blocks. Args: in_channels (`int`): The number of input channels. temb_channels (`int`): The number of temporal embedding channels. dropout (`float`, *optional*, defaults to 0.0): The dropout rate. 
num_layers (`int`, *optional*, defaults to 1): The number of residual blocks. resnet_eps (`float`, *optional*, defaults to 1e-6): The epsilon value for the resnet blocks. resnet_time_scale_shift (`str`, *optional*, defaults to `default`): The type of normalization to apply to the time embeddings. This can help to improve the performance of the model on tasks with long-range temporal dependencies. resnet_act_fn (`str`, *optional*, defaults to `swish`): The activation function for the resnet blocks. resnet_groups (`int`, *optional*, defaults to 32): The number of groups to use in the group normalization layers of the resnet blocks. attn_groups (`Optional[int]`, *optional*, defaults to None): The number of groups for the attention blocks. resnet_pre_norm (`bool`, *optional*, defaults to `True`): Whether to use pre-normalization for the resnet blocks. add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention blocks. attention_head_dim (`int`, *optional*, defaults to 1): Dimension of a single attention head. The number of attention heads is determined based on this value and the number of input channels. output_scale_factor (`float`, *optional*, defaults to 1.0): The output scale factor. Returns: `torch.Tensor`: The output of the last residual block, which is a tensor of shape `(batch_size, in_channels, height, width)`. """ def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", # default, spatial resnet_act_fn: str = "swish", resnet_groups: int = 32, attn_groups: Optional[int] = None, resnet_pre_norm: bool = True, add_attention: bool = True, attention_head_dim: int = 1, output_scale_factor: float = 1.0, ): super().__init__() resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.add_attention = add_attention if attn_groups is None: attn_groups = resnet_groups if resnet_time_scale_shift == "default" else None # there is always at least one resnet if resnet_time_scale_shift == "spatial": resnets = [ ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm="spatial", non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, ) ] else: resnets = [ ResnetBlockFlat( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] if attention_head_dim is None: logger.warning( f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}."
) attention_head_dim = in_channels for _ in range(num_layers): if self.add_attention: attentions.append( Attention( in_channels, heads=in_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=attn_groups, spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True, ) ) else: attentions.append(None) if resnet_time_scale_shift == "spatial": resnets.append( ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm="spatial", non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, ) ) else: resnets.append( ResnetBlockFlat( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): if torch.is_grad_enabled() and self.gradient_checkpointing: if attn is not None: hidden_states = attn(hidden_states, temb=temb) hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: if attn is not None: hidden_states = attn(hidden_states, temb=temb) hidden_states = resnet(hidden_states, temb) return hidden_states # Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn with UNetMidBlock2DCrossAttn->UNetMidBlockFlatCrossAttn, ResnetBlock2D->ResnetBlockFlat class UNetMidBlockFlatCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, out_channels: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_groups_out: Optional[int] = None, resnet_pre_norm: bool = True, num_attention_heads: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, dual_cross_attention: bool = False, use_linear_projection: bool = False, upcast_attention: bool = False, attention_type: str = "default", ): super().__init__() out_channels = out_channels or in_channels self.in_channels = in_channels self.out_channels = out_channels self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # support for variable transformer layers per block if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers resnet_groups_out = resnet_groups_out or resnet_groups # there is always at least one resnet resnets = [ ResnetBlockFlat( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, groups_out=resnet_groups_out, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] 
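# The mid block interleaves attention and resnet layers: the list above holds the
# initial resnet, and the loop below appends one (attention, resnet) pair per
# layer, so the block ends up with `num_layers` transformers and `num_layers + 1`
# resnets.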
attentions = [] for i in range(num_layers): if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups_out, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlockFlat( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups_out, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = resnet(hidden_states, temb) return hidden_states # Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2DSimpleCrossAttn with UNetMidBlock2DSimpleCrossAttn->UNetMidBlockFlatSimpleCrossAttn, ResnetBlock2D->ResnetBlockFlat class UNetMidBlockFlatSimpleCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, skip_time_act: bool = False, only_cross_attention: bool = False, cross_attention_norm: Optional[str] = None, ): super().__init__() self.has_cross_attention = True self.attention_head_dim = attention_head_dim resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.num_heads = in_channels // self.attention_head_dim # there is always at least one resnet resnets = [ ResnetBlockFlat( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, 
time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ] attentions = [] for _ in range(num_layers): processor = ( AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() ) attentions.append( Attention( query_dim=in_channels, cross_attention_dim=in_channels, heads=self.num_heads, dim_head=self.attention_head_dim, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor, ) ) resnets.append( ResnetBlockFlat( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") if attention_mask is None: # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. mask = None if encoder_hidden_states is None else encoder_attention_mask else: # when attention_mask is defined: we don't even check for encoder_attention_mask. # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. # then we can simplify this whole if/else block to: # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask mask = attention_mask hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): # attn hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs, ) # resnet hidden_states = resnet(hidden_states, temb) return hidden_states
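# Usage sketch for the mid blocks above (illustrative assumptions only -- the channel
# sizes and tensor shapes are made up, not taken from any real checkpoint):
#
#   block = UNetMidBlockFlatSimpleCrossAttn(in_channels=320, temb_channels=1280)
#   out = block(sample, temb=temb, encoder_hidden_states=context)
#
# `sample` carries `in_channels` feature channels, `temb` is the projected timestep
# embedding, and `context` is the cross-attention conditioning; each block returns a
# tensor with the same shape as `sample`.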
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py", "repo_id": "diffusers", "token_count": 54027 }
175
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["HiDreamImagePipelineOutput"]} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_hidream_image"] = ["HiDreamImagePipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_hidream_image import HiDreamImagePipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for name, value in _additional_imports.items(): setattr(sys.modules[__name__], name, value)
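# Import-time behavior note: outside of type checking (and unless DIFFUSERS_SLOW_IMPORT
# is set), this module replaces itself with a `_LazyModule`, so for example
#
#   from diffusers.pipelines.hidream_image import HiDreamImagePipeline
#
# defers the heavy torch/transformers imports until the attribute is first accessed.
# If those dependencies are unavailable, dummy objects that raise on use are exposed
# via `_dummy_objects` instead.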
diffusers/src/diffusers/pipelines/hidream_image/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/hidream_image/__init__.py", "repo_id": "diffusers", "token_count": 586 }
176
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, List, Optional, Union

import PIL.Image
import torch
from transformers import (
    XLMRobertaTokenizer,
)

from ...image_processor import VaeImageProcessor
from ...models import UNet2DConditionModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import (
    is_torch_xla_available,
    logging,
    replace_example_docstring,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from .text_encoder import MultilingualCLIP


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyImg2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     prompt,
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""


def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


class KandinskyImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        text_encoder ([`MultilingualCLIP`]):
            Frozen text-encoder.
        tokenizer ([`XLMRobertaTokenizer`]):
            Tokenizer of class [`XLMRobertaTokenizer`].
        scheduler ([`DDIMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ image encoder and decoder
    """

    model_cpu_offload_seq = "text_encoder->unet->movq"

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        movq: VQModel,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: DDIMScheduler,
    ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = (
            2 ** (len(self.movq.config.block_out_channels) - 1) if getattr(self, "movq", None) else 8
        )
        movq_latent_channels = self.movq.config.latent_channels if getattr(self, "movq", None) else 4
        self.image_processor = VaeImageProcessor(
            vae_scale_factor=self.movq_scale_factor,
            vae_latent_channels=movq_latent_channels,
            resample="bicubic",
            reducing_gap=1,
        )

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, latents, latent_timestep, shape, dtype, device, generator, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma

        shape = latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        latents = self.add_noise(latents, noise, latent_timestep)
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=77,
            truncation=True,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method

            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])

            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    # add_noise method to overwrite the one in the scheduler because it uses a different beta schedule for adding noise vs sampling
    def add_noise(
        self,
        original_samples: torch.Tensor,
        noise: torch.Tensor,
        timesteps: torch.IntTensor,
    ) -> torch.Tensor:
        betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32)
        alphas = 1.0 - betas
        alphas_cumprod = torch.cumprod(alphas, dim=0)
        alphas_cumprod = alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise

        return noisy_samples

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]],
        image_embeds: torch.Tensor,
        negative_image_embeds: torch.Tensor,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        strength: float = 0.3,
        guidance_scale: float = 7.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`torch.Tensor`, `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            image_embeds (`torch.Tensor` or `List[torch.Tensor]`):
                The clip image embeddings for text prompt, that will be used to condition the image generation.
            negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`):
                The clip image embeddings for negative text prompt, will be used to condition the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            strength (`float`, *optional*, defaults to 0.3):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            guidance_scale (`float`, *optional*, defaults to 7.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function is called
                with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        # 1. Define call parameters
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        # 2. get text and image embeddings
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        # 3. pre-processing initial image
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([self.image_processor.preprocess(i, width, height) for i in image], dim=0)
        image = image.to(dtype=prompt_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)

        # 4. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)

        timesteps_tensor, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)

        # the formula to calculate timestep for add_noise is taken from the original kandinsky repo
        latent_timestep = int(self.scheduler.config.num_train_timesteps * strength) - 2

        latent_timestep = torch.tensor([latent_timestep] * batch_size, dtype=timesteps_tensor.dtype, device=device)

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # 5. Create initial latent
        latents = self.prepare_latents(
            latents,
            latent_timestep,
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            self.scheduler,
        )

        # 6. Denoising loop
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

            if callback is not None and i % callback_steps == 0:
                step_idx = i // getattr(self.scheduler, "order", 1)
                callback(step_idx, t, latents)

            if XLA_AVAILABLE:
                xm.mark_step()

        # 7. post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        self.maybe_free_model_hooks()

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}")

        image = self.image_processor.postprocess(image, output_type)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
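# Worked example for `get_new_h_w` above: with the default scale_factor of 8 it
# divides by 8**2 = 64, rounds up, and multiplies by 8 to get latent dimensions.
# get_new_h_w(768, 768) -> (96, 96), since 768 // 64 = 12 exactly, while
# get_new_h_w(520, 520) -> (72, 72), since 520 // 64 = 8 remainder 8 rounds up to
# 9 * 8 = 72 (the latent grid for a 576-pixel image side).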
diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py", "repo_id": "diffusers", "token_count": 9822 }
177
import inspect import math from itertools import repeat from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import Attention, AttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler from ...utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import LEditsPPPipelineStableDiffusion >>> from diffusers.utils import load_image >>> pipe = LEditsPPPipelineStableDiffusion.from_pretrained( ... "runwayml/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16 ... ) >>> pipe.enable_vae_tiling() >>> pipe = pipe.to("cuda") >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/cherry_blossom.png" >>> image = load_image(img_url).resize((512, 512)) >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.1) >>> edited_image = pipe( ... editing_prompt=["cherry blossom"], edit_guidance_scale=10.0, edit_threshold=0.75 ... 
).images[0] ``` """ # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.AttentionStore class LeditsAttentionStore: @staticmethod def get_empty_store(): return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []} def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): # attn.shape = batch_size * head_size, seq_len query, seq_len_key if attn.shape[1] <= self.max_size: bs = 1 + int(PnP) + editing_prompts skip = 2 if PnP else 1 # skip PnP & unconditional attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) source_batch_size = int(attn.shape[1] // bs) self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet) def forward(self, attn, is_cross: bool, place_in_unet: str): key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" self.step_store[key].append(attn) def between_steps(self, store_step=True): if store_step: if self.average: if len(self.attention_store) == 0: self.attention_store = self.step_store else: for key in self.attention_store: for i in range(len(self.attention_store[key])): self.attention_store[key][i] += self.step_store[key][i] else: if len(self.attention_store) == 0: self.attention_store = [self.step_store] else: self.attention_store.append(self.step_store) self.cur_step += 1 self.step_store = self.get_empty_store() def get_attention(self, step: int): if self.average: attention = { key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store } else: assert step is not None attention = self.attention_store[step] return attention def aggregate_attention( self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int ): out = [[] for x in range(self.batch_size)] if isinstance(res, int): num_pixels = res**2 resolution = (res, res) else: num_pixels = res[0] * res[1] resolution = res[:2] for location in from_where: for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: for batch, item in enumerate(bs_item): if item.shape[1] == num_pixels: cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] out[batch].append(cross_maps) out = torch.stack([torch.cat(x, dim=0) for x in out]) # average over heads out = out.sum(1) / out.shape[1] return out def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None): self.step_store = self.get_empty_store() self.attention_store = [] self.cur_step = 0 self.average = average self.batch_size = batch_size if max_size is None: self.max_size = max_resolution**2 elif max_size is not None and max_resolution is None: self.max_size = max_size else: raise ValueError("Only allowed to set one of max_resolution or max_size") # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.GaussianSmoothing class LeditsGaussianSmoothing: def __init__(self, device): kernel_size = [3, 3] sigma = [0.5, 0.5] # The gaussian kernel is the product of the gaussian function of each dimension. kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size], indexing="ij") for size, std, mgrid in zip(kernel_size, sigma, meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) # Make sure sum of values in gaussian kernel equals 1. 
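        # The loop above builds a separable 2-D kernel as the outer product of two 1-D
        # Gaussians over a fixed 3x3 grid (kernel_size=[3, 3], sigma=[0.5, 0.5]); the
        # division below then normalizes it so the filter preserves overall intensity.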
        kernel = kernel / torch.sum(kernel)

        # Reshape to depthwise convolutional weight
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1))

        self.weight = kernel.to(device)

    def __call__(self, input):
        """
        Apply gaussian filter to input.

        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        return F.conv2d(input, weight=self.weight.to(input.dtype))


class LEDITSCrossAttnProcessor:
    def __init__(self, attention_store, place_in_unet, pnp, editing_prompts):
        self.attnstore = attention_store
        self.place_in_unet = place_in_unet
        self.editing_prompts = editing_prompts
        self.pnp = pnp

    def __call__(
        self,
        attn: Attention,
        hidden_states,
        encoder_hidden_states,
        attention_mask=None,
        temb=None,
    ):
        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        self.attnstore(
            attention_probs,
            is_cross=True,
            place_in_unet=self.place_in_unet,
            editing_prompts=self.editing_prompts,
            PnP=self.pnp,
        )

        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        hidden_states = hidden_states / attn.rescale_output_factor
        return hidden_states


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    r"""
    Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
    Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
    Flawed](https://huggingface.co/papers/2305.08891).

    Args:
        noise_cfg (`torch.Tensor`):
            The predicted noise tensor for the guided diffusion process.
        noise_pred_text (`torch.Tensor`):
            The predicted noise tensor for the text-guided diffusion process.
        guidance_rescale (`float`, *optional*, defaults to 0.0):
            A rescale factor applied to the noise predictions.

    Returns:
        noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor.
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg


class LEditsPPPipelineStableDiffusion(
    DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
):
    """
    Pipeline for textual image editing using LEDits++ with Stable Diffusion.

    This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`].
    Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving,
    running on a particular device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer ([`~transformers.CLIPTokenizer`]):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will
            automatically be set to [`DPMSolverMultistepScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"
    _exclude_from_cpu_offload = ["safety_checker"]
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DPMSolverMultistepScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler):
            scheduler = DPMSolverMultistepScheduler.from_config(
                scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2
            )
            logger.warning(
                "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. "
                "The scheduler has been changed to DPMSolverMultistepScheduler."
            )

        if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
" `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = ( unet is not None and hasattr(unet.config, "_diffusers_version") and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") ) is_unet_sample_size_less_64 = ( unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 ) if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.inversion_steps = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, eta, generator=None): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, negative_prompt=None, editing_prompt_embeddings=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if editing_prompt_embeddings is not None and negative_prompt_embeds is not None: if editing_prompt_embeddings.shape != negative_prompt_embeds.shape: raise ValueError( "`editing_prompt_embeddings` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `editing_prompt_embeddings` {editing_prompt_embeddings.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents): # shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) # if latents.shape != shape: # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_unet(self, attention_store, PnP: bool = False): attn_procs = {} for name in self.unet.attn_processors.keys(): if name.startswith("mid_block"): place_in_unet = "mid" elif name.startswith("up_blocks"): place_in_unet = "up" elif name.startswith("down_blocks"): place_in_unet = "down" else: continue if "attn2" in name and place_in_unet != "mid": attn_procs[name] = LEDITSCrossAttnProcessor( attention_store=attention_store, place_in_unet=place_in_unet, pnp=PnP, editing_prompts=self.enabled_editing_prompts, ) else: attn_procs[name] = AttnProcessor() self.unet.set_attn_processor(attn_procs) def encode_prompt( self, device, num_images_per_prompt, enable_edit_guidance, negative_prompt=None, editing_prompt=None, negative_prompt_embeds: Optional[torch.Tensor] = None, editing_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. 
        Args:
            device (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            enable_edit_guidance (`bool`):
                whether to perform any editing or reconstruct the input image instead
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            editing_prompt (`str` or `List[str]`, *optional*):
                Editing prompt(s) to be encoded. If not defined, one has to pass `editing_prompt_embeds` instead.
            editing_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        batch_size = self.batch_size
        num_edit_tokens = None

        if negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but expected"
                    f" {batch_size} based on the input images. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = negative_prompt_embeds.dtype

        negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        if enable_edit_guidance:
            if editing_prompt_embeds is None:
                # textual inversion: process multi-vector tokens if necessary
                # if isinstance(self, TextualInversionLoaderMixin):
                #     prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
                if isinstance(editing_prompt, str):
                    editing_prompt = [editing_prompt]

                max_length = negative_prompt_embeds.shape[1]
                text_inputs = self.tokenizer(
                    [x for item in editing_prompt for x in repeat(item, batch_size)],
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                    return_length=True,
                )

                num_edit_tokens = text_inputs.length - 2  # not counting startoftext and endoftext
                text_input_ids = text_inputs.input_ids
                untruncated_ids = self.tokenizer(
                    [x for item in editing_prompt for x in repeat(item, batch_size)],
                    padding="longest",
                    return_tensors="pt",
                ).input_ids

                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = self.tokenizer.batch_decode(
                        untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                    )
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                    )

                if (
                    hasattr(self.text_encoder.config, "use_attention_mask")
                    and self.text_encoder.config.use_attention_mask
                ):
                    attention_mask = text_inputs.attention_mask.to(device)
                else:
                    attention_mask = None

                if clip_skip is None:
                    editing_prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                    editing_prompt_embeds = editing_prompt_embeds[0]
                else:
                    editing_prompt_embeds = self.text_encoder(
                        text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                    )
                    # Access the `hidden_states` first, that contains a tuple of
                    # all the hidden states from the encoder layers. Then index into
                    # the tuple to access the hidden states from the desired layer.
                    editing_prompt_embeds = editing_prompt_embeds[-1][-(clip_skip + 1)]
                    # We also need to apply the final LayerNorm here to not mess with the
                    # representations. The `last_hidden_states` that we typically use for
                    # obtaining the final prompt representations passes through the LayerNorm
                    # layer.
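                    # With clip_skip=1, `[-1][-(clip_skip + 1)]` selects hidden_states[-2],
                    # the penultimate encoder layer, before the final LayerNorm is applied below.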
editing_prompt_embeds = self.text_encoder.text_model.final_layer_norm(editing_prompt_embeds) editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) bs_embed_edit, seq_len, _ = editing_prompt_embeds.shape editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) editing_prompt_embeds = editing_prompt_embeds.repeat(1, num_images_per_prompt, 1) editing_prompt_embeds = editing_prompt_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return editing_prompt_embeds, negative_prompt_embeds, num_edit_tokens @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def cross_attention_kwargs(self): return self._cross_attention_kwargs def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" self.vae.disable_tiling() @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, negative_prompt: Optional[Union[str, List[str]]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, editing_prompt: Optional[Union[str, List[str]]] = None, editing_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, edit_guidance_scale: Optional[Union[float, List[float]]] = 5, edit_warmup_steps: Optional[Union[int, List[int]]] = 0, edit_cooldown_steps: Optional[Union[int, List[int]]] = None, edit_threshold: Optional[Union[float, List[float]]] = 0.9, user_mask: Optional[torch.Tensor] = None, sem_guidance: Optional[List[torch.Tensor]] = None, use_cross_attn_mask: bool = False, use_intersect_mask: bool = True, attn_store_steps: Optional[List[int]] = [], store_averaged_over_steps: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for editing. The [`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusion.invert`] method has to be called beforehand. Edits will always be performed for the last inverted image(s). Args: negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] instead of a plain tuple. editing_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. The image is reconstructed by setting `editing_prompt = None`. Guidance direction of prompt should be specified via `reverse_editing_direction`. editing_prompt_embeds (`torch.Tensor>`, *optional*): Pre-computed embeddings to use for guiding the image generation. Guidance direction of embedding should be specified via `reverse_editing_direction`. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): Whether the corresponding prompt in `editing_prompt` should be increased or decreased. edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): Guidance scale for guiding the image generation. If provided as list values should correspond to `editing_prompt`. `edit_guidance_scale` is defined as `s_e` of equation 12 of [LEDITS++ Paper](https://huggingface.co/papers/2301.12247). 
            edit_warmup_steps (`int` or `List[int]`, *optional*, defaults to 0):
                Number of diffusion steps (for each prompt) for which guidance will not be applied.
            edit_cooldown_steps (`int` or `List[int]`, *optional*, defaults to `None`):
                Number of diffusion steps (for each prompt) after which guidance will no longer be applied.
            edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9):
                Masking threshold of guidance. Threshold should be proportional to the image region that is modified.
                `edit_threshold` is defined as 'λ' of equation 12 of [LEDITS++
                Paper](https://huggingface.co/papers/2311.16711).
            user_mask (`torch.Tensor`, *optional*):
                User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s
                implicit masks do not meet user preferences.
            sem_guidance (`List[torch.Tensor]`, *optional*):
                List of pre-generated guidance vectors to be applied at generation. Length of the list has to
                correspond to `num_inference_steps`.
            use_cross_attn_mask (`bool`, defaults to `False`):
                Whether cross-attention masks are used. Cross-attention masks are always used when `use_intersect_mask`
                is set to `True`. Cross-attention masks are defined as 'M^1' of equation 12 of [LEDITS++
                paper](https://huggingface.co/papers/2311.16711).
            use_intersect_mask (`bool`, defaults to `True`):
                Whether the masking term is calculated as intersection of cross-attention masks and masks derived from
                the noise estimate. Cross-attention masks are defined as 'M^1' and masks derived from the noise
                estimate are defined as 'M^2' of equation 12 of [LEDITS++
                paper](https://huggingface.co/papers/2311.16711).
            attn_store_steps (`List[int]`, *optional*):
                Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes.
            store_averaged_over_steps (`bool`, defaults to `True`):
                Whether the attention maps for the `attn_store_steps` are stored averaged over the diffusion steps. If
                `False`, attention maps for each step are stored separately. Just for visualization purposes.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when
                using zero terminal SNR.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
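        When provided as lists, `edit_guidance_scale`, `edit_warmup_steps`, `edit_cooldown_steps`, `edit_threshold`
        and `reverse_editing_direction` must contain one value per entry in `editing_prompt`; scalar values are
        applied to all editing prompts.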
Examples: Returns: [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`: [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ if self.inversion_steps is None: raise ValueError( "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)." ) eta = self.eta num_images_per_prompt = 1 latents = self.init_latents zs = self.zs self.scheduler.set_timesteps(len(self.scheduler.timesteps)) if use_intersect_mask: use_cross_attn_mask = True if use_cross_attn_mask: self.smoothing = LeditsGaussianSmoothing(self.device) if user_mask is not None: user_mask = user_mask.to(self.device) org_prompt = "" # 1. Check inputs. Raise error if not correct self.check_inputs( negative_prompt, editing_prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. Define call parameters batch_size = self.batch_size if editing_prompt: enable_edit_guidance = True if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] self.enabled_editing_prompts = len(editing_prompt) elif editing_prompt_embeds is not None: enable_edit_guidance = True self.enabled_editing_prompts = editing_prompt_embeds.shape[0] else: self.enabled_editing_prompts = 0 enable_edit_guidance = False # 3. Encode input prompt lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) edit_concepts, uncond_embeddings, num_edit_tokens = self.encode_prompt( editing_prompt=editing_prompt, device=self.device, num_images_per_prompt=num_images_per_prompt, enable_edit_guidance=enable_edit_guidance, negative_prompt=negative_prompt, editing_prompt_embeds=editing_prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if enable_edit_guidance: text_embeddings = torch.cat([uncond_embeddings, edit_concepts]) self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt else: text_embeddings = torch.cat([uncond_embeddings]) # 4. Prepare timesteps # self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps = self.inversion_steps t_to_idx = {int(v): k for k, v in enumerate(timesteps[-zs.shape[0] :])} if use_cross_attn_mask: self.attention_store = LeditsAttentionStore( average=store_averaged_over_steps, batch_size=batch_size, max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0), max_resolution=None, ) self.prepare_unet(self.attention_store, PnP=False) resolution = latents.shape[-2:] att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, None, None, text_embeddings.dtype, self.device, latents, ) # 6. Prepare extra step kwargs. 
extra_step_kwargs = self.prepare_extra_step_kwargs(eta) self.sem_guidance = None self.activation_mask = None # 7. Denoising loop num_warmup_steps = 0 with self.progress_bar(total=len(timesteps)) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance if enable_edit_guidance: latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) else: latent_model_input = latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) text_embed_input = text_embeddings # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embed_input).sample noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) # [b,4, 64, 64] noise_pred_uncond = noise_pred_out[0] noise_pred_edit_concepts = noise_pred_out[1:] noise_guidance_edit = torch.zeros( noise_pred_uncond.shape, device=self.device, dtype=noise_pred_uncond.dtype, ) if sem_guidance is not None and len(sem_guidance) > i: noise_guidance_edit += sem_guidance[i].to(self.device) elif enable_edit_guidance: if self.activation_mask is None: self.activation_mask = torch.zeros( (len(timesteps), len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape) ) if self.sem_guidance is None: self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): if isinstance(edit_warmup_steps, list): edit_warmup_steps_c = edit_warmup_steps[c] else: edit_warmup_steps_c = edit_warmup_steps if i < edit_warmup_steps_c: continue if isinstance(edit_guidance_scale, list): edit_guidance_scale_c = edit_guidance_scale[c] else: edit_guidance_scale_c = edit_guidance_scale if isinstance(edit_threshold, list): edit_threshold_c = edit_threshold[c] else: edit_threshold_c = edit_threshold if isinstance(reverse_editing_direction, list): reverse_editing_direction_c = reverse_editing_direction[c] else: reverse_editing_direction_c = reverse_editing_direction if isinstance(edit_cooldown_steps, list): edit_cooldown_steps_c = edit_cooldown_steps[c] elif edit_cooldown_steps is None: edit_cooldown_steps_c = i + 1 else: edit_cooldown_steps_c = edit_cooldown_steps if i >= edit_cooldown_steps_c: continue noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond if reverse_editing_direction_c: noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c if user_mask is not None: noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask if use_cross_attn_mask: out = self.attention_store.aggregate_attention( attention_maps=self.attention_store.step_store, prompts=self.text_cross_attention_maps, res=att_res, from_where=["up", "down"], is_cross=True, select=self.text_cross_attention_maps.index(editing_prompt[c]), ) attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]] # 0 -> startoftext # average over all tokens if attn_map.shape[3] != num_edit_tokens[c]: raise ValueError( f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!" 
) attn_map = torch.sum(attn_map, dim=3) # gaussian_smoothing attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect") attn_map = self.smoothing(attn_map).squeeze(1) # torch.quantile function expects float32 if attn_map.dtype == torch.float32: tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) else: tmp = torch.quantile( attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1 ).to(attn_map.dtype) attn_mask = torch.where( attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0 ) # resolution must match latent space dimension attn_mask = F.interpolate( attn_mask.unsqueeze(1), noise_guidance_edit_tmp.shape[-2:], # 64,64 ).repeat(1, 4, 1, 1) self.activation_mask[i, c] = attn_mask.detach().cpu() if not use_intersect_mask: noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask if use_intersect_mask: if t <= 800: noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum( noise_guidance_edit_tmp_quantile, dim=1, keepdim=True ) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat( 1, self.unet.config.in_channels, 1, 1 ) # torch.quantile function expects float32 if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp_quantile.dtype) intersect_mask = ( torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp), ) * attn_mask ) self.activation_mask[i, c] = intersect_mask.detach().cpu() noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask else: # print(f"only attention mask for step {i}") noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask elif not use_cross_attn_mask: # calculate quantile noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum( noise_guidance_edit_tmp_quantile, dim=1, keepdim=True ) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) # torch.quantile function expects float32 if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp_quantile.dtype) self.activation_mask[i, c] = ( torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp), ) .detach() .cpu() ) noise_guidance_edit_tmp = torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], noise_guidance_edit_tmp, torch.zeros_like(noise_guidance_edit_tmp), ) noise_guidance_edit += noise_guidance_edit_tmp self.sem_guidance[i] = noise_guidance_edit.detach().cpu() noise_pred = noise_pred_uncond + noise_guidance_edit if enable_edit_guidance and self.guidance_rescale > 0.0: # Based on 3.4. 
in https://huggingface.co/papers/2305.08891
                    # `noise_pred_edit_concepts` is a tuple returned by `chunk`, so stack before averaging
                    noise_pred = rescale_noise_cfg(
                        noise_pred,
                        torch.stack(noise_pred_edit_concepts).mean(dim=0, keepdim=False),
                        guidance_rescale=self.guidance_rescale,
                    )

                idx = t_to_idx[int(t)]
                latents = self.scheduler.step(
                    noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs
                ).prev_sample

                # step callback
                if use_cross_attn_mask:
                    store_step = i in attn_store_steps
                    self.attention_store.between_steps(store_step)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    # prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        # 8. Post-processing
        if output_type != "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
                0
            ]
            image, has_nsfw_concept = self.run_safety_checker(image, self.device, text_embeddings.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, has_nsfw_concept)

        return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

    @torch.no_grad()
    def invert(
        self,
        image: PipelineImageInput,
        source_prompt: str = "",
        source_guidance_scale: float = 3.5,
        num_inversion_steps: int = 30,
        skip: float = 0.15,
        generator: Optional[torch.Generator] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        clip_skip: Optional[int] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        resize_mode: Optional[str] = "default",
        crops_coords: Optional[Tuple[int, int, int, int]] = None,
    ):
        r"""
        The function of the pipeline for image inversion, as described in the [LEDITS++
        Paper](https://huggingface.co/papers/2311.16711). If the scheduler is set to [`~schedulers.DDIMScheduler`],
        the inversion proposed by [edit-friendly DDPM](https://huggingface.co/papers/2304.06140) will be performed
        instead.

        Args:
            image (`PipelineImageInput`):
                Input for the image(s) that are to be edited. Multiple input images must share the same aspect ratio.
            source_prompt (`str`, defaults to `""`):
                Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled
                if the `source_prompt` is `""`.
            source_guidance_scale (`float`, defaults to `3.5`):
                Strength of guidance during inversion.
            num_inversion_steps (`int`, defaults to `30`):
                Number of total performed inversion steps after discarding the initial `skip` steps.
            skip (`float`, defaults to `0.15`):
                Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values
                will lead to stronger changes to the input image. `skip` has to be between `0` and `1`.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make inversion
                deterministic.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            height (`int`, *optional*, defaults to `None`):
                The height of the preprocessed image. If `None`, `get_default_height_width()` is used to get the
                default height.
            width (`int`, *optional*, defaults to `None`):
                The width of the preprocessed image. If `None`, `get_default_height_width()` is used to get the
                default width.
            resize_mode (`str`, *optional*, defaults to `default`):
                The resize mode, can be one of `default`, `fill` or `crop`. If `default`, will resize the image to fit
                within the specified width and height, and it may not maintain the original aspect ratio. If `fill`,
                will resize the image to fit within the specified width and height, maintaining the aspect ratio, and
                then center the image within the dimensions, filling empty areas with data from the image. If `crop`,
                will resize the image to fit within the specified width and height, maintaining the aspect ratio, and
                then center the image within the dimensions, cropping the excess. Note that resize modes `fill` and
                `crop` are only supported for PIL image input.
            crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`):
                The crop coordinates for each image in the batch. If `None`, will not crop the image.

        Returns:
            [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: Output will contain the resized input image(s)
            and respective VAE reconstruction(s).
        """
        if (height is not None and height % 32 != 0) or (width is not None and width % 32 != 0):
            raise ValueError("height and width must be a multiple of 32.")

        # Reset attn processor, we do not want to store attn maps during inversion
        self.unet.set_attn_processor(AttnProcessor())

        self.eta = 1.0

        self.scheduler.config.timestep_spacing = "leading"
        self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip)))
        self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:]
        timesteps = self.inversion_steps

        # 1. encode image
        x0, resized = self.encode_image(
            image,
            dtype=self.text_encoder.dtype,
            height=height,
            width=width,
            resize_mode=resize_mode,
            crops_coords=crops_coords,
        )
        self.batch_size = x0.shape[0]

        # autoencoder reconstruction
        image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
        image_rec = self.image_processor.postprocess(image_rec, output_type="pil")

        # 2. get embeddings
        do_classifier_free_guidance = source_guidance_scale > 1.0

        lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None

        uncond_embedding, text_embeddings, _ = self.encode_prompt(
            num_images_per_prompt=1,
            device=self.device,
            negative_prompt=None,
            enable_edit_guidance=do_classifier_free_guidance,
            editing_prompt=source_prompt,
            lora_scale=lora_scale,
            clip_skip=clip_skip,
        )

        # 3. 
find zs and xts variance_noise_shape = (num_inversion_steps, *x0.shape) # intermediate latents t_to_idx = {int(v): k for k, v in enumerate(timesteps)} xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) for t in reversed(timesteps): idx = num_inversion_steps - t_to_idx[int(t)] - 1 noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) xts[idx] = self.scheduler.add_noise(x0, noise, torch.Tensor([t])) xts = torch.cat([x0.unsqueeze(0), xts], dim=0) self.scheduler.set_timesteps(len(self.scheduler.timesteps)) # noise maps zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) with self.progress_bar(total=len(timesteps)) as progress_bar: for t in timesteps: idx = num_inversion_steps - t_to_idx[int(t)] - 1 # 1. predict noise residual xt = xts[idx + 1] noise_pred = self.unet(xt, timestep=t, encoder_hidden_states=uncond_embedding).sample if not source_prompt == "": noise_pred_cond = self.unet(xt, timestep=t, encoder_hidden_states=text_embeddings).sample noise_pred = noise_pred + source_guidance_scale * (noise_pred_cond - noise_pred) xtm1 = xts[idx] z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) zs[idx] = z # correction to avoid error accumulation xts[idx] = xtm1_corrected progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self.init_latents = xts[-1].expand(self.batch_size, -1, -1, -1) zs = zs.flip(0) self.zs = zs return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) @torch.no_grad() def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None): image = self.image_processor.preprocess( image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords ) height, width = image.shape[-2:] if height % 32 != 0 or width % 32 != 0: raise ValueError( "Image height and width must be a factor of 32. " "Consider down-sampling the input using the `height` and `width` parameters" ) resized = self.image_processor.postprocess(image=image, output_type="pil") if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5: logger.warning( "Your input images far exceed the default resolution of the underlying diffusion model. " "The output images may contain severe artifacts! " "Consider down-sampling the input using the `height` and `width` parameters" ) image = image.to(dtype) x0 = self.vae.encode(image.to(self.device)).latent_dist.mode() x0 = x0.to(dtype) x0 = self.vae.config.scaling_factor * x0 return x0, resized def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): # 1. get previous step value (=t-1) prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps # 2. compute alphas, betas alpha_prod_t = scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = ( scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod ) beta_prod_t = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502 pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) # 4. Clip "predicted x_0" if scheduler.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) # 5. 
compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = scheduler._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) # 6. compute "direction pointing to x_t" of formula (12) from https://huggingface.co/papers/2010.02502 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred # modified so that updated xtm1 is returned as well (to avoid error accumulation) mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if variance > 0.0: noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta) else: noise = torch.tensor([0.0]).to(latents.device) return noise, mu_xt + (eta * variance**0.5) * noise def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): def first_order_update(model_output, sample): # timestep, prev_timestep, sample): sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index] alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output mu_xt = scheduler.dpm_solver_first_order_update( model_output=model_output, sample=sample, noise=torch.zeros_like(sample) ) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return noise, prev_sample def second_order_update(model_output_list, sample): # timestep_list, prev_timestep, sample): sigma_t, sigma_s0, sigma_s1 = ( scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index], scheduler.sigmas[scheduler.step_index - 1], ) alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) m0, m1 = model_output_list[-1], model_output_list[-2] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) mu_xt = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 ) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return noise, prev_sample if scheduler.step_index is None: scheduler._init_step_index(timestep) model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) for i in range(scheduler.config.solver_order - 1): scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] scheduler.model_outputs[-1] = model_output if scheduler.lower_order_nums < 1: noise, prev_sample = first_order_update(model_output, latents) else: noise, prev_sample = second_order_update(scheduler.model_outputs, latents) if scheduler.lower_order_nums < scheduler.config.solver_order: scheduler.lower_order_nums += 1 # upon completion increase step index by one scheduler._step_index += 1 return noise, prev_sample def 
compute_noise(scheduler, *args):
    if isinstance(scheduler, DDIMScheduler):
        return compute_noise_ddim(scheduler, *args)
    elif (
        isinstance(scheduler, DPMSolverMultistepScheduler)
        and scheduler.config.algorithm_type == "sde-dpmsolver++"
        and scheduler.config.solver_order == 2
    ):
        return compute_noise_sde_dpm_pp_2nd(scheduler, *args)
    else:
        raise NotImplementedError(
            "`compute_noise` is only implemented for `DDIMScheduler` and for `DPMSolverMultistepScheduler` with "
            "`algorithm_type='sde-dpmsolver++'` and `solver_order=2`."
        )
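
# A minimal usage sketch for this pipeline (the checkpoint id and the edit prompt are illustrative assumptions,
# not values prescribed by this file). `invert` has to be called before `__call__`; edits are applied to the last
# inverted image(s).
#
#     import torch
#     from diffusers import LEditsPPPipelineStableDiffusion
#     from diffusers.utils import load_image
#
#     pipe = LEditsPPPipelineStableDiffusion.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
#     ).to("cuda")
#     image = load_image("https://example.com/photo.png")  # hypothetical image URL
#     _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.1)
#     edited = pipe(
#         editing_prompt=["sunglasses"],
#         reverse_editing_direction=[False],
#         edit_guidance_scale=[5.0],
#         edit_threshold=[0.9],
#     ).images[0]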
# File: diffusers/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py
# Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved. # Copyright 2024-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------------------------- # More information and citation instructions are available on the # Marigold project website: https://marigoldcomputervision.github.io # -------------------------------------------------------------------------- from dataclasses import dataclass from functools import partial from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch from PIL import Image from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from ...image_processor import PipelineImageInput from ...models import ( AutoencoderKL, UNet2DConditionModel, ) from ...schedulers import ( DDIMScheduler, LCMScheduler, ) from ...utils import ( BaseOutput, is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.import_utils import is_scipy_available from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .marigold_image_processing import MarigoldImageProcessor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import diffusers >>> import torch >>> pipe = diffusers.MarigoldDepthPipeline.from_pretrained( ... "prs-eth/marigold-depth-v1-1", variant="fp16", torch_dtype=torch.float16 ... ).to("cuda") >>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg") >>> depth = pipe(image) >>> vis = pipe.image_processor.visualize_depth(depth.prediction) >>> vis[0].save("einstein_depth.png") >>> depth_16bit = pipe.image_processor.export_depth_to_16bit_png(depth.prediction) >>> depth_16bit[0].save("einstein_depth_16bit.png") ``` """ @dataclass class MarigoldDepthOutput(BaseOutput): """ Output class for Marigold monocular depth prediction pipeline. Args: prediction (`np.ndarray`, `torch.Tensor`): Predicted depth maps with values in the range [0, 1]. The shape is $numimages \times 1 \times height \times width$ for `torch.Tensor` or $numimages \times height \times width \times 1$ for `np.ndarray`. uncertainty (`None`, `np.ndarray`, `torch.Tensor`): Uncertainty maps computed from the ensemble, with values in the range [0, 1]. The shape is $numimages \times 1 \times height \times width$ for `torch.Tensor` or $numimages \times height \times width \times 1$ for `np.ndarray`. latent (`None`, `torch.Tensor`): Latent features corresponding to the predictions, compatible with the `latents` argument of the pipeline. The shape is $numimages * numensemble \times 4 \times latentheight \times latentwidth$. 
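            The `latent` field can be passed back to the pipeline via the `latents` argument of the pipeline call to
            reuse the codes of a previous prediction.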
""" prediction: Union[np.ndarray, torch.Tensor] uncertainty: Union[None, np.ndarray, torch.Tensor] latent: Union[None, torch.Tensor] class MarigoldDepthPipeline(DiffusionPipeline): """ Pipeline for monocular depth estimation using the Marigold method: https://marigoldmonodepth.github.io. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: unet (`UNet2DConditionModel`): Conditional U-Net to denoise the depth latent, conditioned on image latent. vae (`AutoencoderKL`): Variational Auto-Encoder (VAE) Model to encode and decode images and predictions to and from latent representations. scheduler (`DDIMScheduler` or `LCMScheduler`): A scheduler to be used in combination with `unet` to denoise the encoded image latents. text_encoder (`CLIPTextModel`): Text-encoder, for empty text embedding. tokenizer (`CLIPTokenizer`): CLIP tokenizer. prediction_type (`str`, *optional*): Type of predictions made by the model. scale_invariant (`bool`, *optional*): A model property specifying whether the predicted depth maps are scale-invariant. This value must be set in the model config. When used together with the `shift_invariant=True` flag, the model is also called "affine-invariant". NB: overriding this value is not supported. shift_invariant (`bool`, *optional*): A model property specifying whether the predicted depth maps are shift-invariant. This value must be set in the model config. When used together with the `scale_invariant=True` flag, the model is also called "affine-invariant". NB: overriding this value is not supported. default_denoising_steps (`int`, *optional*): The minimum number of denoising diffusion steps that are required to produce a prediction of reasonable quality with the given model. This value must be set in the model config. When the pipeline is called without explicitly setting `num_inference_steps`, the default value is used. This is required to ensure reasonable results with various model flavors compatible with the pipeline, such as those relying on very short denoising schedules (`LCMScheduler`) and those with full diffusion schedules (`DDIMScheduler`). default_processing_resolution (`int`, *optional*): The recommended value of the `processing_resolution` parameter of the pipeline. This value must be set in the model config. When the pipeline is called without explicitly setting `processing_resolution`, the default value is used. This is required to ensure reasonable results with various model flavors trained with varying optimal processing resolution values. """ model_cpu_offload_seq = "text_encoder->unet->vae" supported_prediction_types = ("depth", "disparity") def __init__( self, unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: Union[DDIMScheduler, LCMScheduler], text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, prediction_type: Optional[str] = None, scale_invariant: Optional[bool] = True, shift_invariant: Optional[bool] = True, default_denoising_steps: Optional[int] = None, default_processing_resolution: Optional[int] = None, ): super().__init__() if prediction_type not in self.supported_prediction_types: logger.warning( f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: " f"{self.supported_prediction_types}." 
) self.register_modules( unet=unet, vae=vae, scheduler=scheduler, text_encoder=text_encoder, tokenizer=tokenizer, ) self.register_to_config( prediction_type=prediction_type, scale_invariant=scale_invariant, shift_invariant=shift_invariant, default_denoising_steps=default_denoising_steps, default_processing_resolution=default_processing_resolution, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.scale_invariant = scale_invariant self.shift_invariant = shift_invariant self.default_denoising_steps = default_denoising_steps self.default_processing_resolution = default_processing_resolution self.empty_text_embedding = None self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor) def check_inputs( self, image: PipelineImageInput, num_inference_steps: int, ensemble_size: int, processing_resolution: int, resample_method_input: str, resample_method_output: str, batch_size: int, ensembling_kwargs: Optional[Dict[str, Any]], latents: Optional[torch.Tensor], generator: Optional[Union[torch.Generator, List[torch.Generator]]], output_type: str, output_uncertainty: bool, ) -> int: actual_vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if actual_vae_scale_factor != self.vae_scale_factor: raise ValueError( f"`vae_scale_factor` computed at initialization ({self.vae_scale_factor}) differs from the actual one ({actual_vae_scale_factor})." ) if num_inference_steps is None: raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.") if num_inference_steps < 1: raise ValueError("`num_inference_steps` must be positive.") if ensemble_size < 1: raise ValueError("`ensemble_size` must be positive.") if ensemble_size == 2: logger.warning( "`ensemble_size` == 2 results are similar to no ensembling (1); " "consider increasing the value to at least 3." ) if ensemble_size > 1 and (self.scale_invariant or self.shift_invariant) and not is_scipy_available(): raise ImportError("Make sure to install scipy if you want to use ensembling.") if ensemble_size == 1 and output_uncertainty: raise ValueError( "Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` " "greater than 1." ) if processing_resolution is None: raise ValueError( "`processing_resolution` is not specified and could not be resolved from the model config." ) if processing_resolution < 0: raise ValueError( "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for " "downsampled processing." ) if processing_resolution % self.vae_scale_factor != 0: raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.") if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"): raise ValueError( "`resample_method_input` takes string values compatible with PIL library: " "nearest, nearest-exact, bilinear, bicubic, area." ) if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"): raise ValueError( "`resample_method_output` takes string values compatible with PIL library: " "nearest, nearest-exact, bilinear, bicubic, area." 
) if batch_size < 1: raise ValueError("`batch_size` must be positive.") if output_type not in ["pt", "np"]: raise ValueError("`output_type` must be one of `pt` or `np`.") if latents is not None and generator is not None: raise ValueError("`latents` and `generator` cannot be used together.") if ensembling_kwargs is not None: if not isinstance(ensembling_kwargs, dict): raise ValueError("`ensembling_kwargs` must be a dictionary.") if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("mean", "median"): raise ValueError("`ensembling_kwargs['reduction']` can be either `'mean'` or `'median'`.") # image checks num_images = 0 W, H = None, None if not isinstance(image, list): image = [image] for i, img in enumerate(image): if isinstance(img, np.ndarray) or torch.is_tensor(img): if img.ndim not in (2, 3, 4): raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.") H_i, W_i = img.shape[-2:] N_i = 1 if img.ndim == 4: N_i = img.shape[0] elif isinstance(img, Image.Image): W_i, H_i = img.size N_i = 1 else: raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.") if W is None: W, H = W_i, H_i elif (W, H) != (W_i, H_i): raise ValueError( f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}" ) num_images += N_i # latents checks if latents is not None: if not torch.is_tensor(latents): raise ValueError("`latents` must be a torch.Tensor.") if latents.dim() != 4: raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.") if processing_resolution > 0: max_orig = max(H, W) new_H = H * processing_resolution // max_orig new_W = W * processing_resolution // max_orig if new_H == 0 or new_W == 0: raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]") W, H = new_W, new_H w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w) if latents.shape != shape_expected: raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.") # generator checks if generator is not None: if isinstance(generator, list): if len(generator) != num_images * ensemble_size: raise ValueError( "The number of generators must match the total number of ensemble members for all input images." ) if not all(g.device.type == generator[0].device.type for g in generator): raise ValueError("`generator` device placement is not consistent in the list.") elif not isinstance(generator, torch.Generator): raise ValueError(f"Unsupported generator type: {type(generator)}.") return num_images @torch.compiler.disable def progress_bar(self, iterable=None, total=None, desc=None, leave=True): if not hasattr(self, "_progress_bar_config"): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError( f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." 
) progress_bar_config = dict(**self._progress_bar_config) progress_bar_config["desc"] = progress_bar_config.get("desc", desc) progress_bar_config["leave"] = progress_bar_config.get("leave", leave) if iterable is not None: return tqdm(iterable, **progress_bar_config) elif total is not None: return tqdm(total=total, **progress_bar_config) else: raise ValueError("Either `total` or `iterable` has to be defined.") @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PipelineImageInput, num_inference_steps: Optional[int] = None, ensemble_size: int = 1, processing_resolution: Optional[int] = None, match_input_resolution: bool = True, resample_method_input: str = "bilinear", resample_method_output: str = "bilinear", batch_size: int = 1, ensembling_kwargs: Optional[Dict[str, Any]] = None, latents: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: str = "np", output_uncertainty: bool = False, output_latent: bool = False, return_dict: bool = True, ): """ Function invoked when calling the pipeline. Args: image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`), `List[torch.Tensor]`: An input image or images used as an input for the depth estimation task. For arrays and tensors, the expected value range is between `[0, 1]`. Passing a batch of images is possible by providing a four-dimensional array or a tensor. Additionally, a list of images of two- or three-dimensional arrays or tensors can be passed. In the latter case, all list elements must have the same width and height. num_inference_steps (`int`, *optional*, defaults to `None`): Number of denoising diffusion steps during inference. The default value `None` results in automatic selection. ensemble_size (`int`, defaults to `1`): Number of ensemble predictions. Higher values result in measurable improvements and visual degradation. processing_resolution (`int`, *optional*, defaults to `None`): Effective processing resolution. When set to `0`, matches the larger input image dimension. This produces crisper predictions, but may also lead to the overall loss of global context. The default value `None` resolves to the optimal value from the model config. match_input_resolution (`bool`, *optional*, defaults to `True`): When enabled, the output prediction is resized to match the input dimensions. When disabled, the longer side of the output will equal to `processing_resolution`. resample_method_input (`str`, *optional*, defaults to `"bilinear"`): Resampling method used to resize input images to `processing_resolution`. The accepted values are: `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. resample_method_output (`str`, *optional*, defaults to `"bilinear"`): Resampling method used to resize output predictions to match the input resolution. The accepted values are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. batch_size (`int`, *optional*, defaults to `1`): Batch size; only matters when setting `ensemble_size` or passing a tensor of images. ensembling_kwargs (`dict`, *optional*, defaults to `None`) Extra dictionary with arguments for precise ensembling control. The following options are available: - reduction (`str`, *optional*, defaults to `"median"`): Defines the ensembling function applied in every pixel location, can be either `"median"` or `"mean"`. 
- regularizer_strength (`float`, *optional*, defaults to `0.02`): Strength of the regularizer that pulls the aligned predictions to the unit range from 0 to 1. - max_iter (`int`, *optional*, defaults to `2`): Maximum number of the alignment solver steps. Refer to `scipy.optimize.minimize` function, `options` argument. - tol (`float`, *optional*, defaults to `1e-3`): Alignment solver tolerance. The solver stops when the tolerance is reached. - max_res (`int`, *optional*, defaults to `None`): Resolution at which the alignment is performed; `None` matches the `processing_resolution`. latents (`torch.Tensor`, or `List[torch.Tensor]`, *optional*, defaults to `None`): Latent noise tensors to replace the random initialization. These can be taken from the previous function call's output. generator (`torch.Generator`, or `List[torch.Generator]`, *optional*, defaults to `None`): Random number generator object to ensure reproducibility. output_type (`str`, *optional*, defaults to `"np"`): Preferred format of the output's `prediction` and the optional `uncertainty` fields. The accepted values are: `"np"` (numpy array) or `"pt"` (torch tensor). output_uncertainty (`bool`, *optional*, defaults to `False`): When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided that the `ensemble_size` argument is set to a value above 2. output_latent (`bool`, *optional*, defaults to `False`): When enabled, the output's `latent` field contains the latent codes corresponding to the predictions within the ensemble. These codes can be saved, modified, and used for subsequent calls with the `latents` argument. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.marigold.MarigoldDepthOutput`] instead of a plain tuple. Examples: Returns: [`~pipelines.marigold.MarigoldDepthOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.marigold.MarigoldDepthOutput`] is returned, otherwise a `tuple` is returned where the first element is the prediction, the second element is the uncertainty (or `None`), and the third is the latent (or `None`). """ # 0. Resolving variables. device = self._execution_device dtype = self.dtype # Model-specific optimal default values leading to fast and reasonable results. if num_inference_steps is None: num_inference_steps = self.default_denoising_steps if processing_resolution is None: processing_resolution = self.default_processing_resolution # 1. Check inputs. num_images = self.check_inputs( image, num_inference_steps, ensemble_size, processing_resolution, resample_method_input, resample_method_output, batch_size, ensembling_kwargs, latents, generator, output_type, output_uncertainty, ) # 2. Prepare empty text conditioning. # Model invocation: self.tokenizer, self.text_encoder. if self.empty_text_embedding is None: prompt = "" text_inputs = self.tokenizer( prompt, padding="do_not_pad", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) self.empty_text_embedding = self.text_encoder(text_input_ids)[0] # [1,2,1024] # 3. Preprocess input images. This function loads input image or images of compatible dimensions `(H, W)`, # optionally downsamples them to the `processing_resolution` `(PH, PW)`, where # `max(PH, PW) == processing_resolution`, and pads the dimensions to `(PPH, PPW)` such that these values are # divisible by the latent space downscaling factor (typically 8 in Stable Diffusion). 
The default value `None` # of `processing_resolution` resolves to the optimal value from the model config. It is a recommended mode of # operation and leads to the most reasonable results. Using the native image resolution or any other processing # resolution can lead to loss of either fine details or global context in the output predictions. image, padding, original_resolution = self.image_processor.preprocess( image, processing_resolution, resample_method_input, device, dtype ) # [N,3,PPH,PPW] # 4. Encode input image into latent space. At this step, each of the `N` input images is represented with `E` # ensemble members. Each ensemble member is an independent diffused prediction, just initialized independently. # Latents of each such predictions across all input images and all ensemble members are represented in the # `pred_latent` variable. The variable `image_latent` is of the same shape: it contains each input image encoded # into latent space and replicated `E` times. The latents can be either generated (see `generator` to ensure # reproducibility), or passed explicitly via the `latents` argument. The latter can be set outside the pipeline # code. This behavior can be achieved by setting the `output_latent` argument to `True`. The latent space # dimensions are `(h, w)`. Encoding into latent space happens in batches of size `batch_size`. # Model invocation: self.vae.encoder. image_latent, pred_latent = self.prepare_latents( image, latents, generator, ensemble_size, batch_size ) # [N*E,4,h,w], [N*E,4,h,w] del image batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat( batch_size, 1, 1 ) # [B,1024,2] # 5. Process the denoising loop. All `N * E` latents are processed sequentially in batches of size `batch_size`. # The unet model takes concatenated latent spaces of the input image and the predicted modality as an input, and # outputs noise for the predicted modality's latent space. The number of denoising diffusion steps is defined by # `num_inference_steps`. It is either set directly, or resolves to the optimal value specific to the loaded # model. # Model invocation: self.unet. pred_latents = [] for i in self.progress_bar( range(0, num_images * ensemble_size, batch_size), leave=True, desc="Marigold predictions..." ): batch_image_latent = image_latent[i : i + batch_size] # [B,4,h,w] batch_pred_latent = pred_latent[i : i + batch_size] # [B,4,h,w] effective_batch_size = batch_image_latent.shape[0] text = batch_empty_text_embedding[:effective_batch_size] # [B,2,1024] self.scheduler.set_timesteps(num_inference_steps, device=device) for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc="Diffusion steps..."): batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1) # [B,8,h,w] noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0] # [B,4,h,w] batch_pred_latent = self.scheduler.step( noise, t, batch_pred_latent, generator=generator ).prev_sample # [B,4,h,w] if XLA_AVAILABLE: xm.mark_step() pred_latents.append(batch_pred_latent) pred_latent = torch.cat(pred_latents, dim=0) # [N*E,4,h,w] del ( pred_latents, image_latent, batch_empty_text_embedding, batch_image_latent, batch_pred_latent, text, batch_latent, noise, ) # 6. Decode predictions from latent into pixel space. The resulting `N * E` predictions have shape `(PPH, PPW)`, # which requires slight postprocessing. Decoding into pixel space happens in batches of size `batch_size`. # Model invocation: self.vae.decoder. 
prediction = torch.cat( [ self.decode_prediction(pred_latent[i : i + batch_size]) for i in range(0, pred_latent.shape[0], batch_size) ], dim=0, ) # [N*E,1,PPH,PPW] if not output_latent: pred_latent = None # 7. Remove padding. The output shape is (PH, PW). prediction = self.image_processor.unpad_image(prediction, padding) # [N*E,1,PH,PW] # 8. Ensemble and compute uncertainty (when `output_uncertainty` is set). This code treats each of the `N` # groups of `E` ensemble predictions independently. For each group it computes an ensembled prediction of shape # `(PH, PW)` and an optional uncertainty map of the same dimensions. After computing this pair of outputs for # each group independently, it stacks them respectively into batches of `N` almost final predictions and # uncertainty maps. uncertainty = None if ensemble_size > 1: prediction = prediction.reshape(num_images, ensemble_size, *prediction.shape[1:]) # [N,E,1,PH,PW] prediction = [ self.ensemble_depth( prediction[i], self.scale_invariant, self.shift_invariant, output_uncertainty, **(ensembling_kwargs or {}), ) for i in range(num_images) ] # [ [[1,1,PH,PW], [1,1,PH,PW]], ... ] prediction, uncertainty = zip(*prediction) # [[1,1,PH,PW], ... ], [[1,1,PH,PW], ... ] prediction = torch.cat(prediction, dim=0) # [N,1,PH,PW] if output_uncertainty: uncertainty = torch.cat(uncertainty, dim=0) # [N,1,PH,PW] else: uncertainty = None # 9. If `match_input_resolution` is set, the output prediction and the uncertainty are upsampled to match the # input resolution `(H, W)`. This step may introduce upsampling artifacts, and therefore can be disabled. # Depending on the downstream use-case, upsampling can be also chosen based on the tolerated artifacts by # setting the `resample_method_output` parameter (e.g., to `"nearest"`). if match_input_resolution: prediction = self.image_processor.resize_antialias( prediction, original_resolution, resample_method_output, is_aa=False ) # [N,1,H,W] if uncertainty is not None and output_uncertainty: uncertainty = self.image_processor.resize_antialias( uncertainty, original_resolution, resample_method_output, is_aa=False ) # [N,1,H,W] # 10. Prepare the final outputs. if output_type == "np": prediction = self.image_processor.pt_to_numpy(prediction) # [N,H,W,1] if uncertainty is not None and output_uncertainty: uncertainty = self.image_processor.pt_to_numpy(uncertainty) # [N,H,W,1] # 11. 
Offload all models self.maybe_free_model_hooks() if not return_dict: return (prediction, uncertainty, pred_latent) return MarigoldDepthOutput( prediction=prediction, uncertainty=uncertainty, latent=pred_latent, ) def prepare_latents( self, image: torch.Tensor, latents: Optional[torch.Tensor], generator: Optional[torch.Generator], ensemble_size: int, batch_size: int, ) -> Tuple[torch.Tensor, torch.Tensor]: def retrieve_latents(encoder_output): if hasattr(encoder_output, "latent_dist"): return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") image_latent = torch.cat( [ retrieve_latents(self.vae.encode(image[i : i + batch_size])) for i in range(0, image.shape[0], batch_size) ], dim=0, ) # [N,4,h,w] image_latent = image_latent * self.vae.config.scaling_factor image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) # [N*E,4,h,w] pred_latent = latents if pred_latent is None: pred_latent = randn_tensor( image_latent.shape, generator=generator, device=image_latent.device, dtype=image_latent.dtype, ) # [N*E,4,h,w] return image_latent, pred_latent def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor: if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels: raise ValueError( f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}." ) prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] # [B,3,H,W] prediction = prediction.mean(dim=1, keepdim=True) # [B,1,H,W] prediction = torch.clip(prediction, -1.0, 1.0) # [B,1,H,W] prediction = (prediction + 1.0) / 2.0 return prediction # [B,1,H,W] @staticmethod def ensemble_depth( depth: torch.Tensor, scale_invariant: bool = True, shift_invariant: bool = True, output_uncertainty: bool = False, reduction: str = "median", regularizer_strength: float = 0.02, max_iter: int = 2, tol: float = 1e-3, max_res: int = 1024, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """ Ensembles the depth maps represented by the `depth` tensor with expected shape `(B, 1, H, W)`, where B is the number of ensemble members for a given prediction of size `(H x W)`. Even though the function is designed for depth maps, it can also be used with disparity maps as long as the input tensor values are non-negative. The alignment happens when the predictions have one or more degrees of freedom, that is when they are either affine-invariant (`scale_invariant=True` and `shift_invariant=True`), or just scale-invariant (only `scale_invariant=True`). For absolute predictions (`scale_invariant=False` and `shift_invariant=False`) alignment is skipped and only ensembling is performed. Args: depth (`torch.Tensor`): Input ensemble depth maps. scale_invariant (`bool`, *optional*, defaults to `True`): Whether to treat predictions as scale-invariant. shift_invariant (`bool`, *optional*, defaults to `True`): Whether to treat predictions as shift-invariant. output_uncertainty (`bool`, *optional*, defaults to `False`): Whether to output uncertainty map. reduction (`str`, *optional*, defaults to `"median"`): Reduction method used to ensemble aligned predictions. The accepted values are: `"mean"` and `"median"`. regularizer_strength (`float`, *optional*, defaults to `0.02`): Strength of the regularizer that pulls the aligned predictions to the unit range from 0 to 1. 
max_iter (`int`, *optional*, defaults to `2`): Maximum number of the alignment solver steps. Refer to `scipy.optimize.minimize` function, `options` argument. tol (`float`, *optional*, defaults to `1e-3`): Alignment solver tolerance. The solver stops when the tolerance is reached. max_res (`int`, *optional*, defaults to `1024`): Resolution at which the alignment is performed; `None` matches the `processing_resolution`. Returns: A tensor of aligned and ensembled depth maps and optionally a tensor of uncertainties of the same shape: `(1, 1, H, W)`. """ if depth.dim() != 4 or depth.shape[1] != 1: raise ValueError(f"Expecting 4D tensor of shape [B,1,H,W]; got {depth.shape}.") if reduction not in ("mean", "median"): raise ValueError(f"Unrecognized reduction method: {reduction}.") if not scale_invariant and shift_invariant: raise ValueError("Pure shift-invariant ensembling is not supported.") def init_param(depth: torch.Tensor): init_min = depth.reshape(ensemble_size, -1).min(dim=1).values init_max = depth.reshape(ensemble_size, -1).max(dim=1).values if scale_invariant and shift_invariant: init_s = 1.0 / (init_max - init_min).clamp(min=1e-6) init_t = -init_s * init_min param = torch.cat((init_s, init_t)).cpu().numpy() elif scale_invariant: init_s = 1.0 / init_max.clamp(min=1e-6) param = init_s.cpu().numpy() else: raise ValueError("Unrecognized alignment.") param = param.astype(np.float64) return param def align(depth: torch.Tensor, param: np.ndarray) -> torch.Tensor: if scale_invariant and shift_invariant: s, t = np.split(param, 2) s = torch.from_numpy(s).to(depth).view(ensemble_size, 1, 1, 1) t = torch.from_numpy(t).to(depth).view(ensemble_size, 1, 1, 1) out = depth * s + t elif scale_invariant: s = torch.from_numpy(param).to(depth).view(ensemble_size, 1, 1, 1) out = depth * s else: raise ValueError("Unrecognized alignment.") return out def ensemble( depth_aligned: torch.Tensor, return_uncertainty: bool = False ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: uncertainty = None if reduction == "mean": prediction = torch.mean(depth_aligned, dim=0, keepdim=True) if return_uncertainty: uncertainty = torch.std(depth_aligned, dim=0, keepdim=True) elif reduction == "median": prediction = torch.median(depth_aligned, dim=0, keepdim=True).values if return_uncertainty: uncertainty = torch.median(torch.abs(depth_aligned - prediction), dim=0, keepdim=True).values else: raise ValueError(f"Unrecognized reduction method: {reduction}.") return prediction, uncertainty def cost_fn(param: np.ndarray, depth: torch.Tensor) -> float: cost = 0.0 depth_aligned = align(depth, param) for i, j in torch.combinations(torch.arange(ensemble_size)): diff = depth_aligned[i] - depth_aligned[j] cost += (diff**2).mean().sqrt().item() if regularizer_strength > 0: prediction, _ = ensemble(depth_aligned, return_uncertainty=False) err_near = prediction.min().abs().item() err_far = (1.0 - prediction.max()).abs().item() cost += (err_near + err_far) * regularizer_strength return cost def compute_param(depth: torch.Tensor): import scipy depth_to_align = depth.to(torch.float32) if max_res is not None and max(depth_to_align.shape[2:]) > max_res: depth_to_align = MarigoldImageProcessor.resize_to_max_edge(depth_to_align, max_res, "nearest-exact") param = init_param(depth_to_align) res = scipy.optimize.minimize( partial(cost_fn, depth=depth_to_align), param, method="BFGS", tol=tol, options={"maxiter": max_iter, "disp": False}, ) return res.x requires_aligning = scale_invariant or shift_invariant ensemble_size = depth.shape[0] if 
requires_aligning: param = compute_param(depth) depth = align(depth, param) depth, uncertainty = ensemble(depth, return_uncertainty=output_uncertainty) depth_max = depth.max() if scale_invariant and shift_invariant: depth_min = depth.min() elif scale_invariant: depth_min = 0 else: raise ValueError("Unrecognized alignment.") depth_range = (depth_max - depth_min).clamp(min=1e-6) depth = (depth - depth_min) / depth_range if output_uncertainty: uncertainty /= depth_range return depth, uncertainty # [1,1,H,W], [1,1,H,W]
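# --- Illustrative usage sketch (editor's addition, not part of the upstream file) ---
# A minimal, hedged example of the static `ensemble_depth` method defined above:
# it ensembles five random affine-invariant "depth maps" for a single image and
# returns an aligned prediction plus an uncertainty map. The class name
# `MarigoldDepthPipeline` is assumed from the file name; the random inputs stand
# in for real per-sample predictions from the denoising loop. Requires `scipy`
# for the alignment solver; run in package context, e.g.
# `python -m diffusers.pipelines.marigold.pipeline_marigold_depth`.
if __name__ == "__main__":
    _depth = torch.rand(5, 1, 64, 64)  # [E,1,H,W]: an ensemble of five maps, values in [0,1)
    _prediction, _uncertainty = MarigoldDepthPipeline.ensemble_depth(
        _depth,
        scale_invariant=True,
        shift_invariant=True,
        output_uncertainty=True,
        reduction="median",
    )
    print(_prediction.shape, _uncertainty.shape)  # [1,1,64,64] each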
diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class PaintByExampleImageEncoder(CLIPPreTrainedModel): def __init__(self, config, proj_size=None): super().__init__(config) self.proj_size = proj_size or getattr(config, "projection_dim", 768) self.model = CLIPVisionModel(config) self.mapper = PaintByExampleMapper(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size) self.proj_out = nn.Linear(config.hidden_size, self.proj_size) # uncondition for scaling self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size))) def forward(self, pixel_values, return_uncond_vector=False): clip_output = self.model(pixel_values=pixel_values) latent_states = clip_output.pooler_output latent_states = self.mapper(latent_states[:, None]) latent_states = self.final_layer_norm(latent_states) latent_states = self.proj_out(latent_states) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class PaintByExampleMapper(nn.Module): def __init__(self, config): super().__init__() num_layers = (config.num_hidden_layers + 1) // 5 hid_size = config.hidden_size num_heads = 1 self.blocks = nn.ModuleList( [ BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True) for _ in range(num_layers) ] ) def forward(self, hidden_states): for block in self.blocks: hidden_states = block(hidden_states) return hidden_states
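# --- Illustrative usage sketch (editor's addition, not part of the upstream file) ---
# A minimal, hedged example of running `PaintByExampleImageEncoder` on random
# pixel values with a default `CLIPVisionConfig`. The config defaults and input
# shape are assumptions for demonstration only; real checkpoints supply their
# own configuration. Run in package context, e.g.
# `python -m diffusers.pipelines.paint_by_example.image_encoder`.
if __name__ == "__main__":
    from transformers import CLIPVisionConfig

    _config = CLIPVisionConfig()
    _encoder = PaintByExampleImageEncoder(_config).eval()
    _pixel_values = torch.randn(1, 3, _config.image_size, _config.image_size)
    with torch.no_grad():
        _latents, _uncond = _encoder(_pixel_values, return_uncond_vector=True)
    # Both outputs have shape [1, 1, proj_size], where proj_size defaults to
    # `config.projection_dim` (see `__init__` above).
    print(_latents.shape, _uncond.shape)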
diffusers/src/diffusers/pipelines/paint_by_example/image_encoder.py
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the Stable Diffusion checkpoints.""" import re from contextlib import nullcontext from io import BytesIO from typing import Dict, Optional, Union import requests import torch import yaml from transformers import ( AutoFeatureExtractor, BertTokenizerFast, CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from ...models import ( AutoencoderKL, ControlNetModel, PriorTransformer, UNet2DConditionModel, ) from ...schedulers import ( DDIMScheduler, DDPMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, UnCLIPScheduler, ) from ...utils import is_accelerate_available, logging from ...utils.constants import DIFFUSERS_REQUEST_TIMEOUT from ...utils.torch_utils import get_device from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel from ..paint_by_example import PaintByExampleImageEncoder from ..pipeline_utils import DiffusionPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import set_module_tensor_to_device logger = logging.get_logger(__name__) # pylint: disable=invalid-name def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. 
""" if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("nin_shortcut", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("q.weight", "to_q.weight") new_item = new_item.replace("q.bias", "to_q.bias") new_item = new_item.replace("k.weight", "to_k.weight") new_item = new_item.replace("k.bias", "to_k.bias") new_item = new_item.replace("v.weight", "to_v.weight") new_item = new_item.replace("v.bias", "to_v.bias") new_item = new_item.replace("proj_out.weight", "to_out.0.weight") new_item = new_item.replace("proj_out.bias", "to_out.0.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path) shape = old_checkpoint[path["old"]].shape if is_attn_weight and len(shape) == 3: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] elif is_attn_weight and len(shape) == 4: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0] else: checkpoint[new_path] = old_checkpoint[path["old"]] def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ["query.weight", "key.weight", "value.weight"] for key in keys: if ".".join(key.split(".")[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif "proj_attn.weight" in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def create_unet_diffusers_config(original_config, image_size: int, controlnet=False): """ Creates a config for the diffusers based on the config of the LDM model. 
""" if controlnet: unet_params = original_config["model"]["params"]["control_stage_config"]["params"] else: if ( "unet_config" in original_config["model"]["params"] and original_config["model"]["params"]["unet_config"] is not None ): unet_params = original_config["model"]["params"]["unet_config"]["params"] else: unet_params = original_config["model"]["params"]["network_config"]["params"] vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" up_block_types.append(block_type) resolution //= 2 if unet_params["transformer_depth"] is not None: transformer_layers_per_block = ( unet_params["transformer_depth"] if isinstance(unet_params["transformer_depth"], int) else list(unet_params["transformer_depth"]) ) else: transformer_layers_per_block = 1 vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None use_linear_projection = ( unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False ) if use_linear_projection: # stable diffusion 2-base-512 and 2-768 if head_dim is None: head_dim_mult = unet_params["model_channels"] // unet_params["num_head_channels"] head_dim = [head_dim_mult * c for c in list(unet_params["channel_mult"])] class_embed_type = None addition_embed_type = None addition_time_embed_dim = None projection_class_embeddings_input_dim = None context_dim = None if unet_params["context_dim"] is not None: context_dim = ( unet_params["context_dim"] if isinstance(unet_params["context_dim"], int) else unet_params["context_dim"][0] ) if "num_classes" in unet_params: if unet_params["num_classes"] == "sequential": if context_dim in [2048, 1280]: # SDXL addition_embed_type = "text_time" addition_time_embed_dim = 256 else: class_embed_type = "projection" assert "adm_in_channels" in unet_params projection_class_embeddings_input_dim = unet_params["adm_in_channels"] config = { "sample_size": image_size // vae_scale_factor, "in_channels": unet_params["in_channels"], "down_block_types": tuple(down_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params["num_res_blocks"], "cross_attention_dim": context_dim, "attention_head_dim": head_dim, "use_linear_projection": use_linear_projection, "class_embed_type": class_embed_type, "addition_embed_type": addition_embed_type, "addition_time_embed_dim": addition_time_embed_dim, "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, "transformer_layers_per_block": transformer_layers_per_block, } if "disable_self_attentions" in unet_params: config["only_cross_attention"] = unet_params["disable_self_attentions"] if "num_classes" in unet_params and isinstance(unet_params["num_classes"], int): config["num_class_embeds"] = unet_params["num_classes"] if controlnet: config["conditioning_channels"] = unet_params["hint_channels"] else: config["out_channels"] = unet_params["out_channels"] config["up_block_types"] = 
tuple(up_block_types) return config def create_vae_diffusers_config(original_config, image_size: int): """ Creates a config for the diffusers based on the config of the LDM model. """ vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] _ = original_config["model"]["params"]["first_stage_config"]["params"]["embed_dim"] block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] down_block_types = [ "DownEncoderBlock2D" if image_size // 2**i not in vae_params["attn_resolutions"] else "AttnDownEncoderBlock2D" for i, _ in enumerate(block_out_channels) ] up_block_types = [ "UpDecoderBlock2D" if image_size // 2**i not in vae_params["attn_resolutions"] else "AttnUpDecoderBlock2D" for i, _ in enumerate(block_out_channels) ][::-1] config = { "sample_size": image_size, "in_channels": vae_params["in_channels"], "out_channels": vae_params["out_ch"], "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "latent_channels": vae_params["z_channels"], "layers_per_block": vae_params["num_res_blocks"], } return config def create_diffusers_schedular(original_config): schedular = DDIMScheduler( num_train_timesteps=original_config["model"]["params"]["timesteps"], beta_start=original_config["model"]["params"]["linear_start"], beta_end=original_config["model"]["params"]["linear_end"], beta_schedule="scaled_linear", ) return schedular def create_ldm_bert_config(original_config): bert_params = original_config["model"]["params"]["cond_stage_config"]["params"] config = LDMBertConfig( d_model=bert_params.n_embed, encoder_layers=bert_params.n_layer, encoder_ffn_dim=bert_params.n_embed * 4, ) return config def convert_ldm_unet_checkpoint( checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False ): """ Takes a state dict and a config, and returns a converted checkpoint. """ if skip_extract_state_dict: unet_state_dict = checkpoint else: # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) if controlnet: unet_key = "control_model." else: unet_key = "model.diffusion_model." # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.") logger.warning( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: logger.warning( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
) for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] if config["class_embed_type"] is None: # No parameters to port ... elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection": new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] else: raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") if config["addition_embed_type"] == "text_time": new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] # Relevant to StableDiffusionUpscalePipeline if "num_class_embeds" in config: if (config["num_class_embeds"] is not None) and ("label_emb.weight" in unet_state_dict): new_checkpoint["class_embedding.weight"] = unet_state_dict["label_emb.weight"] new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] if not controlnet: new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in unet_state_dict: 
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.bias" ) paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) attentions_paths = renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) output_block_list = {k: sorted(v) for k, v in sorted(output_block_list.items())} if ["conv.bias", "conv.weight"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. 
if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = unet_state_dict[old_path] if controlnet: # conditioning embedding orig_index = 0 new_checkpoint["controlnet_cond_embedding.conv_in.weight"] = unet_state_dict.pop( f"input_hint_block.{orig_index}.weight" ) new_checkpoint["controlnet_cond_embedding.conv_in.bias"] = unet_state_dict.pop( f"input_hint_block.{orig_index}.bias" ) orig_index += 2 diffusers_index = 0 while diffusers_index < 6: new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.weight"] = unet_state_dict.pop( f"input_hint_block.{orig_index}.weight" ) new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.bias"] = unet_state_dict.pop( f"input_hint_block.{orig_index}.bias" ) diffusers_index += 1 orig_index += 2 new_checkpoint["controlnet_cond_embedding.conv_out.weight"] = unet_state_dict.pop( f"input_hint_block.{orig_index}.weight" ) new_checkpoint["controlnet_cond_embedding.conv_out.bias"] = unet_state_dict.pop( f"input_hint_block.{orig_index}.bias" ) # down blocks for i in range(num_input_blocks): new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = unet_state_dict.pop(f"zero_convs.{i}.0.weight") new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = unet_state_dict.pop(f"zero_convs.{i}.0.bias") # mid block new_checkpoint["controlnet_mid_block.weight"] = unet_state_dict.pop("middle_block_out.0.weight") new_checkpoint["controlnet_mid_block.bias"] = unet_state_dict.pop("middle_block_out.0.bias") return new_checkpoint def convert_ldm_vae_checkpoint(checkpoint, config): # extract state dict for VAE vae_state_dict = {} keys = list(checkpoint.keys()) vae_key = "first_stage_model." 
if any(k.startswith("first_stage_model.") for k in keys) else "" for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) new_checkpoint = {} new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) 
conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) return new_checkpoint def convert_ldm_bert_checkpoint(checkpoint, config): def _copy_attn_layer(hf_attn_layer, pt_attn_layer): hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias def _copy_linear(hf_linear, pt_linear): hf_linear.weight = pt_linear.weight hf_linear.bias = pt_linear.bias def _copy_layer(hf_layer, pt_layer): # copy layer norms _copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0]) _copy_linear(hf_layer.final_layer_norm, pt_layer[1][0]) # copy attn _copy_attn_layer(hf_layer.self_attn, pt_layer[0][1]) # copy MLP pt_mlp = pt_layer[1][1] _copy_linear(hf_layer.fc1, pt_mlp.net[0][0]) _copy_linear(hf_layer.fc2, pt_mlp.net[2]) def _copy_layers(hf_layers, pt_layers): for i, hf_layer in enumerate(hf_layers): if i != 0: i += i pt_layer = pt_layers[i : i + 2] _copy_layer(hf_layer, pt_layer) hf_model = LDMBertModel(config).eval() # copy embeds hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight # copy layer norm _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm) # copy hidden layers _copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers) _copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits) return hf_model def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None): if text_encoder is None: config_name = "openai/clip-vit-large-patch14" try: config = CLIPTextConfig.from_pretrained(config_name, local_files_only=local_files_only) except Exception: raise ValueError( f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: 'openai/clip-vit-large-patch14'." 
            )

        ctx = init_empty_weights if is_accelerate_available() else nullcontext
        with ctx():
            text_model = CLIPTextModel(config)
    else:
        text_model = text_encoder

    keys = list(checkpoint.keys())
    text_model_dict = {}

    remove_prefixes = ["cond_stage_model.transformer", "conditioner.embedders.0.transformer"]

    for key in keys:
        for prefix in remove_prefixes:
            if key.startswith(prefix):
                text_model_dict[key[len(prefix + ".") :]] = checkpoint[key]

    if is_accelerate_available():
        for param_name, param in text_model_dict.items():
            set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
    else:
        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
            text_model_dict.pop("text_model.embeddings.position_ids", None)

        text_model.load_state_dict(text_model_dict)

    return text_model


textenc_conversion_lst = [
    ("positional_embedding", "text_model.embeddings.position_embedding.weight"),
    ("token_embedding.weight", "text_model.embeddings.token_embedding.weight"),
    ("ln_final.weight", "text_model.final_layer_norm.weight"),
    ("ln_final.bias", "text_model.final_layer_norm.bias"),
    ("text_projection", "text_projection.weight"),
]
textenc_conversion_map = {x[0]: x[1] for x in textenc_conversion_lst}

textenc_transformer_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))


def convert_paint_by_example_checkpoint(checkpoint, local_files_only=False):
    config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only)
    model = PaintByExampleImageEncoder(config)

    keys = list(checkpoint.keys())

    text_model_dict = {}

    for key in keys:
        if key.startswith("cond_stage_model.transformer"):
            text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key]

    # load clip vision
    model.model.load_state_dict(text_model_dict)

    # load mapper
    keys_mapper = {
        k[len("cond_stage_model.mapper.res") :]: v
        for k, v in checkpoint.items()
        if k.startswith("cond_stage_model.mapper")
    }

    MAPPING = {
        "attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"],
        "attn.c_proj": ["attn1.to_out.0"],
        "ln_1": ["norm1"],
        "ln_2": ["norm3"],
        "mlp.c_fc": ["ff.net.0.proj"],
        "mlp.c_proj": ["ff.net.2"],
    }

    mapped_weights = {}
    for key, value in keys_mapper.items():
        prefix = key[: len("blocks.i")]
        suffix = key.split(prefix)[-1].split(".")[-1]
        name = key.split(prefix)[-1].split(suffix)[0][1:-1]
        mapped_names = MAPPING[name]

        num_splits = len(mapped_names)
        for i, mapped_name in enumerate(mapped_names):
            new_name = ".".join([prefix, mapped_name, suffix])
            shape = value.shape[0] // num_splits
            mapped_weights[new_name] = value[i * shape : (i + 1) * shape]

    model.mapper.load_state_dict(mapped_weights)

    # load final layer norm
    model.final_layer_norm.load_state_dict(
        {
            "bias": checkpoint["cond_stage_model.final_ln.bias"],
            "weight": checkpoint["cond_stage_model.final_ln.weight"],
        }
    )

    # load final proj
    model.proj_out.load_state_dict(
        {
            "bias": checkpoint["proj_out.bias"],
            "weight": checkpoint["proj_out.weight"],
        }
    )

    # load uncond vector
    model.uncond_vector.data =
torch.nn.Parameter(checkpoint["learnable_vector"])
    return model


def convert_open_clip_checkpoint(
    checkpoint,
    config_name,
    prefix="cond_stage_model.model.",
    has_projection=False,
    local_files_only=False,
    **config_kwargs,
):
    # text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder")
    # text_model = CLIPTextModelWithProjection.from_pretrained(
    #     "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280
    # )
    try:
        config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs, local_files_only=local_files_only)
    except Exception:
        raise ValueError(
            f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: '{config_name}'."
        )

    ctx = init_empty_weights if is_accelerate_available() else nullcontext
    with ctx():
        text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config)

    keys = list(checkpoint.keys())

    keys_to_ignore = []
    if config_name == "stabilityai/stable-diffusion-2" and config.num_hidden_layers == 23:
        # make sure to remove all keys > 22
        keys_to_ignore += [k for k in keys if k.startswith("cond_stage_model.model.transformer.resblocks.23")]
        keys_to_ignore += ["cond_stage_model.model.text_projection"]

    text_model_dict = {}

    if prefix + "text_projection" in checkpoint:
        d_model = int(checkpoint[prefix + "text_projection"].shape[0])
    else:
        d_model = 1024

    text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids")

    for key in keys:
        if key in keys_to_ignore:
            continue

        if key[len(prefix) :] in textenc_conversion_map:
            if key.endswith("text_projection"):
                value = checkpoint[key].T.contiguous()
            else:
                value = checkpoint[key]

            text_model_dict[textenc_conversion_map[key[len(prefix) :]]] = value

        if key.startswith(prefix + "transformer."):
            new_key = key[len(prefix + "transformer.") :]
            if new_key.endswith(".in_proj_weight"):
                new_key = new_key[: -len(".in_proj_weight")]
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
                text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :]
                text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :]
                text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :]
            elif new_key.endswith(".in_proj_bias"):
                new_key = new_key[: -len(".in_proj_bias")]
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
                text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model]
                text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2]
                text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :]
            else:
                new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)

                text_model_dict[new_key] = checkpoint[key]

    if is_accelerate_available():
        for param_name, param in text_model_dict.items():
            set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
    else:
        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
            text_model_dict.pop("text_model.embeddings.position_ids", None)

        text_model.load_state_dict(text_model_dict)

    return text_model


def stable_unclip_image_encoder(original_config, local_files_only=False):
    """
    Returns the image processor and clip image encoder for the img2img unclip pipeline.

    We currently know of two types of stable unclip models which separately use the clip and the openclip image
    encoders.
""" image_embedder_config = original_config["model"]["params"]["embedder_config"] sd_clip_image_embedder_class = image_embedder_config["target"] sd_clip_image_embedder_class = sd_clip_image_embedder_class.split(".")[-1] if sd_clip_image_embedder_class == "ClipImageEmbedder": clip_model_name = image_embedder_config.params.model if clip_model_name == "ViT-L/14": feature_extractor = CLIPImageProcessor() image_encoder = CLIPVisionModelWithProjection.from_pretrained( "openai/clip-vit-large-patch14", local_files_only=local_files_only ) else: raise NotImplementedError(f"Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}") elif sd_clip_image_embedder_class == "FrozenOpenCLIPImageEmbedder": feature_extractor = CLIPImageProcessor() image_encoder = CLIPVisionModelWithProjection.from_pretrained( "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", local_files_only=local_files_only ) else: raise NotImplementedError( f"Unknown CLIP image embedder class in stable diffusion checkpoint {sd_clip_image_embedder_class}" ) return feature_extractor, image_encoder def stable_unclip_image_noising_components( original_config, clip_stats_path: Optional[str] = None, device: Optional[str] = None ): """ Returns the noising components for the img2img and txt2img unclip pipelines. Converts the stability noise augmentor into 1. a `StableUnCLIPImageNormalizer` for holding the CLIP stats 2. a `DDPMScheduler` for holding the noise schedule If the noise augmentor config specifies a clip stats path, the `clip_stats_path` must be provided. """ noise_aug_config = original_config["model"]["params"]["noise_aug_config"] noise_aug_class = noise_aug_config["target"] noise_aug_class = noise_aug_class.split(".")[-1] if noise_aug_class == "CLIPEmbeddingNoiseAugmentation": noise_aug_config = noise_aug_config.params embedding_dim = noise_aug_config.timestep_dim max_noise_level = noise_aug_config.noise_schedule_config.timesteps beta_schedule = noise_aug_config.noise_schedule_config.beta_schedule image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim) image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule) if "clip_stats_path" in noise_aug_config: if clip_stats_path is None: raise ValueError("This stable unclip config requires a `clip_stats_path`") clip_mean, clip_std = torch.load(clip_stats_path, map_location=device) clip_mean = clip_mean[None, :] clip_std = clip_std[None, :] clip_stats_state_dict = { "mean": clip_mean, "std": clip_std, } image_normalizer.load_state_dict(clip_stats_state_dict) else: raise NotImplementedError(f"Unknown noise augmentor class: {noise_aug_class}") return image_normalizer, image_noising_scheduler def convert_controlnet_checkpoint( checkpoint, original_config, checkpoint_path, image_size, upcast_attention, extract_ema, use_linear_projection=None, cross_attention_dim=None, ): ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True) ctrlnet_config["upcast_attention"] = upcast_attention ctrlnet_config.pop("sample_size") if use_linear_projection is not None: ctrlnet_config["use_linear_projection"] = use_linear_projection if cross_attention_dim is not None: ctrlnet_config["cross_attention_dim"] = cross_attention_dim ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): controlnet = ControlNetModel(**ctrlnet_config) # Some controlnet ckpt files are distributed independently from the rest of the # model components i.e. 
https://huggingface.co/thibaud/controlnet-sd21/ if "time_embed.0.weight" in checkpoint: skip_extract_state_dict = True else: skip_extract_state_dict = False converted_ctrl_checkpoint = convert_ldm_unet_checkpoint( checkpoint, ctrlnet_config, path=checkpoint_path, extract_ema=extract_ema, controlnet=True, skip_extract_state_dict=skip_extract_state_dict, ) if is_accelerate_available(): for param_name, param in converted_ctrl_checkpoint.items(): set_module_tensor_to_device(controlnet, param_name, "cpu", value=param) else: controlnet.load_state_dict(converted_ctrl_checkpoint) return controlnet def download_from_original_stable_diffusion_ckpt( checkpoint_path_or_dict: Union[str, Dict[str, torch.Tensor]], original_config_file: str = None, image_size: Optional[int] = None, prediction_type: str = None, model_type: str = None, extract_ema: bool = False, scheduler_type: str = "pndm", num_in_channels: Optional[int] = None, upcast_attention: Optional[bool] = None, device: str = None, from_safetensors: bool = False, stable_unclip: Optional[str] = None, stable_unclip_prior: Optional[str] = None, clip_stats_path: Optional[str] = None, controlnet: Optional[bool] = None, adapter: Optional[bool] = None, load_safety_checker: bool = True, safety_checker: Optional[StableDiffusionSafetyChecker] = None, feature_extractor: Optional[AutoFeatureExtractor] = None, pipeline_class: DiffusionPipeline = None, local_files_only=False, vae_path=None, vae=None, text_encoder=None, text_encoder_2=None, tokenizer=None, tokenizer_2=None, config_files=None, ) -> DiffusionPipeline: """ Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml` config file. Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is recommended that you override the default values and/or supply an `original_config_file` wherever possible. Args: checkpoint_path_or_dict (`str` or `dict`): Path to `.ckpt` file, or the state dict. original_config_file (`str`): Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically inferred by looking for a key that only exists in SD2.0 models. image_size (`int`, *optional*, defaults to 512): The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2 Base. Use 768 for Stable Diffusion v2. prediction_type (`str`, *optional*): The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable Diffusion v2 Base. Use `'v_prediction'` for Stable Diffusion v2. num_in_channels (`int`, *optional*, defaults to None): The number of input channels. If `None`, it will be automatically inferred. scheduler_type (`str`, *optional*, defaults to 'pndm'): Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm", "ddim"]`. model_type (`str`, *optional*, defaults to `None`): The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder", "FrozenCLIPEmbedder", "PaintByExample"]`. is_img2img (`bool`, *optional*, defaults to `False`): Whether the model should be loaded as an img2img pipeline. extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to `False`. Pass `True` to extract the EMA weights. 
            EMA weights usually yield higher quality images for inference. Non-EMA weights are usually better to
            continue fine-tuning.
        upcast_attention (`bool`, *optional*, defaults to `None`):
            Whether the attention computation should always be upcasted. This is necessary when running stable
            diffusion 2.1.
        device (`str`, *optional*, defaults to `None`):
            The device to use. Pass `None` to determine automatically.
        from_safetensors (`bool`, *optional*, defaults to `False`):
            If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.
        load_safety_checker (`bool`, *optional*, defaults to `True`):
            Whether to load the safety checker or not. Defaults to `True`.
        safety_checker (`StableDiffusionSafetyChecker`, *optional*, defaults to `None`):
            Safety checker to use. If this parameter is `None`, the function will load a new instance of
            [StableDiffusionSafetyChecker] by itself, if needed.
        feature_extractor (`AutoFeatureExtractor`, *optional*, defaults to `None`):
            Feature extractor to use. If this parameter is `None`, the function will load a new instance of
            [AutoFeatureExtractor] by itself, if needed.
        pipeline_class (`str`, *optional*, defaults to `None`):
            The pipeline class to use. Pass `None` to determine automatically.
        local_files_only (`bool`, *optional*, defaults to `False`):
            Whether or not to only look at local files (i.e., do not try to download the model).
        vae (`AutoencoderKL`, *optional*, defaults to `None`):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
            this parameter is `None`, the function will load a new instance of [AutoencoderKL] by itself, if needed.
        text_encoder (`CLIPTextModel`, *optional*, defaults to `None`):
            An instance of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel)
            to use, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)
            variant. If this parameter is `None`, the function will load a new instance of [CLIP] by itself, if
            needed.
        tokenizer (`CLIPTokenizer`, *optional*, defaults to `None`):
            An instance of
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer)
            to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself,
            if needed.
        config_files (`Dict[str, str]`, *optional*, defaults to `None`):
            A dictionary mapping from config file names to their contents. If this parameter is `None`, the function
            will load the config files by itself, if needed. Valid keys are:
                - `v1`: Config file for Stable Diffusion v1
                - `v2`: Config file for Stable Diffusion v2
                - `xl`: Config file for Stable Diffusion XL
                - `xl_refiner`: Config file for Stable Diffusion XL Refiner
    return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file.
""" # import pipelines here to avoid circular import error when using from_single_file method from diffusers import ( LDMTextToImagePipeline, PaintByExamplePipeline, StableDiffusionControlNetPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionUpscalePipeline, StableDiffusionXLControlNetInpaintPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, StableUnCLIPImg2ImgPipeline, StableUnCLIPPipeline, ) if prediction_type == "v-prediction": prediction_type = "v_prediction" if isinstance(checkpoint_path_or_dict, str): if from_safetensors: from safetensors.torch import load_file as safe_load checkpoint = safe_load(checkpoint_path_or_dict, device="cpu") else: if device is None: device = get_device() checkpoint = torch.load(checkpoint_path_or_dict, map_location=device) else: checkpoint = torch.load(checkpoint_path_or_dict, map_location=device) elif isinstance(checkpoint_path_or_dict, dict): checkpoint = checkpoint_path_or_dict # Sometimes models don't have the global_step item if "global_step" in checkpoint: global_step = checkpoint["global_step"] else: logger.debug("global_step key not found in model") global_step = None # NOTE: this while loop isn't great but this controlnet checkpoint has one additional # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21 while "state_dict" in checkpoint: checkpoint = checkpoint["state_dict"] if original_config_file is None: key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias" key_name_sd_xl_refiner = "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias" is_upscale = pipeline_class == StableDiffusionUpscalePipeline config_url = None # model_type = "v1" if config_files is not None and "v1" in config_files: original_config_file = config_files["v1"] else: config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024: # model_type = "v2" if config_files is not None and "v2" in config_files: original_config_file = config_files["v2"] else: config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml" if global_step == 110000: # v2.1 needs to upcast attention upcast_attention = True elif key_name_sd_xl_base in checkpoint: # only base xl has two text embedders if config_files is not None and "xl" in config_files: original_config_file = config_files["xl"] else: config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" elif key_name_sd_xl_refiner in checkpoint: # only refiner xl has embedder and one text embedders if config_files is not None and "xl_refiner" in config_files: original_config_file = config_files["xl_refiner"] else: config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml" if is_upscale: config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml" if config_url is not None: original_config_file = BytesIO(requests.get(config_url, timeout=DIFFUSERS_REQUEST_TIMEOUT).content) else: with open(original_config_file, "r") as f: original_config_file = f.read() else: with open(original_config_file, "r") as f: original_config_file = 
f.read() original_config = yaml.safe_load(original_config_file) # Convert the text model. if ( model_type is None and "cond_stage_config" in original_config["model"]["params"] and original_config["model"]["params"]["cond_stage_config"] is not None ): model_type = original_config["model"]["params"]["cond_stage_config"]["target"].split(".")[-1] logger.debug(f"no `model_type` given, `model_type` inferred as: {model_type}") elif model_type is None and original_config["model"]["params"]["network_config"] is not None: if original_config["model"]["params"]["network_config"]["params"]["context_dim"] == 2048: model_type = "SDXL" else: model_type = "SDXL-Refiner" if image_size is None: image_size = 1024 if pipeline_class is None: # Check if we have a SDXL or SD model and initialize default pipeline if model_type not in ["SDXL", "SDXL-Refiner"]: pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline else: pipeline_class = StableDiffusionXLPipeline if model_type == "SDXL" else StableDiffusionXLImg2ImgPipeline if num_in_channels is None and pipeline_class in [ StableDiffusionInpaintPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLControlNetInpaintPipeline, ]: num_in_channels = 9 if num_in_channels is None and pipeline_class == StableDiffusionUpscalePipeline: num_in_channels = 7 elif num_in_channels is None: num_in_channels = 4 if "unet_config" in original_config["model"]["params"]: original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels elif "network_config" in original_config["model"]["params"]: original_config["model"]["params"]["network_config"]["params"]["in_channels"] = num_in_channels if ( "parameterization" in original_config["model"]["params"] and original_config["model"]["params"]["parameterization"] == "v" ): if prediction_type is None: # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"` # as it relies on a brittle global step parameter here prediction_type = "epsilon" if global_step == 875000 else "v_prediction" if image_size is None: # NOTE: For stable diffusion 2 base one has to pass `image_size==512` # as it relies on a brittle global step parameter here image_size = 512 if global_step == 875000 else 768 else: if prediction_type is None: prediction_type = "epsilon" if image_size is None: image_size = 512 if controlnet is None and "control_stage_config" in original_config["model"]["params"]: path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else "" controlnet = convert_controlnet_checkpoint( checkpoint, original_config, path, image_size, upcast_attention, extract_ema ) if "timesteps" in original_config["model"]["params"]: num_train_timesteps = original_config["model"]["params"]["timesteps"] else: num_train_timesteps = 1000 if model_type in ["SDXL", "SDXL-Refiner"]: scheduler_dict = { "beta_schedule": "scaled_linear", "beta_start": 0.00085, "beta_end": 0.012, "interpolation_type": "linear", "num_train_timesteps": num_train_timesteps, "prediction_type": "epsilon", "sample_max_value": 1.0, "set_alpha_to_one": False, "skip_prk_steps": True, "steps_offset": 1, "timestep_spacing": "leading", } scheduler = EulerDiscreteScheduler.from_config(scheduler_dict) scheduler_type = "euler" else: if "linear_start" in original_config["model"]["params"]: beta_start = original_config["model"]["params"]["linear_start"] else: beta_start = 0.02 if "linear_end" in original_config["model"]["params"]: beta_end = original_config["model"]["params"]["linear_end"] 
else: beta_end = 0.085 scheduler = DDIMScheduler( beta_end=beta_end, beta_schedule="scaled_linear", beta_start=beta_start, num_train_timesteps=num_train_timesteps, steps_offset=1, clip_sample=False, set_alpha_to_one=False, prediction_type=prediction_type, ) # make sure scheduler works correctly with DDIM scheduler.register_to_config(clip_sample=False) if scheduler_type == "pndm": config = dict(scheduler.config) config["skip_prk_steps"] = True scheduler = PNDMScheduler.from_config(config) elif scheduler_type == "lms": scheduler = LMSDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "heun": scheduler = HeunDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "euler": scheduler = EulerDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "euler-ancestral": scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == "dpm": scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) elif scheduler_type == "ddim": scheduler = scheduler else: raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") if pipeline_class == StableDiffusionUpscalePipeline: image_size = original_config["model"]["params"]["unet_config"]["params"]["image_size"] # Convert the UNet2DConditionModel model. unet_config = create_unet_diffusers_config(original_config, image_size=image_size) unet_config["upcast_attention"] = upcast_attention path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else "" converted_unet_checkpoint = convert_ldm_unet_checkpoint( checkpoint, unet_config, path=path, extract_ema=extract_ema ) ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): unet = UNet2DConditionModel(**unet_config) if is_accelerate_available(): if model_type not in ["SDXL", "SDXL-Refiner"]: # SBM Delay this. for param_name, param in converted_unet_checkpoint.items(): set_module_tensor_to_device(unet, param_name, "cpu", value=param) else: unet.load_state_dict(converted_unet_checkpoint) # Convert the VAE model. 
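# Note on precedence (added comment): a `vae` module passed by the caller is
# used as-is; otherwise `vae_path` is loaded as a pretrained fallback, and only
# if both are absent are the VAE weights extracted from the single-file
# checkpoint below.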
if vae_path is None and vae is None: vae_config = create_vae_diffusers_config(original_config, image_size=image_size) converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) if ( "model" in original_config and "params" in original_config["model"] and "scale_factor" in original_config["model"]["params"] ): vae_scaling_factor = original_config["model"]["params"]["scale_factor"] else: vae_scaling_factor = 0.18215 # default SD scaling factor vae_config["scaling_factor"] = vae_scaling_factor ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): vae = AutoencoderKL(**vae_config) if is_accelerate_available(): for param_name, param in converted_vae_checkpoint.items(): set_module_tensor_to_device(vae, param_name, "cpu", value=param) else: vae.load_state_dict(converted_vae_checkpoint) elif vae is None: vae = AutoencoderKL.from_pretrained(vae_path, local_files_only=local_files_only) if model_type == "FrozenOpenCLIPEmbedder": config_name = "stabilityai/stable-diffusion-2" config_kwargs = {"subfolder": "text_encoder"} if text_encoder is None: text_model = convert_open_clip_checkpoint( checkpoint, config_name, local_files_only=local_files_only, **config_kwargs ) else: text_model = text_encoder try: tokenizer = CLIPTokenizer.from_pretrained( "stabilityai/stable-diffusion-2", subfolder="tokenizer", local_files_only=local_files_only ) except Exception: raise ValueError( f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'stabilityai/stable-diffusion-2'." ) if stable_unclip is None: if controlnet: pipe = pipeline_class( vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, controlnet=controlnet, safety_checker=safety_checker, feature_extractor=feature_extractor, ) if hasattr(pipe, "requires_safety_checker"): pipe.requires_safety_checker = False elif pipeline_class == StableDiffusionUpscalePipeline: scheduler = DDIMScheduler.from_pretrained( "stabilityai/stable-diffusion-x4-upscaler", subfolder="scheduler" ) low_res_scheduler = DDPMScheduler.from_pretrained( "stabilityai/stable-diffusion-x4-upscaler", subfolder="low_res_scheduler" ) pipe = pipeline_class( vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, low_res_scheduler=low_res_scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) else: pipe = pipeline_class( vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) if hasattr(pipe, "requires_safety_checker"): pipe.requires_safety_checker = False else: image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components( original_config, clip_stats_path=clip_stats_path, device=device ) if stable_unclip == "img2img": feature_extractor, image_encoder = stable_unclip_image_encoder(original_config) pipe = StableUnCLIPImg2ImgPipeline( # image encoding components feature_extractor=feature_extractor, image_encoder=image_encoder, # image noising components image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, # regular denoising components tokenizer=tokenizer, text_encoder=text_model, unet=unet, scheduler=scheduler, # vae vae=vae, ) elif stable_unclip == "txt2img": if stable_unclip_prior is None or stable_unclip_prior == "karlo": karlo_model = "kakaobrain/karlo-v1-alpha" prior = PriorTransformer.from_pretrained( karlo_model, subfolder="prior", 
local_files_only=local_files_only ) try: prior_tokenizer = CLIPTokenizer.from_pretrained( "openai/clip-vit-large-patch14", local_files_only=local_files_only ) except Exception: raise ValueError( f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." ) prior_text_model = CLIPTextModelWithProjection.from_pretrained( "openai/clip-vit-large-patch14", local_files_only=local_files_only ) prior_scheduler = UnCLIPScheduler.from_pretrained( karlo_model, subfolder="prior_scheduler", local_files_only=local_files_only ) prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config) else: raise NotImplementedError(f"unknown prior for stable unclip model: {stable_unclip_prior}") pipe = StableUnCLIPPipeline( # prior components prior_tokenizer=prior_tokenizer, prior_text_encoder=prior_text_model, prior=prior, prior_scheduler=prior_scheduler, # image noising components image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, # regular denoising components tokenizer=tokenizer, text_encoder=text_model, unet=unet, scheduler=scheduler, # vae vae=vae, ) else: raise NotImplementedError(f"unknown `stable_unclip` type: {stable_unclip}") elif model_type == "PaintByExample": vision_model = convert_paint_by_example_checkpoint(checkpoint) try: tokenizer = CLIPTokenizer.from_pretrained( "openai/clip-vit-large-patch14", local_files_only=local_files_only ) except Exception: raise ValueError( f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." ) try: feature_extractor = AutoFeatureExtractor.from_pretrained( "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only ) except Exception: raise ValueError( f"With local_files_only set to {local_files_only}, you must first locally save the feature_extractor in the following path: 'CompVis/stable-diffusion-safety-checker'." ) pipe = PaintByExamplePipeline( vae=vae, image_encoder=vision_model, unet=unet, scheduler=scheduler, safety_checker=None, feature_extractor=feature_extractor, ) elif model_type == "FrozenCLIPEmbedder": text_model = convert_ldm_clip_checkpoint( checkpoint, local_files_only=local_files_only, text_encoder=text_encoder ) try: tokenizer = ( CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only) if tokenizer is None else tokenizer ) except Exception: raise ValueError( f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." 
) if load_safety_checker: safety_checker = StableDiffusionSafetyChecker.from_pretrained( "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only ) feature_extractor = AutoFeatureExtractor.from_pretrained( "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only ) if controlnet: pipe = pipeline_class( vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) else: pipe = pipeline_class( vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) elif model_type in ["SDXL", "SDXL-Refiner"]: is_refiner = model_type == "SDXL-Refiner" if (is_refiner is False) and (tokenizer is None): try: tokenizer = CLIPTokenizer.from_pretrained( "openai/clip-vit-large-patch14", local_files_only=local_files_only ) except Exception: raise ValueError( f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." ) if (is_refiner is False) and (text_encoder is None): text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only) if tokenizer_2 is None: try: tokenizer_2 = CLIPTokenizer.from_pretrained( "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!", local_files_only=local_files_only ) except Exception: raise ValueError( f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' with `pad_token` set to '!'." ) if text_encoder_2 is None: config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" config_kwargs = {"projection_dim": 1280} prefix = "conditioner.embedders.0.model." if is_refiner else "conditioner.embedders.1.model." text_encoder_2 = convert_open_clip_checkpoint( checkpoint, config_name, prefix=prefix, has_projection=True, local_files_only=local_files_only, **config_kwargs, ) if is_accelerate_available(): # SBM Now move model to cpu. 
for param_name, param in converted_unet_checkpoint.items(): set_module_tensor_to_device(unet, param_name, "cpu", value=param) if controlnet: pipe = pipeline_class( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, force_zeros_for_empty_prompt=True, ) elif adapter: pipe = pipeline_class( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, unet=unet, adapter=adapter, scheduler=scheduler, force_zeros_for_empty_prompt=True, ) else: pipeline_kwargs = { "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "unet": unet, "scheduler": scheduler, } if (pipeline_class == StableDiffusionXLImg2ImgPipeline) or ( pipeline_class == StableDiffusionXLInpaintPipeline ): pipeline_kwargs.update({"requires_aesthetics_score": is_refiner}) if is_refiner: pipeline_kwargs.update({"force_zeros_for_empty_prompt": False}) pipe = pipeline_class(**pipeline_kwargs) else: text_config = create_ldm_bert_config(original_config) text_model = convert_ldm_bert_checkpoint(checkpoint, text_config) tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased", local_files_only=local_files_only) pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler) return pipe def download_controlnet_from_original_ckpt( checkpoint_path: str, original_config_file: str, image_size: int = 512, extract_ema: bool = False, num_in_channels: Optional[int] = None, upcast_attention: Optional[bool] = None, device: str = None, from_safetensors: bool = False, use_linear_projection: Optional[bool] = None, cross_attention_dim: Optional[bool] = None, ) -> DiffusionPipeline: if from_safetensors: from safetensors import safe_open checkpoint = {} with safe_open(checkpoint_path, framework="pt", device="cpu") as f: for key in f.keys(): checkpoint[key] = f.get_tensor(key) else: if device is None: device = get_device() checkpoint = torch.load(checkpoint_path, map_location=device) else: checkpoint = torch.load(checkpoint_path, map_location=device) # NOTE: this while loop isn't great but this controlnet checkpoint has one additional # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21 while "state_dict" in checkpoint: checkpoint = checkpoint["state_dict"] with open(original_config_file, "r") as f: original_config_file = f.read() original_config = yaml.safe_load(original_config_file) if num_in_channels is not None: original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels if "control_stage_config" not in original_config["model"]["params"]: raise ValueError("`control_stage_config` not present in original config") controlnet = convert_controlnet_checkpoint( checkpoint, original_config, checkpoint_path, image_size, upcast_attention, extract_ema, use_linear_projection=use_linear_projection, cross_attention_dim=cross_attention_dim, ) return controlnet
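# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the conversion
# logic above is what backs the public single-file loaders, and calling
# `from_single_file` is the recommended entry point rather than invoking the
# conversion helpers directly. The checkpoint filename below is a hypothetical
# local path.
if __name__ == "__main__":
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_single_file(
        "v1-5-pruned-emaonly.safetensors",  # hypothetical .safetensors checkpoint
        torch_dtype=torch.float16,
    ).to("cuda")
    image = pipe("an astronaut riding a horse on mars").images[0]
    image.save("astronaut.png")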
diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py", "repo_id": "diffusers", "token_count": 36488 }
181
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from transformers import CLIPTokenizer, FlaxCLIPTextModel from diffusers.utils import logging from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from ...schedulers import ( FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, ) from ..pipeline_flax_utils import FlaxDiffusionPipeline from .pipeline_output import FlaxStableDiffusionXLPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Set to True to use python for loop instead of jax.fori_loop for easier debugging DEBUG = False class FlaxStableDiffusionXLPipeline(FlaxDiffusionPipeline): def __init__( self, text_encoder: FlaxCLIPTextModel, text_encoder_2: FlaxCLIPTextModel, vae: FlaxAutoencoderKL, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: FlaxUNet2DConditionModel, scheduler: Union[ FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler ], dtype: jnp.dtype = jnp.float32, ): super().__init__() self.dtype = dtype self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 def prepare_inputs(self, prompt: Union[str, List[str]]): if not isinstance(prompt, (str, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") # Assume we have the two encoders inputs = [] for tokenizer in [self.tokenizer, self.tokenizer_2]: text_inputs = tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) inputs.append(text_inputs.input_ids) inputs = jnp.stack(inputs, axis=1) return inputs def __call__( self, prompt_ids: jax.Array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int = 50, guidance_scale: Union[float, jax.Array] = 7.5, height: Optional[int] = None, width: Optional[int] = None, latents: jnp.array = None, neg_prompt_ids: jnp.array = None, return_dict: bool = True, output_type: str = None, jit: bool = False, ): # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(guidance_scale, float) and jit: # Convert to a tensor so each device gets a copy. 
guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) guidance_scale = guidance_scale[:, None] return_latents = output_type == "latent" if jit: images = _p_generate( self, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ) else: images = self._generate( prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ) if not return_dict: return (images,) return FlaxStableDiffusionXLPipelineOutput(images=images) def get_embeddings(self, prompt_ids: jnp.array, params): # We assume we have the two encoders # bs, encoder_input, seq_length te_1_inputs = prompt_ids[:, 0, :] te_2_inputs = prompt_ids[:, 1, :] prompt_embeds = self.text_encoder(te_1_inputs, params=params["text_encoder"], output_hidden_states=True) prompt_embeds = prompt_embeds["hidden_states"][-2] prompt_embeds_2_out = self.text_encoder_2( te_2_inputs, params=params["text_encoder_2"], output_hidden_states=True ) prompt_embeds_2 = prompt_embeds_2_out["hidden_states"][-2] text_embeds = prompt_embeds_2_out["text_embeds"] prompt_embeds = jnp.concatenate([prompt_embeds, prompt_embeds_2], axis=-1) return prompt_embeds, text_embeds def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, bs, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = jnp.array([add_time_ids] * bs, dtype=dtype) return add_time_ids def _generate( self, prompt_ids: jnp.array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, height: int, width: int, guidance_scale: float, latents: Optional[jnp.array] = None, neg_prompt_ids: Optional[jnp.array] = None, return_latents=False, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") # Encode input prompt prompt_embeds, pooled_embeds = self.get_embeddings(prompt_ids, params) # Get unconditional embeddings batch_size = prompt_embeds.shape[0] if neg_prompt_ids is None: neg_prompt_embeds = jnp.zeros_like(prompt_embeds) negative_pooled_embeds = jnp.zeros_like(pooled_embeds) else: neg_prompt_embeds, negative_pooled_embeds = self.get_embeddings(neg_prompt_ids, params) add_time_ids = self._get_add_time_ids( (height, width), (0, 0), (height, width), prompt_embeds.shape[0], dtype=prompt_embeds.dtype ) prompt_embeds = jnp.concatenate([neg_prompt_embeds, prompt_embeds], axis=0) # (2, 77, 2048) add_text_embeds = jnp.concatenate([negative_pooled_embeds, pooled_embeds], axis=0) add_time_ids = jnp.concatenate([add_time_ids, add_time_ids], axis=0) # Ensure model output will be `float32` before going into the scheduler guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) # Create random latents latents_shape = ( batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") # Prepare scheduler state scheduler_state = self.scheduler.set_timesteps( params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape ) # scale the initial noise by the standard deviation required by the scheduler latents = latents * scheduler_state.init_noise_sigma added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": 
add_time_ids} # Denoising loop def loop_body(step, args): latents, scheduler_state = args # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) # predict the noise residual noise_pred = self.unet.apply( {"params": params["unet"]}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return latents, scheduler_state if DEBUG: # run with python for loop for i in range(num_inference_steps): latents, scheduler_state = loop_body(i, (latents, scheduler_state)) else: latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) if return_latents: return latents # Decode latents latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image # Static argnums are pipe, num_inference_steps, height, width, return_latents. A change would trigger recompilation. # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). @partial( jax.pmap, in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0, None), static_broadcasted_argnums=(0, 4, 5, 6, 10), ) def _p_generate( pipe, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ): return pipe._generate( prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, )
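# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): running the
# pipeline above across all local devices with `jit=True`. `replicate` and
# `shard` are the standard flax utilities for copying params and splitting a
# batch over devices; the checkpoint id is an assumed example.
if __name__ == "__main__":
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    num_devices = jax.device_count()
    pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",  # assumed checkpoint id
        dtype=jnp.bfloat16,
    )
    prompts = ["a photo of an astronaut riding a horse"] * num_devices
    prompt_ids = shard(pipeline.prepare_inputs(prompts))  # (devices, 1, 2, 77)
    rng = jax.random.split(jax.random.PRNGKey(0), num_devices)
    images = pipeline(prompt_ids, replicate(params), rng, jit=True).images
    # images has shape (devices, 1, height, width, 3)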
diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 5263 }
182
import copy import inspect from dataclasses import dataclass from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, BaseOutput, is_torch_xla_available, logging, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import empty_device_cache, randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionSafetyChecker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline( DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin, ): _last_supported_version = "0.33.1" r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). 
tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def forward_loop(self, x_t0, t0, t1, generator): """ Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. Args: x_t0: Latent code at time t0. t0: Timestep at t0. t1: Timestamp at t1. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. Returns: x_t1: Forward process applied to x_t0 from time t0 to t1. """ eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps return x_t1 def backward_loop( self, latents, timesteps, prompt_embeds, guidance_scale, callback, callback_steps, num_warmup_steps, extra_step_kwargs, cross_attention_kwargs=None, ): """ Perform backward process given list of time steps. Args: latents: Latents at time timesteps[0]. timesteps: Time steps along which to perform backward process. prompt_embeds: Pre-generated text embeddings. guidance_scale: A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. extra_step_kwargs: Extra_step_kwargs. cross_attention_kwargs: A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). num_warmup_steps: number of warmup steps. Returns: latents: Latents of backward process output at time timesteps[-1]. """ do_classifier_free_guidance = guidance_scale > 1.0 num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order with self.progress_bar(total=num_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() return latents.clone().detach() # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int] = 8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, motion_field_strength_x: float = 12, motion_field_strength_y: float = 12, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: Optional[int] = 1, t0: int = 44, t1: int = 47, frame_ids: Optional[List[int]] = None, ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. video_length (`int`, *optional*, defaults to 8): The number of generated video frames. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in video generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated video. Choose between `"latent"` and `"np"`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. motion_field_strength_x (`float`, *optional*, defaults to 12): Strength of motion in generated video along x-axis. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. motion_field_strength_y (`float`, *optional*, defaults to 12): Strength of motion in generated video along y-axis. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. t0 (`int`, *optional*, defaults to 44): Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. t1 (`int`, *optional*, defaults to 47): Timestep t0. Should be in the range [t0 + 1, num_inference_steps - 1]. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. frame_ids (`List[int]`, *optional*): Indexes of the frames that are being generated. This is used when generating longer videos chunk-by-chunk. Returns: [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`]: The output contains a `ndarray` of the generated video, when `output_type` != `"latent"`, otherwise a latent code of generated videos and a list of `bool`s indicating whether the corresponding generated video contains "not-safe-for-work" (nsfw) content.. 
""" assert video_length > 0 if frame_ids is None: frame_ids = list(range(video_length)) assert len(frame_ids) == video_length assert num_videos_per_prompt == 1 # set the processor original_attn_proc = self.unet.attn_processors processor = ( CrossFrameAttnProcessor2_0(batch_size=2) if hasattr(F, "scaled_dot_product_attention") else CrossFrameAttnProcessor(batch_size=2) ) self.unet.set_attn_processor(processor) if isinstance(prompt, str): prompt = [prompt] if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt_embeds_tuple = self.encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # Prepare extra step kwargs. 
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order # Perform the first backward process up to time T_1 x_1_t1 = self.backward_loop( timesteps=timesteps[: -t1 - 1], prompt_embeds=prompt_embeds, latents=latents, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=num_warmup_steps, ) scheduler_copy = copy.deepcopy(self.scheduler) # Perform the second backward process up to time T_0 x_1_t0 = self.backward_loop( timesteps=timesteps[-t1 - 1 : -t0 - 1], prompt_embeds=prompt_embeds, latents=x_1_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, ) # Propagate first frame latents at time T_0 to remaining frames x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) # Add motion in latents at time T_0 x_2k_t0 = create_motion_field_and_warp_latents( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, latents=x_2k_t0, frame_ids=frame_ids[1:], ) # Perform forward process up to time T_1 x_2k_t1 = self.forward_loop( x_t0=x_2k_t0, t0=timesteps[-t0 - 1].item(), t1=timesteps[-t1 - 1].item(), generator=generator, ) # Perform backward process from time T_1 to 0 x_1k_t1 = torch.cat([x_1_t1, x_2k_t1]) b, l, d = prompt_embeds.size() prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) self.scheduler = scheduler_copy x_1k_0 = self.backward_loop( timesteps=timesteps[-t1 - 1 :], prompt_embeds=prompt_embeds, latents=x_1k_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, ) latents = x_1k_0 # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") empty_device_cache() if output_type == "latent": image = latents has_nsfw_concept = None else: image = self.decode_latents(latents) # Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # Offload all models self.maybe_free_model_hooks() # make sure to set the original attention processors back self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image, has_nsfw_concept) return TextToVideoPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image
diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py", "repo_id": "diffusers", "token_count": 20339 }
183
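The three-phase sampling in the pipeline above is driven entirely by how `t0` and `t1` slice the timestep schedule with negative indices. A minimal sketch of that slicing in plain Python (the step count and the `t0`/`t1` values are illustrative, not the pipeline defaults):

timesteps = list(range(981, -1, -20))  # e.g. a 50-step DDIM-style schedule, 981 ... 1
t0, t1 = 44, 47  # example values for the pipeline's t0/t1 arguments

phase_1 = timesteps[: -t1 - 1]          # x_T -> x_1^{T_1} (first backward loop)
phase_2 = timesteps[-t1 - 1 : -t0 - 1]  # x_1^{T_1} -> x_1^{T_0} (second backward loop)
phase_3 = timesteps[-t1 - 1 :]          # x_{1:k}^{T_1} -> x_{1:k}^{0} (final backward loop)

# The first two phases traverse the schedule down to T_0; the final phase replays
# the tail from T_1 once the warped frames have been re-noised to T_1.
assert phase_1 + phase_2 == timesteps[: -t0 - 1]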
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from
https://github.com/huggingface/transformers/blob/52cb4034ada381fe1ffe8d428a1076e5411a8026/src/transformers/quantizers/base.py
"""

from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union

from ..utils import is_torch_available
from .quantization_config import QuantizationConfigMixin


if TYPE_CHECKING:
    from ..models.modeling_utils import ModelMixin

if is_torch_available():
    import torch


class DiffusersQuantizer(ABC):
    """
    Abstract class of the HuggingFace quantizer. Supports, for now, quantizing HF diffusers models for inference.
    This class is used only for `diffusers.models.modeling_utils.ModelMixin.from_pretrained` and cannot easily be
    used outside the scope of that method yet.

    Attributes:
        quantization_config (`diffusers.quantizers.quantization_config.QuantizationConfigMixin`):
            The quantization config that defines the quantization parameters of the model you want to quantize.
        modules_to_not_convert (`List[str]`, *optional*):
            The list of module names that should not be converted when quantizing the model.
        required_packages (`List[str]`, *optional*):
            The list of pip packages that must be installed before the quantizer can be used.
        requires_calibration (`bool`):
            Whether the quantization method requires calibrating the model before use.
    """

    requires_calibration = False
    required_packages = None

    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
        self.quantization_config = quantization_config

        # -- Handle extra kwargs below --
        self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
        self.pre_quantized = kwargs.pop("pre_quantized", True)

        if not self.pre_quantized and self.requires_calibration:
            raise ValueError(
                f"The quantization method {quantization_config.quant_method} requires the model to be pre-quantized."
                " You explicitly passed `pre_quantized=False`, meaning your model weights are not quantized."
                " Pass `pre_quantized=True` only if you know what you are doing."
            )

    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        """
        Some quantization methods require explicitly setting the dtype of the model to a target dtype. Override this
        method if you need to make sure that behavior is preserved.

        Args:
            torch_dtype (`torch.dtype`):
                The input dtype that is passed in `from_pretrained`.
        """
        return torch_dtype

    def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """
        Override this method if you want to override the existing device map with a new one. E.g. for bitsandbytes,
        since `accelerate` is a hard requirement, if no device_map is passed, the device_map is set to `"auto"`.

        Args:
            device_map (`Union[dict, str]`, *optional*):
                The device_map that is passed through the `from_pretrained` method.
        """
        return device_map

    def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        """
        Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained` to compute
        the device_map, in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype` to
        `torch.int8`, and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.

        Args:
            torch_dtype (`torch.dtype`, *optional*):
                The torch_dtype that is used to compute the device_map.
        """
        return torch_dtype

    def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
        """
        Override this method if you want to adjust the `missing_keys`.

        Args:
            missing_keys (`List[str]`, *optional*):
                The list of missing keys in the checkpoint compared to the state dict of the model.
        """
        return missing_keys

    def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
        """
        Returns dtypes for modules that are not quantized, used to compute the device_map in case one passes a str as
        a device_map. The method will use the `modules_to_not_convert` that is modified in
        `_process_model_before_weight_loading`. `diffusers` models don't have any `modules_to_not_convert` attributes
        yet, but this may change in the future.

        Args:
            model (`~diffusers.models.modeling_utils.ModelMixin`):
                The model to quantize.
            torch_dtype (`torch.dtype`):
                The dtype passed in the `from_pretrained` method.
        """
        return {
            name: torch_dtype
            for name, _ in model.named_parameters()
            if any(m in name for m in self.modules_to_not_convert)
        }

    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
        """Adjust the max_memory argument for infer_auto_device_map() if extra memory is needed for quantization."""
        return max_memory

    def check_if_quantized_param(
        self,
        model: "ModelMixin",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: Dict[str, Any],
        **kwargs,
    ) -> bool:
        """
        Checks if a loaded state_dict component is part of a quantized param and runs some validation; only defined
        for quantization methods that need to create new parameters for quantization.
        """
        return False

    def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
        """
        Takes the needed components from the state_dict and creates a quantized param.
        """
        return

    def check_quantized_param_shape(self, *args, **kwargs):
        """
        Checks if the quantized param has the expected shape.
        """
        return True

    def validate_environment(self, *args, **kwargs):
        """
        This method is used to check for potential conflicts with arguments that are passed in `from_pretrained`. You
        need to define it for all future quantizers that are integrated with diffusers. If no explicit checks are
        needed, simply return nothing.
        """
        return

    def preprocess_model(self, model: "ModelMixin", **kwargs):
        """
        Sets model attributes and/or converts the model before weight loading. At this point the model should be
        initialized on the meta device, so you can freely manipulate the skeleton of the model in order to replace
        modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.

        Args:
            model (`~diffusers.models.modeling_utils.ModelMixin`):
                The model to quantize.
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along to `_process_model_before_weight_loading`.
""" model.is_quantized = True model.quantization_method = self.quantization_config.quant_method return self._process_model_before_weight_loading(model, **kwargs) def postprocess_model(self, model: "ModelMixin", **kwargs): """ Post-process the model post weights loading. Make sure to override the abstract method `_process_model_after_weight_loading`. Args: model (`~diffusers.models.modeling_utils.ModelMixin`): The model to quantize kwargs (`dict`, *optional*): The keyword arguments that are passed along `_process_model_after_weight_loading`. """ return self._process_model_after_weight_loading(model, **kwargs) def dequantize(self, model): """ Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance. Note not all quantization schemes support this. """ model = self._dequantize(model) # Delete quantizer and quantization config del model.hf_quantizer return model def get_cuda_warm_up_factor(self): """ The factor to be used in `caching_allocator_warmup` to get the number of bytes to pre-allocate to warm up cuda. A factor of 2 means we allocate all bytes in the empty model (since we allocate in fp16), a factor of 4 means we allocate half the memory of the weights residing in the empty model, etc... """ # By default we return 4, i.e. half the model size (this corresponds to the case where the model is not # really pre-processed, i.e. we do not have the info that weights are going to be 8 bits before actual # weight loading) return 4 def _dequantize(self, model): raise NotImplementedError( f"{self.quantization_config.quant_method} has no implementation of `dequantize`, please raise an issue on GitHub." ) @abstractmethod def _process_model_before_weight_loading(self, model, **kwargs): ... @abstractmethod def _process_model_after_weight_loading(self, model, **kwargs): ... @property @abstractmethod def is_serializable(self): ... @property @abstractmethod def is_trainable(self): ... @property def is_compileable(self) -> bool: """Flag indicating whether the quantized model can be compiled""" return False
diffusers/src/diffusers/quantizers/base.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/base.py", "repo_id": "diffusers", "token_count": 3858 }
184
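To make the contract of the abstract class above concrete, here is a minimal sketch of a subclass. The `NoOpQuantizer` name and its pass-through behaviour are hypothetical; only the overridden surface (the two abstract `_process_model_*` hooks and the two abstract properties) comes from the ABC:

from diffusers.quantizers.base import DiffusersQuantizer


class NoOpQuantizer(DiffusersQuantizer):
    """Hypothetical quantizer that leaves the model untouched."""

    requires_calibration = False

    def _process_model_before_weight_loading(self, model, **kwargs):
        # A real quantizer would swap nn.Linear modules for quantized ones here,
        # while the model skeleton still lives on the meta device.
        return model

    def _process_model_after_weight_loading(self, model, **kwargs):
        # A real quantizer would finalize packed/quantized weights here.
        return model

    @property
    def is_serializable(self):
        return True

    @property
    def is_trainable(self):
        return False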
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_pt_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: _import_structure["scheduling_karras_ve"] = ["KarrasVeScheduler"] _import_structure["scheduling_sde_vp"] = ["ScoreSdeVpScheduler"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/schedulers/deprecated/__init__.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/deprecated/__init__.py", "repo_id": "diffusers", "token_count": 555 }
185
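The `_LazyModule` indirection above defers the heavy, torch-dependent imports until an attribute is actually requested; a short illustrative sketch:

# Importing the subpackage is cheap: no scheduler module has been loaded yet.
import diffusers.schedulers.deprecated as deprecated_schedulers

# Only this attribute access triggers the real import of scheduling_karras_ve.
KarrasVeScheduler = deprecated_schedulers.KarrasVeScheduler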
# Copyright 2025 Zhejiang University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    A fourth-order Improved Pseudo Linear Multistep scheduler.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the
    generic methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        trained_betas (`np.ndarray`, *optional*):
            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://huggingface.co/papers/2202.09778
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []
        self._step_index = None
        self._begin_index = None

    @property
    def step_index(self):
        """
        The index counter for the current timestep. It increases by 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from the pipeline with the `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from the pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
""" self.num_inference_steps = num_inference_steps steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1] steps = torch.cat([steps, torch.tensor([0.0])]) if self.config.trained_betas is not None: self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32) else: self.betas = torch.sin(steps * math.pi / 2) ** 2 self.alphas = (1.0 - self.betas**2) ** 0.5 timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1] self.timesteps = timesteps.to(device) self.ets = [] self._step_index = None self._begin_index = None # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) pos = 1 if len(indices) > 1 else 0 return indices[pos].item() # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step( self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with the linear multistep method. It performs one forward pass multiple times to approximate the solution. Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if self.step_index is None: self._init_step_index(timestep) timestep_index = self.step_index prev_timestep_index = self.step_index + 1 ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(ets) if len(self.ets) == 1: ets = self.ets[-1] elif len(self.ets) == 2: ets = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets) == 3: ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets) # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. Returns: `torch.Tensor`: A scaled input sample. """ return sample def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets): alpha = self.alphas[timestep_index] sigma = self.betas[timestep_index] next_alpha = self.alphas[prev_timestep_index] next_sigma = self.betas[prev_timestep_index] pred = (sample - sigma * ets) / max(alpha, 1e-8) prev_sample = next_alpha * pred + ets * next_sigma return prev_sample def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_ipndm.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ipndm.py", "repo_id": "diffusers", "token_count": 3641 }
186
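A short, runnable usage sketch for the scheduler above; the zero tensor is a stand-in for a real denoiser call, so the shapes and step count are illustrative:

import torch

from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler()
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 4, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample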
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class QuantoConfig(metaclass=DummyObject): _backends = ["optimum_quanto"] def __init__(self, *args, **kwargs): requires_backends(self, ["optimum_quanto"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["optimum_quanto"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["optimum_quanto"])
diffusers/src/diffusers/utils/dummy_optimum_quanto_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_optimum_quanto_objects.py", "repo_id": "diffusers", "token_count": 208 }
187
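The placeholder above exists so that touching `QuantoConfig` without `optimum_quanto` installed fails with an actionable message instead of a hard crash at import time. Illustratively (assuming `QuantoConfig` is re-exported at the diffusers top level, as the other quantization configs are):

try:
    from diffusers import QuantoConfig

    config = QuantoConfig()  # with optimum_quanto missing, requires_backends raises here
except ImportError as err:
    print(err)  # points the user at the missing `optimum_quanto` dependency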
import os
import tempfile
from typing import Any, Callable, List, Optional, Tuple, Union
from urllib.parse import unquote, urlparse

import PIL.Image
import PIL.ImageOps
import requests

from .constants import DIFFUSERS_REQUEST_TIMEOUT
from .import_utils import BACKENDS_MAPPING, is_imageio_available


def load_image(
    image: Union[str, PIL.Image.Image], convert_method: Optional[Callable[[PIL.Image.Image], PIL.Image.Image]] = None
) -> PIL.Image.Image:
    """
    Loads `image` to a PIL Image.

    Args:
        image (`str` or `PIL.Image.Image`):
            The image to convert to the PIL Image format.
        convert_method (Callable[[PIL.Image.Image], PIL.Image.Image], *optional*):
            A conversion method to apply to the image after loading it. When set to `None` the image will be converted
            to "RGB".

    Returns:
        `PIL.Image.Image`:
            A PIL Image.
    """
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw)
        elif os.path.isfile(image):
            image = PIL.Image.open(image)
        else:
            raise ValueError(
                f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {image} is not a valid path."
            )
    elif isinstance(image, PIL.Image.Image):
        image = image
    else:
        raise ValueError(
            "Incorrect format used for the image. Should be a URL linking to an image, a local path, or a PIL image."
        )

    image = PIL.ImageOps.exif_transpose(image)

    if convert_method is not None:
        image = convert_method(image)
    else:
        image = image.convert("RGB")

    return image


def load_video(
    video: str,
    convert_method: Optional[Callable[[List[PIL.Image.Image]], List[PIL.Image.Image]]] = None,
) -> List[PIL.Image.Image]:
    """
    Loads `video` to a list of PIL Image.

    Args:
        video (`str`):
            A URL or Path to a video to convert to a list of PIL Image format.
        convert_method (Callable[[List[PIL.Image.Image]], List[PIL.Image.Image]], *optional*):
            A conversion method to apply to the video after loading it. When set to `None` the images will be
            converted to "RGB".

    Returns:
        `List[PIL.Image.Image]`:
            The video as a list of PIL images.
    """
    is_url = video.startswith("http://") or video.startswith("https://")
    is_file = os.path.isfile(video)
    was_tempfile_created = False

    if not (is_url or is_file):
        raise ValueError(
            f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {video} is not a valid path."
        )

    if is_url:
        response = requests.get(video, stream=True)
        if response.status_code != 200:
            raise ValueError(f"Failed to download video. Status code: {response.status_code}")

        parsed_url = urlparse(video)
        file_name = os.path.basename(unquote(parsed_url.path))
        suffix = os.path.splitext(file_name)[1] or ".mp4"
        video_path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False).name
        was_tempfile_created = True

        video_data = response.iter_content(chunk_size=8192)
        with open(video_path, "wb") as f:
            for chunk in video_data:
                f.write(chunk)

        video = video_path

    pil_images = []
    if video.endswith(".gif"):
        gif = PIL.Image.open(video)
        try:
            while True:
                pil_images.append(gif.copy())
                gif.seek(gif.tell() + 1)
        except EOFError:
            pass
    else:
        if is_imageio_available():
            import imageio
        else:
            raise ImportError(BACKENDS_MAPPING["imageio"][1].format("load_video"))

        try:
            imageio.plugins.ffmpeg.get_exe()
        except AttributeError:
            raise AttributeError(
                "Unable to find an ffmpeg installation on your machine. Please install via `pip install imageio-ffmpeg`."
            )

        with imageio.get_reader(video) as reader:
            # Read all frames
            for frame in reader:
                pil_images.append(PIL.Image.fromarray(frame))

    if was_tempfile_created:
        os.remove(video_path)

    if convert_method is not None:
        pil_images = convert_method(pil_images)

    return pil_images


# Taken from `transformers`.
def get_module_from_name(module, tensor_name: str) -> Tuple[Any, str]:
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]
    return module, tensor_name


def get_submodule_by_name(root_module, module_path: str):
    current = root_module
    parts = module_path.split(".")
    for part in parts:
        if part.isdigit():
            idx = int(part)
            current = current[idx]  # e.g., for nn.ModuleList or nn.Sequential
        else:
            current = getattr(current, part)
    return current
diffusers/src/diffusers/utils/loading_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/loading_utils.py", "repo_id": "diffusers", "token_count": 2331 }
188
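A small usage sketch for the two module-lookup helpers above, on a toy torch module tree (the tree itself is illustrative):

import torch.nn as nn

from diffusers.utils.loading_utils import get_module_from_name, get_submodule_by_name

net = nn.Sequential(nn.Linear(4, 4), nn.ReLU())

# Resolves the owning module plus the bare tensor name.
module, tensor_name = get_module_from_name(net, "0.weight")
assert module is net[0] and tensor_name == "weight"

# Digit path components are used as indices, e.g. into an nn.Sequential or nn.ModuleList.
assert get_submodule_by_name(net, "0") is net[0]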
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput, SchedulerMixin, UNet2DModel


class CustomLocalPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            eta (`float`, *optional*, defaults to 0.0):
                The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`:
                [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a
                tuple, the first element is a list with the generated images.
        """
        # Sample gaussian noise to begin loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2.
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 image = self.scheduler.step(model_output, t, image).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=image), "This is a local test"
diffusers/tests/fixtures/custom_pipeline/pipeline.py/0
{ "file_path": "diffusers/tests/fixtures/custom_pipeline/pipeline.py", "repo_id": "diffusers", "token_count": 1767 }
189
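For context, a local pipeline such as the fixture above is loaded through the `custom_pipeline` argument of `DiffusionPipeline.from_pretrained`; the checkpoint and directory below are illustrative stand-ins:

from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32",  # any repo that provides a compatible unet + scheduler
    custom_pipeline="./tests/fixtures/custom_pipeline",
)
output, message = pipeline(num_inference_steps=2)  # this fixture returns an extra string
assert message == "This is a local test"
images = output.images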
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import sys import unittest import numpy as np import torch from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel from diffusers import ( FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel, StableDiffusion3Img2ImgPipeline, StableDiffusion3Pipeline, ) from diffusers.utils import load_image from diffusers.utils.import_utils import is_accelerate_available from diffusers.utils.testing_utils import ( backend_empty_cache, is_flaky, nightly, numpy_cosine_similarity_distance, require_big_accelerator, require_peft_backend, require_torch_accelerator, torch_device, ) sys.path.append(".") from utils import PeftLoraLoaderMixinTests # noqa: E402 if is_accelerate_available(): from accelerate.utils import release_memory @require_peft_backend class SD3LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = StableDiffusion3Pipeline scheduler_cls = FlowMatchEulerDiscreteScheduler scheduler_kwargs = {} scheduler_classes = [FlowMatchEulerDiscreteScheduler] transformer_kwargs = { "sample_size": 32, "patch_size": 1, "in_channels": 4, "num_layers": 1, "attention_head_dim": 8, "num_attention_heads": 4, "caption_projection_dim": 32, "joint_attention_dim": 32, "pooled_projection_dim": 64, "out_channels": 4, } transformer_cls = SD3Transformer2DModel vae_kwargs = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "block_out_channels": (4,), "layers_per_block": 1, "latent_channels": 4, "norm_num_groups": 1, "use_quant_conv": False, "use_post_quant_conv": False, "shift_factor": 0.0609, "scaling_factor": 1.5035, } has_three_text_encoders = True tokenizer_cls, tokenizer_id = CLIPTokenizer, "hf-internal-testing/tiny-random-clip" tokenizer_2_cls, tokenizer_2_id = CLIPTokenizer, "hf-internal-testing/tiny-random-clip" tokenizer_3_cls, tokenizer_3_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" text_encoder_cls, text_encoder_id = CLIPTextModelWithProjection, "hf-internal-testing/tiny-sd3-text_encoder" text_encoder_2_cls, text_encoder_2_id = CLIPTextModelWithProjection, "hf-internal-testing/tiny-sd3-text_encoder-2" text_encoder_3_cls, text_encoder_3_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" @property def output_shape(self): return (1, 32, 32, 3) @require_torch_accelerator def test_sd3_lora(self): """ Test loading the loras that are saved with the diffusers and peft formats. 
Related PR: https://github.com/huggingface/diffusers/pull/8584 """ components = self.get_dummy_components() pipe = self.pipeline_class(**components[0]) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) lora_model_id = "hf-internal-testing/tiny-sd3-loras" lora_filename = "lora_diffusers_format.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.unload_lora_weights() lora_filename = "lora_peft_format.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) @unittest.skip("Not supported in SD3.") def test_simple_inference_with_text_denoiser_block_scale(self): pass @unittest.skip("Not supported in SD3.") def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): pass @unittest.skip("Not supported in SD3.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass @unittest.skip("Not supported in SD3.") def test_modify_padding_mode(self): pass @is_flaky def test_multiple_wrong_adapter_name_raises_error(self): super().test_multiple_wrong_adapter_name_raises_error() @nightly @require_torch_accelerator @require_peft_backend @require_big_accelerator class SD3LoraIntegrationTests(unittest.TestCase): pipeline_class = StableDiffusion3Img2ImgPipeline repo_id = "stabilityai/stable-diffusion-3-medium-diffusers" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, seed=0): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) return { "prompt": "corgi", "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "generator": generator, "image": init_image, } def test_sd3_img2img_lora(self): pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16) pipe.load_lora_weights("zwloong/sd3-lora-training-rank16-v2") pipe.fuse_lora() pipe.unload_lora_weights() pipe = pipe.to(torch_device) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] image_slice = image[0, -3:, -3:] expected_slice = np.array([0.5649, 0.5405, 0.5488, 0.5688, 0.5449, 0.5513, 0.5337, 0.5107, 0.5059]) max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) assert max_diff < 1e-4, f"Outputs are not close enough, got {max_diff}" pipe.unload_lora_weights() release_memory(pipe)
diffusers/tests/lora/test_lora_layers_sd3.py/0
{ "file_path": "diffusers/tests/lora/test_lora_layers_sd3.py", "repo_id": "diffusers", "token_count": 2798 }
190
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from diffusers import AutoencoderKLMochi from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class AutoencoderKLMochiTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderKLMochi main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_kl_mochi_config(self): return { "in_channels": 15, "out_channels": 3, "latent_channels": 4, "encoder_block_out_channels": (32, 32, 32, 32), "decoder_block_out_channels": (32, 32, 32, 32), "layers_per_block": (1, 1, 1, 1, 1), "act_fn": "silu", "scaling_factor": 1, } @property def dummy_input(self): batch_size = 2 num_frames = 7 num_channels = 3 sizes = (16, 16) image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 7, 16, 16) @property def output_shape(self): return (3, 7, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_kl_mochi_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = { "MochiDecoder3D", "MochiDownBlock3D", "MochiEncoder3D", "MochiMidBlock3D", "MochiUpBlock3D", } super().test_gradient_checkpointing_is_applied(expected_set=expected_set) @unittest.skip("Unsupported test.") def test_forward_with_norm_groups(self): """ tests/models/autoencoders/test_models_autoencoder_mochi.py::AutoencoderKLMochiTests::test_forward_with_norm_groups - TypeError: AutoencoderKLMochi.__init__() got an unexpected keyword argument 'norm_num_groups' """ pass @unittest.skip("Unsupported test.") def test_model_parallelism(self): """ tests/models/autoencoders/test_models_autoencoder_mochi.py::AutoencoderKLMochiTests::test_outputs_equivalence - RuntimeError: values expected sparse tensor layout but got Strided """ pass @unittest.skip("Unsupported test.") def test_outputs_equivalence(self): """ tests/models/autoencoders/test_models_autoencoder_mochi.py::AutoencoderKLMochiTests::test_outputs_equivalence - RuntimeError: values expected sparse tensor layout but got Strided """ pass @unittest.skip("Unsupported test.") def test_sharded_checkpoints_device_map(self): """ tests/models/autoencoders/test_models_autoencoder_mochi.py::AutoencoderKLMochiTests::test_sharded_checkpoints_device_map - RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cuda:5! """
diffusers/tests/models/autoencoders/test_models_autoencoder_mochi.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_autoencoder_mochi.py", "repo_id": "diffusers", "token_count": 1581 }
191
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import PixArtTransformer2DModel, Transformer2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, slow, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class PixArtTransformer2DModelTests(ModelTesterMixin, unittest.TestCase): model_class = PixArtTransformer2DModel main_input_name = "hidden_states" # We override the items here because the transformer under consideration is small. model_split_percents = [0.7, 0.6, 0.6] @property def dummy_input(self): batch_size = 4 in_channels = 4 sample_size = 8 scheduler_num_train_steps = 1000 cross_attention_dim = 8 seq_len = 8 hidden_states = floats_tensor((batch_size, in_channels, sample_size, sample_size)).to(torch_device) timesteps = torch.randint(0, scheduler_num_train_steps, size=(batch_size,)).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, seq_len, cross_attention_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": timesteps, "encoder_hidden_states": encoder_hidden_states, "added_cond_kwargs": {"aspect_ratio": None, "resolution": None}, } @property def input_shape(self): return (4, 8, 8) @property def output_shape(self): return (8, 8, 8) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 8, "num_layers": 1, "patch_size": 2, "attention_head_dim": 2, "num_attention_heads": 2, "in_channels": 4, "cross_attention_dim": 8, "out_channels": 8, "attention_bias": True, "activation_fn": "gelu-approximate", "num_embeds_ada_norm": 8, "norm_type": "ada_norm_single", "norm_elementwise_affine": False, "norm_eps": 1e-6, "use_additional_conditions": False, "caption_channels": None, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_output(self): super().test_output( expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape ) def test_gradient_checkpointing_is_applied(self): expected_set = {"PixArtTransformer2DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) def test_correct_class_remapping_from_dict_config(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = Transformer2DModel.from_config(init_dict) assert isinstance(model, PixArtTransformer2DModel) def test_correct_class_remapping_from_pretrained_config(self): config = PixArtTransformer2DModel.load_config("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer") model = Transformer2DModel.from_config(config) assert isinstance(model, PixArtTransformer2DModel) @slow def test_correct_class_remapping(self): model = Transformer2DModel.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer") assert isinstance(model, PixArtTransformer2DModel)
diffusers/tests/models/transformers/test_models_pixart_transformer2d.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_pixart_transformer2d.py", "repo_id": "diffusers", "token_count": 1656 }
192
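The class remapping asserted by the tests above can be reproduced directly; the repo id and subfolder are taken from the test itself:

from diffusers import PixArtTransformer2DModel, Transformer2DModel

config = PixArtTransformer2DModel.load_config("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer")
model = Transformer2DModel.from_config(config)

# The generic class transparently dispatches to the specialized implementation.
assert isinstance(model, PixArtTransformer2DModel)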
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import HunyuanVideoFramepackTransformer3DModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class HunyuanVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase): model_class = HunyuanVideoFramepackTransformer3DModel main_input_name = "hidden_states" uses_custom_attn_processor = True model_split_percents = [0.5, 0.7, 0.9] @property def dummy_input(self): batch_size = 1 num_channels = 4 num_frames = 3 height = 4 width = 4 text_encoder_embedding_dim = 16 image_encoder_embedding_dim = 16 pooled_projection_dim = 8 sequence_length = 12 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device) encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) image_embeds = torch.randn((batch_size, sequence_length, image_encoder_embedding_dim)).to(torch_device) indices_latents = torch.ones((3,)).to(torch_device) latents_clean = torch.randn((batch_size, num_channels, num_frames - 1, height, width)).to(torch_device) indices_latents_clean = torch.ones((num_frames - 1,)).to(torch_device) latents_history_2x = torch.randn((batch_size, num_channels, num_frames - 1, height, width)).to(torch_device) indices_latents_history_2x = torch.ones((num_frames - 1,)).to(torch_device) latents_history_4x = torch.randn((batch_size, num_channels, (num_frames - 1) * 4, height, width)).to( torch_device ) indices_latents_history_4x = torch.ones(((num_frames - 1) * 4,)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) guidance = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": hidden_states, "timestep": timestep, "encoder_hidden_states": encoder_hidden_states, "pooled_projections": pooled_projections, "encoder_attention_mask": encoder_attention_mask, "guidance": guidance, "image_embeds": image_embeds, "indices_latents": indices_latents, "latents_clean": latents_clean, "indices_latents_clean": indices_latents_clean, "latents_history_2x": latents_history_2x, "indices_latents_history_2x": indices_latents_history_2x, "latents_history_4x": latents_history_4x, "indices_latents_history_4x": indices_latents_history_4x, } @property def input_shape(self): return (4, 3, 4, 4) @property def output_shape(self): return (4, 3, 4, 4) def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 4, "out_channels": 4, "num_attention_heads": 2, "attention_head_dim": 10, "num_layers": 1, "num_single_layers": 1, "num_refiner_layers": 1, "patch_size": 2, "patch_size_t": 1, "guidance_embeds": True, "text_embed_dim": 16, "pooled_projection_dim": 8, "rope_axes_dim": (2, 4, 
4), "image_condition_type": None, "has_image_proj": True, "image_proj_dim": 16, "has_clean_x_embedder": True, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"HunyuanVideoFramepackTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
diffusers/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py", "repo_id": "diffusers", "token_count": 2028 }
193
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import os import tempfile import unittest from collections import OrderedDict import torch from huggingface_hub import snapshot_download from parameterized import parameterized from pytest import mark from diffusers import UNet2DConditionModel from diffusers.models.attention_processor import ( CustomDiffusionAttnProcessor, IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, ) from diffusers.models.embeddings import ImageProjection, IPAdapterFaceIDImageProjection, IPAdapterPlusImageProjection from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, backend_reset_peak_memory_stats, enable_full_determinism, floats_tensor, is_peft_available, load_hf_numpy, require_peft_backend, require_torch_accelerator, require_torch_accelerator_with_fp16, skip_mps, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ( LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin, UNetTesterMixin, ) if is_peft_available(): from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer logger = logging.get_logger(__name__) enable_full_determinism() def get_unet_lora_config(): rank = 4 unet_lora_config = LoraConfig( r=rank, lora_alpha=rank, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False, use_dora=False, ) return unet_lora_config def check_if_lora_correctly_set(model) -> bool: """ Checks if the LoRA layers are correctly set with peft """ for module in model.modules(): if isinstance(module, BaseTunerLayer): return True return False def create_ip_adapter_state_dict(model): # "ip_adapter" (cross-attention weights) ip_cross_attn_state_dict = {} key_id = 1 for name in model.attn_processors.keys(): cross_attention_dim = ( None if name.endswith("attn1.processor") or "motion_module" in name else model.config.cross_attention_dim ) if name.startswith("mid_block"): hidden_size = model.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(model.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = model.config.block_out_channels[block_id] if cross_attention_dim is not None: sd = IPAdapterAttnProcessor( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0 ).state_dict() ip_cross_attn_state_dict.update( { f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], } ) key_id += 2 # "image_proj" (ImageProjection layer weights) cross_attention_dim = model.config["cross_attention_dim"] image_projection = ImageProjection( cross_attention_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, num_image_text_embeds=4 ) ip_image_projection_state_dict = 
{} sd = image_projection.state_dict() ip_image_projection_state_dict.update( { "proj.weight": sd["image_embeds.weight"], "proj.bias": sd["image_embeds.bias"], "norm.weight": sd["norm.weight"], "norm.bias": sd["norm.bias"], } ) del sd ip_state_dict = {} ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) return ip_state_dict def create_ip_adapter_plus_state_dict(model): # "ip_adapter" (cross-attention weights) ip_cross_attn_state_dict = {} key_id = 1 for name in model.attn_processors.keys(): cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = model.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(model.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = model.config.block_out_channels[block_id] if cross_attention_dim is not None: sd = IPAdapterAttnProcessor( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0 ).state_dict() ip_cross_attn_state_dict.update( { f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], } ) key_id += 2 # "image_proj" (ImageProjection layer weights) cross_attention_dim = model.config["cross_attention_dim"] image_projection = IPAdapterPlusImageProjection( embed_dims=cross_attention_dim, output_dims=cross_attention_dim, dim_head=32, heads=2, num_queries=4 ) ip_image_projection_state_dict = OrderedDict() for k, v in image_projection.state_dict().items(): if "2.to" in k: k = k.replace("2.to", "0.to") elif "layers.0.ln0" in k: k = k.replace("layers.0.ln0", "layers.0.0.norm1") elif "layers.0.ln1" in k: k = k.replace("layers.0.ln1", "layers.0.0.norm2") elif "layers.1.ln0" in k: k = k.replace("layers.1.ln0", "layers.1.0.norm1") elif "layers.1.ln1" in k: k = k.replace("layers.1.ln1", "layers.1.0.norm2") elif "layers.2.ln0" in k: k = k.replace("layers.2.ln0", "layers.2.0.norm1") elif "layers.2.ln1" in k: k = k.replace("layers.2.ln1", "layers.2.0.norm2") elif "layers.3.ln0" in k: k = k.replace("layers.3.ln0", "layers.3.0.norm1") elif "layers.3.ln1" in k: k = k.replace("layers.3.ln1", "layers.3.0.norm2") elif "to_q" in k: parts = k.split(".") parts[2] = "attn" k = ".".join(parts) elif "to_out.0" in k: parts = k.split(".") parts[2] = "attn" k = ".".join(parts) k = k.replace("to_out.0", "to_out") else: k = k.replace("0.ff.0", "0.1.0") k = k.replace("0.ff.1.net.0.proj", "0.1.1") k = k.replace("0.ff.1.net.2", "0.1.3") k = k.replace("1.ff.0", "1.1.0") k = k.replace("1.ff.1.net.0.proj", "1.1.1") k = k.replace("1.ff.1.net.2", "1.1.3") k = k.replace("2.ff.0", "2.1.0") k = k.replace("2.ff.1.net.0.proj", "2.1.1") k = k.replace("2.ff.1.net.2", "2.1.3") k = k.replace("3.ff.0", "3.1.0") k = k.replace("3.ff.1.net.0.proj", "3.1.1") k = k.replace("3.ff.1.net.2", "3.1.3") # if "norm_cross" in k: # ip_image_projection_state_dict[k.replace("norm_cross", "norm1")] = v # elif "layer_norm" in k: # ip_image_projection_state_dict[k.replace("layer_norm", "norm2")] = v if "to_k" in k: parts = k.split(".") parts[2] = "attn" k = ".".join(parts) ip_image_projection_state_dict[k.replace("to_k", "to_kv")] = torch.cat([v, v], dim=0) elif "to_v" in k: continue else: ip_image_projection_state_dict[k] = v ip_state_dict = {} ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) 
return ip_state_dict def create_ip_adapter_faceid_state_dict(model): # "ip_adapter" (cross-attention weights) # no LoRA weights ip_cross_attn_state_dict = {} key_id = 1 for name in model.attn_processors.keys(): cross_attention_dim = ( None if name.endswith("attn1.processor") or "motion_module" in name else model.config.cross_attention_dim ) if name.startswith("mid_block"): hidden_size = model.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(model.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = model.config.block_out_channels[block_id] if cross_attention_dim is not None: sd = IPAdapterAttnProcessor( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0 ).state_dict() ip_cross_attn_state_dict.update( { f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], } ) key_id += 2 # "image_proj" (ImageProjection layer weights) cross_attention_dim = model.config["cross_attention_dim"] image_projection = IPAdapterFaceIDImageProjection( cross_attention_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, mult=2, num_tokens=4 ) ip_image_projection_state_dict = {} sd = image_projection.state_dict() ip_image_projection_state_dict.update( { "proj.0.weight": sd["ff.net.0.proj.weight"], "proj.0.bias": sd["ff.net.0.proj.bias"], "proj.2.weight": sd["ff.net.2.weight"], "proj.2.bias": sd["ff.net.2.bias"], "norm.weight": sd["norm.weight"], "norm.bias": sd["norm.bias"], } ) del sd ip_state_dict = {} ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) return ip_state_dict def create_custom_diffusion_layers(model, mock_weights: bool = True): train_kv = True train_q_out = True custom_diffusion_attn_procs = {} st = model.state_dict() for name, _ in model.attn_processors.items(): cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = model.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(model.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = model.config.block_out_channels[block_id] layer_name = name.split(".processor")[0] weights = { "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"], "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"], } if train_q_out: weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"] weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"] weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"] if cross_attention_dim is not None: custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor( train_kv=train_kv, train_q_out=train_q_out, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ).to(model.device) custom_diffusion_attn_procs[name].load_state_dict(weights) if mock_weights: # add 1 to weights to mock trained weights with torch.no_grad(): custom_diffusion_attn_procs[name].to_k_custom_diffusion.weight += 1 custom_diffusion_attn_procs[name].to_v_custom_diffusion.weight += 1 else: custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor( train_kv=False, train_q_out=False, hidden_size=hidden_size, 
cross_attention_dim=cross_attention_dim, ) del st return custom_diffusion_attn_procs class UNet2DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet2DConditionModel main_input_name = "sample" # We override the items here because the unet under consideration is small. model_split_percents = [0.5, 0.34, 0.4] @property def dummy_input(self): batch_size = 4 num_channels = 4 sizes = (16, 16) noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 8)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 16, 16) @property def output_shape(self): return (4, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (4, 8), "norm_num_groups": 4, "down_block_types": ("CrossAttnDownBlock2D", "DownBlock2D"), "up_block_types": ("UpBlock2D", "CrossAttnUpBlock2D"), "cross_attention_dim": 8, "attention_head_dim": 2, "out_channels": 4, "in_channels": 4, "layers_per_block": 1, "sample_size": 16, } inputs_dict = self.dummy_input return init_dict, inputs_dict @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_enable_works(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.enable_xformers_memory_efficient_attention() assert ( model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ == "XFormersAttnProcessor" ), "xformers is not enabled" def test_model_with_attention_head_dim_tuple(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_use_linear_projection(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["use_linear_projection"] = True model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_cross_attention_dim_tuple(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = (8, 8) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_simple_projection(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() batch_size, _, _, sample_size = inputs_dict["sample"].shape init_dict["class_embed_type"] = "simple_projection" 
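# With class_embed_type="simple_projection", class labels are passed through a single linear projection, so the input width set below has to match the trailing dimension of the `class_labels` tensor created just after it.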
init_dict["projection_class_embeddings_input_dim"] = sample_size inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_class_embeddings_concat(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() batch_size, _, _, sample_size = inputs_dict["sample"].shape init_dict["class_embed_type"] = "simple_projection" init_dict["projection_class_embeddings_input_dim"] = sample_size init_dict["class_embeddings_concat"] = True inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_attention_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) model.eval() model.set_attention_slice("auto") with torch.no_grad(): output = model(**inputs_dict) assert output is not None model.set_attention_slice("max") with torch.no_grad(): output = model(**inputs_dict) assert output is not None model.set_attention_slice(2) with torch.no_grad(): output = model(**inputs_dict) assert output is not None def test_model_sliceable_head_dim(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) def check_sliceable_dim_attr(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): assert isinstance(module.sliceable_head_dim, int) for child in module.children(): check_sliceable_dim_attr(child) # retrieve number of attention layers for module in model.children(): check_sliceable_dim_attr(module) def test_gradient_checkpointing_is_applied(self): expected_set = { "CrossAttnUpBlock2D", "CrossAttnDownBlock2D", "UNetMidBlock2DCrossAttn", "UpBlock2D", "Transformer2DModel", "DownBlock2D", } attention_head_dim = (8, 16) block_out_channels = (16, 32) super().test_gradient_checkpointing_is_applied( expected_set=expected_set, attention_head_dim=attention_head_dim, block_out_channels=block_out_channels ) def test_special_attn_proc(self): class AttnEasyProc(torch.nn.Module): def __init__(self, num): super().__init__() self.weight = torch.nn.Parameter(torch.tensor(num)) self.is_run = False self.number = 0 self.counter = 0 def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, number=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = 
attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states += self.weight self.is_run = True self.counter += 1 self.number = number return hidden_states # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) processor = AttnEasyProc(5.0) model.set_attn_processor(processor) model(**inputs_dict, cross_attention_kwargs={"number": 123}).sample assert processor.counter == 8 assert processor.is_run assert processor.number == 123 @parameterized.expand( [ # fmt: off [torch.bool], [torch.long], [torch.float], # fmt: on ] ) def test_model_xattn_mask(self, mask_dtype): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16), "block_out_channels": (16, 32)}) model.to(torch_device) model.eval() cond = inputs_dict["encoder_hidden_states"] with torch.no_grad(): full_cond_out = model(**inputs_dict).sample assert full_cond_out is not None keepall_mask = torch.ones(*cond.shape[:-1], device=cond.device, dtype=mask_dtype) full_cond_keepallmask_out = model(**{**inputs_dict, "encoder_attention_mask": keepall_mask}).sample assert full_cond_keepallmask_out.allclose(full_cond_out, rtol=1e-05, atol=1e-05), ( "a 'keep all' mask should give the same result as no mask" ) trunc_cond = cond[:, :-1, :] trunc_cond_out = model(**{**inputs_dict, "encoder_hidden_states": trunc_cond}).sample assert not trunc_cond_out.allclose(full_cond_out, rtol=1e-05, atol=1e-05), ( "discarding the last token from our cond should change the result" ) batch, tokens, _ = cond.shape mask_last = (torch.arange(tokens) < tokens - 1).expand(batch, -1).to(cond.device, mask_dtype) masked_cond_out = model(**{**inputs_dict, "encoder_attention_mask": mask_last}).sample assert masked_cond_out.allclose(trunc_cond_out, rtol=1e-05, atol=1e-05), ( "masking the last token from our cond should be equivalent to truncating that token out of the condition" ) # see diffusers.models.attention_processor::Attention#prepare_attention_mask # note: we may not need to fix mask padding to work for stable-diffusion cross-attn masks. # since the use-case (somebody passes in a too-short cross-attn mask) is pretty esoteric. # maybe it's fine that this only works for the unclip use-case. @mark.skip( reason="we currently pad mask by target_length tokens (what unclip needs), whereas stable-diffusion's cross-attn needs to instead pad by remaining_length." 
) def test_model_xattn_padding(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)}) model.to(torch_device) model.eval() cond = inputs_dict["encoder_hidden_states"] with torch.no_grad(): full_cond_out = model(**inputs_dict).sample assert full_cond_out is not None batch, tokens, _ = cond.shape keeplast_mask = (torch.arange(tokens) == tokens - 1).expand(batch, -1).to(cond.device, torch.bool) keeplast_out = model(**{**inputs_dict, "encoder_attention_mask": keeplast_mask}).sample assert not keeplast_out.allclose(full_cond_out), "a 'keep last token' mask should change the result" trunc_mask = torch.zeros(batch, tokens - 1, device=cond.device, dtype=torch.bool) trunc_mask_out = model(**{**inputs_dict, "encoder_attention_mask": trunc_mask}).sample assert trunc_mask_out.allclose(keeplast_out), ( "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask." ) def test_custom_diffusion_processors(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample1 = model(**inputs_dict).sample custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) # make sure we can set a list of attention processors model.set_attn_processor(custom_diffusion_attn_procs) model.to(torch_device) # test that attn processors can be set to itself model.set_attn_processor(model.attn_processors) with torch.no_grad(): sample2 = model(**inputs_dict).sample assert (sample1 - sample2).abs().max() < 3e-3 def test_custom_diffusion_save_load(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): old_sample = model(**inputs_dict).sample custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) model.set_attn_processor(custom_diffusion_attn_procs) with torch.no_grad(): sample = model(**inputs_dict).sample with tempfile.TemporaryDirectory() as tmpdirname: model.save_attn_procs(tmpdirname, safe_serialization=False) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_custom_diffusion_weights.bin"))) torch.manual_seed(0) new_model = self.model_class(**init_dict) new_model.load_attn_procs(tmpdirname, weight_name="pytorch_custom_diffusion_weights.bin") new_model.to(torch_device) with torch.no_grad(): new_sample = new_model(**inputs_dict).sample assert (sample - new_sample).abs().max() < 1e-4 # custom diffusion and no custom diffusion should be the same assert (sample - old_sample).abs().max() < 3e-3 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_custom_diffusion_xformers_on_off(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) torch.manual_seed(0) model = 
self.model_class(**init_dict) model.to(torch_device) custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) model.set_attn_processor(custom_diffusion_attn_procs) # default with torch.no_grad(): sample = model(**inputs_dict).sample model.enable_xformers_memory_efficient_attention() on_sample = model(**inputs_dict).sample model.disable_xformers_memory_efficient_attention() off_sample = model(**inputs_dict).sample assert (sample - on_sample).abs().max() < 1e-4 assert (sample - off_sample).abs().max() < 1e-4 def test_pickle(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample = model(**inputs_dict).sample sample_copy = copy.copy(sample) assert (sample - sample_copy).abs().max() < 1e-4 def test_asymmetrical_unet(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() # Add asymmetry to configs init_dict["transformer_layers_per_block"] = [[3, 2], 1] init_dict["reverse_transformer_layers_per_block"] = [[3, 4], 1] torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) output = model(**inputs_dict).sample expected_shape = inputs_dict["sample"].shape # Check if input and output shapes are the same self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_ip_adapter(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) # forward pass without ip-adapter with torch.no_grad(): sample1 = model(**inputs_dict).sample # update inputs_dict for ip-adapter batch_size = inputs_dict["encoder_hidden_states"].shape[0] # for ip-adapter image_embeds has shape [batch_size, num_image, embed_dim] image_embeds = floats_tensor((batch_size, 1, model.config.cross_attention_dim)).to(torch_device) inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds]} # make ip_adapter_1 and ip_adapter_2 ip_adapter_1 = create_ip_adapter_state_dict(model) image_proj_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["image_proj"].items()} cross_attn_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["ip_adapter"].items()} ip_adapter_2 = {} ip_adapter_2.update({"image_proj": image_proj_state_dict_2, "ip_adapter": cross_attn_state_dict_2}) # forward pass ip_adapter_1 model._load_ip_adapter_weights([ip_adapter_1]) assert model.config.encoder_hid_dim_type == "ip_image_proj" assert model.encoder_hid_proj is not None assert model.down_blocks[0].attentions[0].transformer_blocks[0].attn2.processor.__class__.__name__ in ( "IPAdapterAttnProcessor", "IPAdapterAttnProcessor2_0", ) with torch.no_grad(): sample2 = model(**inputs_dict).sample # forward pass with ip_adapter_2 model._load_ip_adapter_weights([ip_adapter_2]) with torch.no_grad(): sample3 = model(**inputs_dict).sample # forward pass with ip_adapter_1 again model._load_ip_adapter_weights([ip_adapter_1]) with torch.no_grad(): sample4 = model(**inputs_dict).sample # forward pass with multiple ip-adapters and multiple images model._load_ip_adapter_weights([ip_adapter_1, ip_adapter_2]) # set the scale for ip_adapter_2 to 0 so that result should be same as only load ip_adapter_1 for attn_processor in model.attn_processors.values(): if 
isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)): attn_processor.scale = [1, 0] image_embeds_multi = image_embeds.repeat(1, 2, 1) inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds_multi, image_embeds_multi]} with torch.no_grad(): sample5 = model(**inputs_dict).sample # forward pass with single ip-adapter & single image when image_embeds is not a list and a 2-d tensor image_embeds = image_embeds.squeeze(1) inputs_dict["added_cond_kwargs"] = {"image_embeds": image_embeds} model._load_ip_adapter_weights(ip_adapter_1) with torch.no_grad(): sample6 = model(**inputs_dict).sample assert not sample1.allclose(sample2, atol=1e-4, rtol=1e-4) assert not sample2.allclose(sample3, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample4, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample5, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample6, atol=1e-4, rtol=1e-4) def test_ip_adapter_plus(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) # forward pass without ip-adapter with torch.no_grad(): sample1 = model(**inputs_dict).sample # update inputs_dict for ip-adapter batch_size = inputs_dict["encoder_hidden_states"].shape[0] # for ip-adapter-plus image_embeds has shape [batch_size, num_image, sequence_length, embed_dim] image_embeds = floats_tensor((batch_size, 1, 1, model.config.cross_attention_dim)).to(torch_device) inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds]} # make ip_adapter_1 and ip_adapter_2 ip_adapter_1 = create_ip_adapter_plus_state_dict(model) image_proj_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["image_proj"].items()} cross_attn_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["ip_adapter"].items()} ip_adapter_2 = {} ip_adapter_2.update({"image_proj": image_proj_state_dict_2, "ip_adapter": cross_attn_state_dict_2}) # forward pass ip_adapter_1 model._load_ip_adapter_weights([ip_adapter_1]) assert model.config.encoder_hid_dim_type == "ip_image_proj" assert model.encoder_hid_proj is not None assert model.down_blocks[0].attentions[0].transformer_blocks[0].attn2.processor.__class__.__name__ in ( "IPAdapterAttnProcessor", "IPAdapterAttnProcessor2_0", ) with torch.no_grad(): sample2 = model(**inputs_dict).sample # forward pass with ip_adapter_2 model._load_ip_adapter_weights([ip_adapter_2]) with torch.no_grad(): sample3 = model(**inputs_dict).sample # forward pass with ip_adapter_1 again model._load_ip_adapter_weights([ip_adapter_1]) with torch.no_grad(): sample4 = model(**inputs_dict).sample # forward pass with multiple ip-adapters and multiple images model._load_ip_adapter_weights([ip_adapter_1, ip_adapter_2]) # set the scale for ip_adapter_2 to 0 so that result should be same as only load ip_adapter_1 for attn_processor in model.attn_processors.values(): if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)): attn_processor.scale = [1, 0] image_embeds_multi = image_embeds.repeat(1, 2, 1, 1) inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds_multi, image_embeds_multi]} with torch.no_grad(): sample5 = model(**inputs_dict).sample # forward pass with single ip-adapter & single image when image_embeds is a 3-d tensor image_embeds = image_embeds[:,].squeeze(1) inputs_dict["added_cond_kwargs"] = {"image_embeds": image_embeds} model._load_ip_adapter_weights(ip_adapter_1) with torch.no_grad(): sample6 = 
model(**inputs_dict).sample assert not sample1.allclose(sample2, atol=1e-4, rtol=1e-4) assert not sample2.allclose(sample3, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample4, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample5, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample6, atol=1e-4, rtol=1e-4) @parameterized.expand( [ ("hf-internal-testing/unet2d-sharded-dummy", None), ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", "fp16"), ] ) @require_torch_accelerator def test_load_sharded_checkpoint_from_hub(self, repo_id, variant): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() loaded_model = self.model_class.from_pretrained(repo_id, variant=variant) loaded_model = loaded_model.to(torch_device) new_output = loaded_model(**inputs_dict) assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) @parameterized.expand( [ ("hf-internal-testing/unet2d-sharded-dummy-subfolder", None), ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "fp16"), ] ) @require_torch_accelerator def test_load_sharded_checkpoint_from_hub_subfolder(self, repo_id, variant): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() loaded_model = self.model_class.from_pretrained(repo_id, subfolder="unet", variant=variant) loaded_model = loaded_model.to(torch_device) new_output = loaded_model(**inputs_dict) assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) @require_torch_accelerator def test_load_sharded_checkpoint_from_hub_local(self): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy") loaded_model = self.model_class.from_pretrained(ckpt_path, local_files_only=True) loaded_model = loaded_model.to(torch_device) new_output = loaded_model(**inputs_dict) assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) @require_torch_accelerator def test_load_sharded_checkpoint_from_hub_local_subfolder(self): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy-subfolder") loaded_model = self.model_class.from_pretrained(ckpt_path, subfolder="unet", local_files_only=True) loaded_model = loaded_model.to(torch_device) new_output = loaded_model(**inputs_dict) assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) @require_torch_accelerator @parameterized.expand( [ ("hf-internal-testing/unet2d-sharded-dummy", None), ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", "fp16"), ] ) def test_load_sharded_checkpoint_device_map_from_hub(self, repo_id, variant): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() loaded_model = self.model_class.from_pretrained(repo_id, variant=variant, device_map="auto") new_output = loaded_model(**inputs_dict) assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) @require_torch_accelerator @parameterized.expand( [ ("hf-internal-testing/unet2d-sharded-dummy-subfolder", None), ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "fp16"), ] ) def test_load_sharded_checkpoint_device_map_from_hub_subfolder(self, repo_id, variant): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() loaded_model = self.model_class.from_pretrained(repo_id, variant=variant, subfolder="unet", device_map="auto") new_output = loaded_model(**inputs_dict) assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) @require_torch_accelerator def 
test_load_sharded_checkpoint_device_map_from_hub_local(self): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy") loaded_model = self.model_class.from_pretrained(ckpt_path, local_files_only=True, device_map="auto") new_output = loaded_model(**inputs_dict) assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) @require_torch_accelerator def test_load_sharded_checkpoint_device_map_from_hub_local_subfolder(self): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy-subfolder") loaded_model = self.model_class.from_pretrained( ckpt_path, local_files_only=True, subfolder="unet", device_map="auto" ) new_output = loaded_model(**inputs_dict) assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) @require_peft_backend def test_load_attn_procs_raise_warning(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) # forward pass without LoRA with torch.no_grad(): non_lora_sample = model(**inputs_dict).sample unet_lora_config = get_unet_lora_config() model.add_adapter(unet_lora_config) assert check_if_lora_correctly_set(model), "Lora not correctly set in UNet." # forward pass with LoRA with torch.no_grad(): lora_sample_1 = model(**inputs_dict).sample with tempfile.TemporaryDirectory() as tmpdirname: model.save_attn_procs(tmpdirname) model.unload_lora() with self.assertWarns(FutureWarning) as warning: model.load_attn_procs(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) warning_message = str(warning.warnings[0].message) assert "Using the `load_attn_procs()` method has been deprecated" in warning_message # important to still run the remaining checks below. assert check_if_lora_correctly_set(model), "Lora not correctly set in UNet." with torch.no_grad(): lora_sample_2 = model(**inputs_dict).sample assert not torch.allclose(non_lora_sample, lora_sample_1, atol=1e-4, rtol=1e-4), ( "LoRA injected UNet should produce different results." ) assert torch.allclose(lora_sample_1, lora_sample_2, atol=1e-4, rtol=1e-4), ( "Loading from a saved checkpoint should produce identical results." ) @require_peft_backend def test_save_attn_procs_raise_warning(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) unet_lora_config = get_unet_lora_config() model.add_adapter(unet_lora_config) assert check_if_lora_correctly_set(model), "Lora not correctly set in UNet."
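# Saving LoRA layers through the legacy `save_attn_procs()` entry point below is expected to raise a FutureWarning; the test only asserts on the deprecation message text.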
with tempfile.TemporaryDirectory() as tmpdirname: with self.assertWarns(FutureWarning) as warning: model.save_attn_procs(tmpdirname) warning_message = str(warning.warnings[0].message) assert "Using the `save_attn_procs()` method has been deprecated" in warning_message class UNet2DConditionModelCompileTests(TorchCompileTesterMixin, unittest.TestCase): model_class = UNet2DConditionModel def prepare_init_args_and_inputs_for_common(self): return UNet2DConditionModelTests().prepare_init_args_and_inputs_for_common() class UNet2DConditionModelLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase): model_class = UNet2DConditionModel def prepare_init_args_and_inputs_for_common(self): return UNet2DConditionModelTests().prepare_init_args_and_inputs_for_common() @slow class UNet2DConditionModelIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"): variant = "fp16" if fp16 else None torch_dtype = torch.float16 if fp16 else torch.float32 model = UNet2DConditionModel.from_pretrained( model_id, subfolder="unet", torch_dtype=torch_dtype, variant=variant ) model.to(torch_device).eval() return model @require_torch_accelerator def test_set_attention_slice_auto(self): backend_empty_cache(torch_device) backend_reset_max_memory_allocated(torch_device) backend_reset_peak_memory_stats(torch_device) unet = self.get_unet_model() unet.set_attention_slice("auto") latents = self.get_latents(33) encoder_hidden_states = self.get_encoder_hidden_states(33) timestep = 1 with torch.no_grad(): _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample mem_bytes = backend_max_memory_allocated(torch_device) assert mem_bytes < 5 * 10**9 @require_torch_accelerator def test_set_attention_slice_max(self): backend_empty_cache(torch_device) backend_reset_max_memory_allocated(torch_device) backend_reset_peak_memory_stats(torch_device) unet = self.get_unet_model() unet.set_attention_slice("max") latents = self.get_latents(33) encoder_hidden_states = self.get_encoder_hidden_states(33) timestep = 1 with torch.no_grad(): _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample mem_bytes = backend_max_memory_allocated(torch_device) assert mem_bytes < 5 * 10**9 @require_torch_accelerator def test_set_attention_slice_int(self): backend_empty_cache(torch_device) backend_reset_max_memory_allocated(torch_device) backend_reset_peak_memory_stats(torch_device) unet = self.get_unet_model() unet.set_attention_slice(2) latents = self.get_latents(33) encoder_hidden_states = self.get_encoder_hidden_states(33) timestep = 1 with torch.no_grad(): _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample mem_bytes = backend_max_memory_allocated(torch_device) assert mem_bytes < 5 * 10**9 @require_torch_accelerator def test_set_attention_slice_list(self): backend_empty_cache(torch_device) backend_reset_max_memory_allocated(torch_device) backend_reset_peak_memory_stats(torch_device) # there are 32 sliceable layers slice_list = 
16 * [2, 3] unet = self.get_unet_model() unet.set_attention_slice(slice_list) latents = self.get_latents(33) encoder_hidden_states = self.get_encoder_hidden_states(33) timestep = 1 with torch.no_grad(): _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample mem_bytes = backend_max_memory_allocated(torch_device) assert mem_bytes < 5 * 10**9 def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False): dtype = torch.float16 if fp16 else torch.float32 hidden_states = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return hidden_states @parameterized.expand( [ # fmt: off [33, 4, [-0.4424, 0.1510, -0.1937, 0.2118, 0.3746, -0.3957, 0.0160, -0.0435]], [47, 0.55, [-0.1508, 0.0379, -0.3075, 0.2540, 0.3633, -0.0821, 0.1719, -0.0207]], [21, 0.89, [-0.6479, 0.6364, -0.3464, 0.8697, 0.4443, -0.6289, -0.0091, 0.1778]], [9, 1000, [0.8888, -0.5659, 0.5834, -0.7469, 1.1912, -0.3923, 1.1241, -0.4424]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_compvis_sd_v1_4(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4") latents = self.get_latents(seed) encoder_hidden_states = self.get_encoder_hidden_states(seed) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True) latents = self.get_latents(seed, fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [33, 4, [-0.4430, 0.1570, -0.1867, 0.2376, 0.3205, -0.3681, 0.0525, -0.0722]], [47, 0.55, [-0.1415, 0.0129, -0.3136, 0.2257, 0.3430, -0.0536, 0.2114, -0.0436]], [21, 0.89, [-0.7091, 0.6664, -0.3643, 0.9032, 0.4499, -0.6541, 0.0139, 0.1750]], [9, 1000, [0.8878, -0.5659, 0.5844, -0.7442, 1.1883, -0.3927, 1.1192, -0.4423]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_compvis_sd_v1_5(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="stable-diffusion-v1-5/stable-diffusion-v1-5") latents = self.get_latents(seed) encoder_hidden_states = self.get_encoder_hidden_states(seed) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = 
model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) @parameterized.expand( [ # fmt: off [83, 4, [-0.2695, -0.1669, 0.0073, -0.3181, -0.1187, -0.1676, -0.1395, -0.5972]], [17, 0.55, [-0.1290, -0.2588, 0.0551, -0.0916, 0.3286, 0.0238, -0.3669, 0.0322]], [8, 0.89, [-0.5283, 0.1198, 0.0870, -0.1141, 0.9189, -0.0150, 0.5474, 0.4319]], [3, 1000, [-0.5601, 0.2411, -0.5435, 0.1268, 1.1338, -0.2427, -0.0280, -1.0020]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="stable-diffusion-v1-5/stable-diffusion-v1-5", fp16=True) latents = self.get_latents(seed, fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [33, 4, [-0.7639, 0.0106, -0.1615, -0.3487, -0.0423, -0.7972, 0.0085, -0.4858]], [47, 0.55, [-0.6564, 0.0795, -1.9026, -0.6258, 1.8235, 1.2056, 1.2169, 0.9073]], [21, 0.89, [0.0327, 0.4399, -0.6358, 0.3417, 0.4120, -0.5621, -0.0397, -1.0430]], [9, 1000, [0.1600, 0.7303, -1.0556, -0.3515, -0.7440, -1.2037, -1.8149, -1.8931]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_compvis_sd_inpaint(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="stable-diffusion-v1-5/stable-diffusion-inpainting") latents = self.get_latents(seed, shape=(4, 9, 64, 64)) encoder_hidden_states = self.get_encoder_hidden_states(seed) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == (4, 4, 64, 64) output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [83, 4, [-0.1047, -1.7227, 0.1067, 0.0164, -0.5698, -0.4172, -0.1388, 1.1387]], [17, 0.55, [0.0975, -0.2856, -0.3508, -0.4600, 0.3376, 0.2930, -0.2747, -0.7026]], [8, 0.89, [-0.0952, 0.0183, -0.5825, -0.1981, 0.1131, 0.4668, -0.0395, -0.3486]], [3, 1000, [0.4790, 0.4949, -1.0732, -0.7158, 0.7959, -0.9478, 0.1105, -0.9741]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="stable-diffusion-v1-5/stable-diffusion-inpainting", fp16=True) latents = self.get_latents(seed, shape=(4, 9, 64, 64), fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == (4, 4, 64, 64) output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() 
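# `output_slice` is the flattened 2x2x2 corner of the prediction (last batch element, last two channels, last two rows, first two columns): eight values that are cheap to compare against the hard-coded reference while still catching numerical drift.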
expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_stabilityai_sd_v2_fp16(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
diffusers/tests/models/unets/test_models_unet_2d_condition.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_2d_condition.py", "repo_id": "diffusers", "token_count": 27765 }
194
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from importlib import import_module class DependencyTester(unittest.TestCase): def test_diffusers_import(self): try: import diffusers # noqa: F401 except ImportError: assert False def test_backend_registration(self): import diffusers from diffusers.dependency_versions_table import deps all_classes = inspect.getmembers(diffusers, inspect.isclass) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": backend = "k-diffusion" elif backend == "invisible_watermark": backend = "invisible-watermark" elif backend == "opencv": backend = "opencv-python" assert backend in deps, f"{backend} is not in the deps table!" def test_pipeline_imports(self): import diffusers import diffusers.pipelines all_classes = inspect.getmembers(diffusers, inspect.isclass) for cls_name, cls_module in all_classes: if hasattr(diffusers.pipelines, cls_name): pipeline_folder_module = ".".join(str(cls_module.__module__).split(".")[:3]) _ = import_module(pipeline_folder_module, str(cls_name))
diffusers/tests/others/test_dependencies.py/0
{ "file_path": "diffusers/tests/others/test_dependencies.py", "repo_id": "diffusers", "token_count": 835 }
195
import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AnimateDiffVideoToVideoPipeline, AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LCMScheduler, MotionAdapter, StableDiffusionPipeline, UNet2DConditionModel, UNetMotionModel, ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffVideoToVideoPipelineFastTests( IPAdapterTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase ): pipeline_class = AnimateDiffVideoToVideoPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = VIDEO_TO_VIDEO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) def get_dummy_components(self): cross_attention_dim = 8 block_out_channels = (8, 8) torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=block_out_channels, layers_per_block=2, sample_size=8, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=cross_attention_dim, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=block_out_channels, in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=cross_attention_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) motion_adapter = MotionAdapter( block_out_channels=block_out_channels, motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0, num_frames: int = 2): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) video_height = 32 video_width = 32 video = [Image.new("RGB", (video_width, video_height))] * num_frames inputs = { "video": video, "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "pt", } return inputs def test_from_pipe_consistent_config(self): assert self.original_pipeline_class == StableDiffusionPipeline original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" original_kwargs = 
{"requires_safety_checker": False} # create original_pipeline_class(sd) pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) # original_pipeline_class(sd) -> pipeline_class pipe_components = self.get_dummy_components() pipe_additional_components = {} for name, component in pipe_components.items(): if name not in pipe_original.components: pipe_additional_components[name] = component pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) # pipeline_class -> original_pipeline_class(sd) original_pipe_additional_components = {} for name, component in pipe_original.components.items(): if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): original_pipe_additional_components[name] = component pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) # compare the config original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} assert original_config_2 == original_config def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = AnimateDiffVideoToVideoPipeline(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_ip_adapter(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array( [ 0.5569, 0.6250, 0.4145, 0.5613, 0.5563, 0.5213, 0.5092, 0.4950, 0.4950, 0.5685, 0.3858, 0.4864, 0.6458, 0.4312, 0.5518, 0.5608, 0.4418, 0.5378, ] ) return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion 
UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to(torch_device) model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == torch_device for device in model_devices)) output_device = pipe(**self.get_dummy_inputs(torch_device))[0] self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood. So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("prompt") inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) pipe(**inputs) def test_latent_inputs(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) sample_size = pipe.unet.config.sample_size inputs["latents"] = torch.randn((1, 4, 1, sample_size, sample_size), device=torch_device) inputs.pop("video") pipe(**inputs) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs).frames[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs).frames[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload ) max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") def test_free_init(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] pipe.enable_free_init( num_iters=2,
use_fast_sampling=True, method="butterworth", order=4, spatial_stop_frequency=0.25, temporal_stop_frequency=0.25, ) inputs_enable_free_init = self.get_dummy_inputs(torch_device) frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] pipe.disable_free_init() inputs_disable_free_init = self.get_dummy_inputs(torch_device) frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" ) self.assertLess( max_diff_disabled, 1e-4, "Disabling of FreeInit should lead to results similar to the default pipeline results", ) def test_free_init_with_schedulers(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] schedulers_to_test = [ DPMSolverMultistepScheduler.from_config( components["scheduler"].config, timestep_spacing="linspace", beta_schedule="linear", algorithm_type="dpmsolver++", steps_offset=1, clip_sample=False, ), LCMScheduler.from_config( components["scheduler"].config, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1, clip_sample=False, ), ] components.pop("scheduler") for scheduler in schedulers_to_test: components["scheduler"] = scheduler pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) pipe.enable_free_init(num_iters=2, use_fast_sampling=False) inputs = self.get_dummy_inputs(torch_device) frames_enable_free_init = pipe(**inputs).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results", ) def test_free_noise_blocks(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) pipe.enable_free_noise() for block in pipe.unet.down_blocks: for motion_module in block.motion_modules: for transformer_block in motion_module.transformer_blocks: self.assertTrue( isinstance(transformer_block, FreeNoiseTransformerBlock), "Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.", ) pipe.disable_free_noise() for block in pipe.unet.down_blocks: for motion_module in block.motion_modules: for transformer_block in motion_module.transformer_blocks: self.assertFalse( isinstance(transformer_block, FreeNoiseTransformerBlock), "Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.", ) def test_free_noise(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device, num_frames=16) inputs_normal["num_inference_steps"] = 2 inputs_normal["strength"] = 0.5 frames_normal = pipe(**inputs_normal).frames[0] for context_length in [8, 9]: for context_stride in [4, 6]: 
pipe.enable_free_noise(context_length, context_stride) inputs_enable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) inputs_enable_free_noise["num_inference_steps"] = 2 inputs_enable_free_noise["strength"] = 0.5 frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0] pipe.disable_free_noise() inputs_disable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) inputs_disable_free_noise["num_inference_steps"] = 2 inputs_disable_free_noise["strength"] = 0.5 frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum() max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeNoise should lead to results different from the default pipeline results", ) self.assertLess( max_diff_disabled, 1e-4, "Disabling of FreeNoise should lead to results similar to the default pipeline results", ) def test_free_noise_split_inference(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) pipe.enable_free_noise(8, 4) inputs_normal = self.get_dummy_inputs(torch_device, num_frames=16) inputs_normal["num_inference_steps"] = 2 inputs_normal["strength"] = 0.5 frames_normal = pipe(**inputs_normal).frames[0] # Test FreeNoise with split inference memory-optimization pipe.enable_free_noise_split_inference(spatial_split_size=16, temporal_split_size=4) inputs_enable_split_inference = self.get_dummy_inputs(torch_device, num_frames=16) inputs_enable_split_inference["num_inference_steps"] = 2 inputs_enable_split_inference["strength"] = 0.5 frames_enable_split_inference = pipe(**inputs_enable_split_inference).frames[0] sum_split_inference = np.abs(to_np(frames_normal) - to_np(frames_enable_split_inference)).sum() self.assertLess( sum_split_inference, 1e-4, "Enabling FreeNoise Split Inference memory-optimizations should lead to results similar to the default pipeline results", ) def test_free_noise_multi_prompt(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) context_length = 8 context_stride = 4 pipe.enable_free_noise(context_length, context_stride) # Make sure that pipeline works when prompt indices are within num_frames bounds inputs = self.get_dummy_inputs(torch_device, num_frames=16) inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf"} inputs["num_inference_steps"] = 2 inputs["strength"] = 0.5 pipe(**inputs).frames[0] with self.assertRaises(ValueError): # Ensure that prompt indices are within bounds inputs = self.get_dummy_inputs(torch_device, num_frames=16) inputs["num_inference_steps"] = 2 inputs["strength"] = 0.5 inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a leaf"} pipe(**inputs).frames[0] def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "num_images_per_prompt": 1, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
diffusers/tests/pipelines/animatediff/test_animatediff_video2video.py/0
{ "file_path": "diffusers/tests/pipelines/animatediff/test_animatediff_video2video.py", "repo_id": "diffusers", "token_count": 10423 }
196
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(4, 8),
            layers_per_block=1,
            norm_num_groups=4,
            sample_size=8,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 8, 8, 3))
        expected_slice = np.array([0.0, 9.979e-01, 0.0, 9.999e-01, 9.986e-01, 9.991e-01, 7.106e-04, 0.0, 0.0])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_accelerator
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="np").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
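        # Compare a fixed 3x3 corner slice against reference values; the 1e-2
        # tolerance absorbs minor numeric differences across accelerators.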
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="np").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
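
# Minimal sketch of the integration setup above (checkpoint ids are taken from the
# tests; treat the exact pixel values as device-dependent):
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler()).to(torch_device)
#     image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="np").images[0]
#
# With `eta=0.0`, DDIM sampling is deterministic given a fixed generator, which is
# what makes the fixed `expected_slice` comparisons above feasible.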
diffusers/tests/pipelines/ddim/test_ddim.py/0
{ "file_path": "diffusers/tests/pipelines/ddim/test_ddim.py", "repo_id": "diffusers", "token_count": 2222 }
197
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel

from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxTransformer2DModel
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist


class FluxControlPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
    pipeline_class = FluxControlPipeline
    params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"])
    batch_params = frozenset(["prompt"])

    # there is no xformers processor for Flux
    test_xformers_attention = False
    test_layerwise_casting = True
    test_group_offloading = True

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = FluxTransformer2DModel(
            patch_size=1,
            in_channels=8,
            out_channels=4,
            num_layers=1,
            num_single_layers=1,
            attention_head_dim=16,
            num_attention_heads=2,
            joint_attention_dim=32,
            pooled_projection_dim=32,
            axes_dims_rope=[4, 4, 8],
        )
        clip_text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(clip_text_encoder_config)

        torch.manual_seed(0)
        text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        vae = AutoencoderKL(
            sample_size=32,
            in_channels=3,
            out_channels=3,
            block_out_channels=(4,),
            layers_per_block=1,
            latent_channels=1,
            norm_num_groups=1,
            use_quant_conv=False,
            use_post_quant_conv=False,
            shift_factor=0.0609,
            scaling_factor=1.5035,
        )

        scheduler = FlowMatchEulerDiscreteScheduler()

        return {
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "text_encoder_2": text_encoder_2,
            "tokenizer": tokenizer,
            "tokenizer_2": tokenizer_2,
            "transformer": transformer,
            "vae": vae,
        }

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device="cpu").manual_seed(seed)

        control_image = Image.new("RGB", (16, 16), 0)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "control_image": control_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "height": 8,
            "width": 8,
            "max_sequence_length": 48,
            "output_type": "np",
        }
        return inputs

    def test_flux_different_prompts(self):
        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)

        inputs = self.get_dummy_inputs(torch_device)
        output_same_prompt = pipe(**inputs).images[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = "a different prompt"
        output_different_prompts = pipe(**inputs).images[0]

        max_diff = np.abs(output_same_prompt - output_different_prompts).max()

        # Outputs should be different here.
        # For some reason, they don't show large differences.
        assert max_diff > 1e-6

    def test_fused_qkv_projections(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        original_image_slice = image[0, -3:, -3:, -1]

        # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added
        # to the pipeline level.
        pipe.transformer.fuse_qkv_projections()
        self.assertTrue(
            check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]),
            ("Something is wrong with the fused attention layers. Expected all the attention projections to be fused."),
        )

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice_fused = image[0, -3:, -3:, -1]

        pipe.transformer.unfuse_qkv_projections()
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice_disabled = image[0, -3:, -3:, -1]

        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), (
            "Fusion of QKV projections shouldn't affect the outputs."
        )
        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), (
            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
        )
        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
            "Original outputs should match when fused QKV projections are disabled."
        )

    def test_flux_image_output_shape(self):
        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
        inputs = self.get_dummy_inputs(torch_device)

        height_width_pairs = [(32, 32), (72, 57)]
        for height, width in height_width_pairs:
            expected_height = height - height % (pipe.vae_scale_factor * 2)
            expected_width = width - width % (pipe.vae_scale_factor * 2)

            inputs.update({"height": height, "width": width})
            image = pipe(**inputs).images[0]
            output_height, output_width, _ = image.shape
            assert (output_height, output_width) == (expected_height, expected_width)
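
# Note on the fusion test above: `fuse_qkv_projections()` folds the separate
# query/key/value projections into a single matmul, so it should be numerically
# neutral; the three `np.allclose` checks assert exactly that round-trip
# (original -> fused -> unfused) within floating-point tolerance.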
diffusers/tests/pipelines/flux/test_pipeline_flux_control.py/0
{ "file_path": "diffusers/tests/pipelines/flux/test_pipeline_flux_control.py", "repo_id": "diffusers", "token_count": 3089 }
198
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class Dummies:
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0; set clip_std to 1 so it doesn't.
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    callback_cfg_params = ["prompt_embeds", "text_encoder_hidden_states", "text_mask"]
    test_xformers_attention = False
    supports_dduf = False

    def get_dummy_components(self):
        dummies = Dummies()
        return dummies.get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        dummies = Dummies()
        return dummies.get_dummy_inputs(device=device, seed=seed)

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.5948, 0.1875, -0.1523, -1.1995, -1.4061, -0.6367, -1.4607, -0.6406, 0.8793, -0.3891]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    # override default test because no output_type "latent", use "pt" instead
    def test_callback_inputs(self):
        sig = inspect.signature(self.pipeline_class.__call__)

        if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_test(pipe, i, t, callback_kwargs):
            missing_callback_inputs = set()
            for v in pipe._callback_tensor_inputs:
                if v not in callback_kwargs:
                    missing_callback_inputs.add(v)
            self.assertTrue(
                len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}"
            )
            last_i = pipe.num_timesteps - 1
            if i == last_i:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)
        inputs["callback_on_step_end"] = callback_inputs_test
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        inputs["num_inference_steps"] = 2
        inputs["output_type"] = "pt"

        output = pipe(**inputs)[0]
        assert output.abs().sum() == 0
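
# The callback zeroes `latents` on the final denoising step, so the returned
# image embeds must be all zeros; `output.abs().sum() == 0` checks exactly that.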
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py", "repo_id": "diffusers", "token_count": 4062 }
199