Update README.md
Browse files
README.md
CHANGED
|
@@ -38,7 +38,7 @@ Example: `python -m deepcompressor.app.diffusion.ptq examples/diffusion/configs/
|
|
| 38 |
|
| 39 |
Folder Structure
|
| 40 |
|
| 41 |
-
- refer to [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)
|
| 42 |
|
| 43 |
- refer to [black-forest-labs/FLUX.1-Kontext-dev](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev/tree/main)
|
| 44 |
|
|
@@ -51,11 +51,7 @@ potential fix: app.diffusion.pipeline.config.py
|
|
| 51 |
```python
|
| 52 |
@staticmethod
|
| 53 |
def _default_build(
|
| 54 |
-
name: str,
|
| 55 |
-
path: str,
|
| 56 |
-
dtype: str | torch.dtype,
|
| 57 |
-
device: str | torch.device,
|
| 58 |
-
shift_activations: bool
|
| 59 |
) -> DiffusionPipeline:
|
| 60 |
if not path:
|
| 61 |
if name == "sdxl":
|
|
@@ -64,6 +60,8 @@ potential fix: app.diffusion.pipeline.config.py
|
|
| 64 |
path = "stabilityai/sdxl-turbo"
|
| 65 |
elif name == "pixart-sigma":
|
| 66 |
path = "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS"
|
|
|
|
|
|
|
| 67 |
elif name == "flux.1-dev":
|
| 68 |
path = "black-forest-labs/FLUX.1-dev"
|
| 69 |
elif name == "flux.1-canny-dev":
|
|
@@ -76,24 +74,22 @@ potential fix: app.diffusion.pipeline.config.py
|
|
| 76 |
path = "black-forest-labs/FLUX.1-schnell"
|
| 77 |
else:
|
| 78 |
raise ValueError(f"Path for {name} is not specified.")
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
pipeline = FluxControlPipeline.from_pretrained(path, torch_dtype=dtype)
|
| 83 |
elif name == "flux.1-fill-dev":
|
| 84 |
pipeline = FluxFillPipeline.from_pretrained(path, torch_dtype=dtype)
|
| 85 |
elif name.startswith("sana-"):
|
| 86 |
if dtype == torch.bfloat16:
|
| 87 |
-
pipeline = SanaPipeline.from_pretrained(
|
| 88 |
-
path, variant="bf16", torch_dtype=dtype, use_safetensors=True
|
| 89 |
-
)
|
| 90 |
pipeline.vae.to(dtype)
|
| 91 |
pipeline.text_encoder.to(dtype)
|
| 92 |
else:
|
| 93 |
pipeline = SanaPipeline.from_pretrained(path, torch_dtype=dtype)
|
| 94 |
else:
|
| 95 |
pipeline = AutoPipelineForText2Image.from_pretrained(path, torch_dtype=dtype)
|
| 96 |
-
|
| 97 |
# Debug output
|
| 98 |
print(">>> DEVICE:", device)
|
| 99 |
print(">>> PIPELINE TYPE:", type(pipeline))
|
|
@@ -128,14 +124,14 @@ potential fix: app.diffusion.pipeline.config.py
|
|
| 128 |
|
| 129 |
Debug Log
|
| 130 |
```
|
| 131 |
-
25-07-
|
| 132 |
-
25-07-
|
| 133 |
-
Loading
|
| 134 |
-
Loading pipeline components...: 57%|████████████████████████████████████████████████████████████████ | 4/7 [00:00<00:00, 9.47it/s]
|
| 135 |
You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers
|
| 136 |
-
Loading
|
|
|
|
| 137 |
>>> DEVICE: cuda
|
| 138 |
-
>>> PIPELINE TYPE: <class 'diffusers.pipelines.flux.
|
| 139 |
>>> Moving transformer to cuda using to_empty()
|
| 140 |
>>> WARNING: transformer.to_empty(cuda) failed: Module.to_empty() takes 1 positional argument but 2 were given
|
| 141 |
>>> Falling back to transformer.to(cuda)
|
|
@@ -146,16 +142,16 @@ Loading pipeline components...: 100%|██████████████
|
|
| 146 |
>>> Moving text_encoder to cuda using to_empty()
|
| 147 |
>>> WARNING: text_encoder.to_empty(cuda) failed: Module.to_empty() takes 1 positional argument but 2 were given
|
| 148 |
>>> Falling back to text_encoder.to(cuda)
|
| 149 |
-
25-07-
|
| 150 |
-
25-07-
|
| 151 |
-
25-07-
|
| 152 |
-
25-07-
|
| 153 |
-
25-07-
|
| 154 |
-
25-07-
|
| 155 |
-
25-07-
|
| 156 |
-
25-07-
|
| 157 |
-
25-07-
|
| 158 |
-
25-07-
|
| 159 |
```
|
| 160 |
|
| 161 |
2) KeyError: <class 'diffusers.models.transformers.transformer_flux.FluxAttention'>
|
|
|
|
| 38 |
|
| 39 |
Folder Structure
|
| 40 |
|
| 41 |
+
- refer to [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/tree/main)
|
| 42 |
|
| 43 |
- refer to [black-forest-labs/FLUX.1-Kontext-dev](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev/tree/main)
|
| 44 |
|
|
|
|
| 51 |
```python
|
| 52 |
@staticmethod
|
| 53 |
def _default_build(
|
| 54 |
+
name: str, path: str, dtype: str | torch.dtype, device: str | torch.device, shift_activations: bool
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
) -> DiffusionPipeline:
|
| 56 |
if not path:
|
| 57 |
if name == "sdxl":
|
|
|
|
| 60 |
path = "stabilityai/sdxl-turbo"
|
| 61 |
elif name == "pixart-sigma":
|
| 62 |
path = "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS"
|
| 63 |
+
elif name == "flux.1-kontext-dev":
|
| 64 |
+
path = "black-forest-labs/FLUX.1-Kontext-dev"
|
| 65 |
elif name == "flux.1-dev":
|
| 66 |
path = "black-forest-labs/FLUX.1-dev"
|
| 67 |
elif name == "flux.1-canny-dev":
|
|
|
|
| 74 |
path = "black-forest-labs/FLUX.1-schnell"
|
| 75 |
else:
|
| 76 |
raise ValueError(f"Path for {name} is not specified.")
|
| 77 |
+
if name in ["flux.1-kontext-dev"]:
|
| 78 |
+
pipeline = FluxKontextPipeline.from_pretrained(path, torch_dtype=dtype)
|
| 79 |
+
elif name in ["flux.1-canny-dev", "flux.1-depth-dev"]:
|
| 80 |
pipeline = FluxControlPipeline.from_pretrained(path, torch_dtype=dtype)
|
| 81 |
elif name == "flux.1-fill-dev":
|
| 82 |
pipeline = FluxFillPipeline.from_pretrained(path, torch_dtype=dtype)
|
| 83 |
elif name.startswith("sana-"):
|
| 84 |
if dtype == torch.bfloat16:
|
| 85 |
+
pipeline = SanaPipeline.from_pretrained(path, variant="bf16", torch_dtype=dtype, use_safetensors=True)
|
|
|
|
|
|
|
| 86 |
pipeline.vae.to(dtype)
|
| 87 |
pipeline.text_encoder.to(dtype)
|
| 88 |
else:
|
| 89 |
pipeline = SanaPipeline.from_pretrained(path, torch_dtype=dtype)
|
| 90 |
else:
|
| 91 |
pipeline = AutoPipelineForText2Image.from_pretrained(path, torch_dtype=dtype)
|
| 92 |
+
|
| 93 |
# Debug output
|
| 94 |
print(">>> DEVICE:", device)
|
| 95 |
print(">>> PIPELINE TYPE:", type(pipeline))
|
|
|
|
| 124 |
|
| 125 |
Debug Log
|
| 126 |
```
|
| 127 |
+
25-07-22 20:11:56 | I | === Start Evaluating ===
|
| 128 |
+
25-07-22 20:11:56 | I | * Building diffusion model pipeline
|
| 129 |
+
Loading pipeline components...: 0%| | 0/7 [00:00<?, ?it/s]
|
|
|
|
| 130 |
You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers
|
| 131 |
+
Loading checkpoint shards: 100%|██████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 18.92it/s]
|
| 132 |
+
Loading pipeline components...: 100%|█████████████████████████████████████████████████████| 7/7 [00:00<00:00, 9.50it/s]
|
| 133 |
>>> DEVICE: cuda
|
| 134 |
+
>>> PIPELINE TYPE: <class 'diffusers.pipelines.flux.pipeline_flux_kontext.FluxKontextPipeline'>
|
| 135 |
>>> Moving transformer to cuda using to_empty()
|
| 136 |
>>> WARNING: transformer.to_empty(cuda) failed: Module.to_empty() takes 1 positional argument but 2 were given
|
| 137 |
>>> Falling back to transformer.to(cuda)
|
|
|
|
| 142 |
>>> Moving text_encoder to cuda using to_empty()
|
| 143 |
>>> WARNING: text_encoder.to_empty(cuda) failed: Module.to_empty() takes 1 positional argument but 2 were given
|
| 144 |
>>> Falling back to text_encoder.to(cuda)
|
| 145 |
+
25-07-22 20:11:59 | I | Replacing fused Linear with ConcatLinear.
|
| 146 |
+
25-07-22 20:11:59 | I | + Replacing fused Linear in single_transformer_blocks.0 with ConcatLinear.
|
| 147 |
+
25-07-22 20:11:59 | I | - in_features = 3072/15360
|
| 148 |
+
25-07-22 20:11:59 | I | - out_features = 3072
|
| 149 |
+
25-07-22 20:11:59 | I | + Replacing fused Linear in single_transformer_blocks.1 with ConcatLinear.
|
| 150 |
+
25-07-22 20:11:59 | I | - in_features = 3072/15360
|
| 151 |
+
25-07-22 20:11:59 | I | - out_features = 3072
|
| 152 |
+
25-07-22 20:11:59 | I | + Replacing fused Linear in single_transformer_blocks.2 with ConcatLinear.
|
| 153 |
+
25-07-22 20:11:59 | I | - in_features = 3072/15360
|
| 154 |
+
25-07-22 20:11:59 | I | - out_features = 3072
|
| 155 |
```
|
| 156 |
|
| 157 |
2) KeyError: <class 'diffusers.models.transformers.transformer_flux.FluxAttention'>
|