Instructions to use codeShare/FLUX.2-klein-9b-SDNQ-4bit with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Diffusers
How to use codeShare/FLUX.2-klein-9b-SDNQ-4bit with Diffusers:
pip install -U diffusers transformers accelerate
import torch
from diffusers import DiffusionPipeline

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("codeShare/FLUX.2-klein-9b-SDNQ-4bit", dtype=torch.bfloat16, device_map="cuda")
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- Draw Things
- DiffusionBee
Upload text_encoder component
Browse files
- text_encoder/config.json +20 -20
- text_encoder/model.safetensors +1 -1
text_encoder/config.json
CHANGED
|
@@ -324,38 +324,38 @@
|
|
| 324 |
},
|
| 325 |
"modules_quant_config": {},
|
| 326 |
"modules_to_not_convert": [
|
| 327 |
-
"
|
| 328 |
-
".y_embedder",
|
| 329 |
-
"model.embed_tokens.weight",
|
| 330 |
".txt_in",
|
| 331 |
-
"
|
| 332 |
-
"lm_head",
|
| 333 |
-
"multi_modal_projector",
|
| 334 |
-
".condition_embedder",
|
| 335 |
-
".img_in",
|
| 336 |
".emb_out",
|
| 337 |
-
"wte",
|
| 338 |
".final_layer",
|
| 339 |
-
".
|
| 340 |
-
".
|
| 341 |
-
"patch_emb",
|
| 342 |
".vid_in",
|
| 343 |
-
".
|
| 344 |
-
"
|
| 345 |
-
"
|
| 346 |
-
".
|
| 347 |
".proj_out",
|
| 348 |
".img_out",
|
| 349 |
-
".
|
| 350 |
".emb_in",
|
| 351 |
-
".
|
| 352 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 353 |
],
|
| 354 |
"non_blocking": false,
|
| 355 |
"quant_conv": false,
|
| 356 |
"quant_embedding": false,
|
| 357 |
"quant_method": "sdnq",
|
| 358 |
-
"quantization_device": "
|
| 359 |
"quantized_matmul_dtype": null,
|
| 360 |
"return_device": "cpu",
|
| 361 |
"sdnq_version": "0.1.8",
|
|
|
|
| 324 |
},
|
| 325 |
"modules_quant_config": {},
|
| 326 |
"modules_to_not_convert": [
|
| 327 |
+
"patch_embed",
|
|
|
|
|
|
|
| 328 |
".txt_in",
|
| 329 |
+
".context_embedder",
|
| 330 |
+
"lm_head.weight",
|
|
|
|
|
|
|
|
|
|
| 331 |
".emb_out",
|
|
|
|
| 332 |
".final_layer",
|
| 333 |
+
".txt_out",
|
| 334 |
+
".img_in",
|
|
|
|
| 335 |
".vid_in",
|
| 336 |
+
".norm_out",
|
| 337 |
+
"wte",
|
| 338 |
+
".y_embedder",
|
| 339 |
+
".time_embed",
|
| 340 |
".proj_out",
|
| 341 |
".img_out",
|
| 342 |
+
".condition_embedder",
|
| 343 |
".emb_in",
|
| 344 |
+
"model.embed_tokens.weight",
|
| 345 |
+
".vid_out",
|
| 346 |
+
"time_text_embed",
|
| 347 |
+
"lm_head",
|
| 348 |
+
"multi_modal_projector",
|
| 349 |
+
".t_embedder",
|
| 350 |
+
".x_embedder",
|
| 351 |
+
"patch_embedding",
|
| 352 |
+
"patch_emb"
|
| 353 |
],
|
| 354 |
"non_blocking": false,
|
| 355 |
"quant_conv": false,
|
| 356 |
"quant_embedding": false,
|
| 357 |
"quant_method": "sdnq",
|
| 358 |
+
"quantization_device": "cuda",
|
| 359 |
"quantized_matmul_dtype": null,
|
| 360 |
"return_device": "cpu",
|
| 361 |
"sdnq_version": "0.1.8",
|
text_encoder/model.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
size 6835992696
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2461c3f756edec000ebd5d0315c43811024ff91be72496b67e7d9be606a3c1be
|
| 3 |
size 6835992696
|