Update src/pipeline.py
Browse files — src/pipeline.py (+4 −3)
src/pipeline.py
CHANGED
|
@@ -55,7 +55,7 @@ os.environ["TOKENIZERS_PARALLELISM"] = "True"
|
|
| 55 |
torch._dynamo.config.suppress_errors = True
|
| 56 |
|
| 57 |
Pipeline = None
|
| 58 |
-
|
| 59 |
Revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
|
| 60 |
ckpt_id = "manbeast3b/Flux.1.schnell-quant2"
|
| 61 |
ckpt_revision = "44eb293715147878512da10bf3bc47cd14ec8c55"
|
|
@@ -938,7 +938,8 @@ def aot_compile(name, model, **sample_kwargs):
|
|
| 938 |
# inductor_configs=options,
|
| 939 |
# )
|
| 940 |
|
| 941 |
-
|
|
|
|
| 942 |
|
| 943 |
@torch.no_grad()
|
| 944 |
def f(model, **kwargs):
|
|
@@ -978,7 +979,7 @@ def load_pipeline() -> Pipeline:
|
|
| 978 |
# print(f"{time=} seconds.")
|
| 979 |
|
| 980 |
# pipeline = FluxPipeline.from_pretrained(ckpt_id, revision=ckpt_revision, transformer=None, local_files_only=True, torch_dtype=torch.bfloat16,)
|
| 981 |
-
pipeline = FluxPipeline.from_pretrained(
|
| 982 |
pipeline.to("cuda")
|
| 983 |
pipeline.to(memory_format=torch.channels_last)
|
| 984 |
# pipeline.transformer = compiled_func
|
|
|
|
| 55 |
torch._dynamo.config.suppress_errors = True
|
| 56 |
|
| 57 |
Pipeline = None
|
| 58 |
+
Ids = "black-forest-labs/FLUX.1-schnell"
|
| 59 |
Revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
|
| 60 |
ckpt_id = "manbeast3b/Flux.1.schnell-quant2"
|
| 61 |
ckpt_revision = "44eb293715147878512da10bf3bc47cd14ec8c55"
|
|
|
|
| 938 |
# inductor_configs=options,
|
| 939 |
# )
|
| 940 |
|
| 941 |
+
Ids = "black-forest-labs/FLUX.1-schnell"
|
| 942 |
+
Revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
|
| 943 |
|
| 944 |
@torch.no_grad()
|
| 945 |
def f(model, **kwargs):
|
|
|
|
| 979 |
# print(f"{time=} seconds.")
|
| 980 |
|
| 981 |
# pipeline = FluxPipeline.from_pretrained(ckpt_id, revision=ckpt_revision, transformer=None, local_files_only=True, torch_dtype=torch.bfloat16,)
|
| 982 |
+
pipeline = FluxPipeline.from_pretrained(Ids, revision=Revision, local_files_only=True, torch_dtype=torch.bfloat16,) # transformer=None,
|
| 983 |
pipeline.to("cuda")
|
| 984 |
pipeline.to(memory_format=torch.channels_last)
|
| 985 |
# pipeline.transformer = compiled_func
|