Commit: Upload src/pipeline.py with huggingface_hub
Browse files · src/pipeline.py (+5 additions, −6 deletions)
src/pipeline.py
CHANGED
@@ -44,15 +44,14 @@ def load_pipeline() -> Pipeline:
     quantization_config = BitsAndBytesConfig(
         load_in_8bit=True, llm_int8_threshold=10.,
     )
+    path = os.path.join(HF_HUB_CACHE, "models--black-forest-labs--FLUX.1-schnell/snapshots/741f7c3ce8b383c54771c7003378a50191e9efe9/transformer")
     transformer = FluxTransformer2DModel.from_pretrained(
-        [old line 48 — content lost in extraction; presumably the model-id positional argument]
-        revision=REVISION,
-        subfolder="transformer",
+        path,
         torch_dtype=torch.bfloat16,
         quantization_config=quantization_config,
-        local_files_only=True,
-        use_safetensors=True,
-        [old line 55 — content lost in extraction; presumably the closing ")"]
+        # local_files_only=True,
+        # use_safetensors=True,
+    )


     pipeline = FluxPipeline.from_pretrained(