"""Generate one image with FLUX.1-dev via diffusers.

Loads the pipeline from a local checkpoint, renders a single prompt with a
fixed seed, and writes the result to ``flux-dev.png``.
"""
import torch
from diffusers import FluxPipeline


def main() -> None:
    """Load the FLUX.1-dev pipeline and render one image to disk."""
    pipe = FluxPipeline.from_pretrained(
        "/mnt/workspace/checkpoints/black-forest-labs/FLUX.1-dev",
        torch_dtype=torch.bfloat16,
    )
    # Save some VRAM by offloading submodules to CPU between forward passes.
    # Remove this if you have enough GPU memory to keep the model resident.
    pipe.enable_model_cpu_offload()

    prompt = "A cat holding a sign that says hello world"
    image = pipe(
        prompt,
        height=384,   # alternatives tried: 192, 96
        width=640,    # alternatives tried: 320, 160
        guidance_scale=3.5,
        num_inference_steps=50,
        max_sequence_length=512,
        # CPU generator with a fixed seed so the output is reproducible.
        generator=torch.Generator("cpu").manual_seed(0),
    ).images[0]
    image.save("flux-dev.png")


if __name__ == "__main__":
    main()