Update README.md
Browse files
README.md
CHANGED
|
@@ -32,20 +32,21 @@ import torchvision.transforms.functional as F
|
|
| 32 |
from PIL import Image
|
| 33 |
from flux2_tiny_autoencoder import Flux2TinyAutoEncoder
|
| 34 |
|
|
|
|
| 35 |
tiny_vae = Flux2TinyAutoEncoder.from_pretrained(
|
| 36 |
"fal/FLUX.2-Tiny-AutoEncoder",
|
| 37 |
-
|
| 38 |
-
)
|
| 39 |
-
tiny_vae.eval()
|
| 40 |
|
| 41 |
-
pil_image = Image.open("pexels.jpg")
|
| 42 |
image_tensor = F.to_tensor(pil_image)
|
| 43 |
image_tensor = image_tensor.unsqueeze(0) * 2.0 - 1.0
|
|
|
|
| 44 |
|
| 45 |
with torch.inference_mode():
|
| 46 |
latents = tiny_vae.encode(image_tensor, return_dict=False)
|
| 47 |
recon = tiny_vae.decode(latents, return_dict=False)
|
| 48 |
recon = recon.squeeze(0).clamp(-1, 1) / 2.0 + 0.5
|
|
|
|
| 49 |
|
| 50 |
recon_image = F.to_pil_image(recon)
|
| 51 |
recon_image.save("reconstituted.png")
|
|
|
|
| 32 |
from PIL import Image
|
| 33 |
from flux2_tiny_autoencoder import Flux2TinyAutoEncoder
|
| 34 |
|
| 35 |
+
device = torch.device("cuda")
|
| 36 |
tiny_vae = Flux2TinyAutoEncoder.from_pretrained(
|
| 37 |
"fal/FLUX.2-Tiny-AutoEncoder",
|
| 38 |
+
).to(device=device, dtype=torch.bfloat16)
|
|
|
|
|
|
|
| 39 |
|
| 40 |
+
pil_image = Image.open("pexels.jpg")
|
| 41 |
image_tensor = F.to_tensor(pil_image)
|
| 42 |
image_tensor = image_tensor.unsqueeze(0) * 2.0 - 1.0
|
| 43 |
+
image_tensor = image_tensor.to(device, dtype=tiny_vae.dtype)
|
| 44 |
|
| 45 |
with torch.inference_mode():
|
| 46 |
latents = tiny_vae.encode(image_tensor, return_dict=False)
|
| 47 |
recon = tiny_vae.decode(latents, return_dict=False)
|
| 48 |
recon = recon.squeeze(0).clamp(-1, 1) / 2.0 + 0.5
|
| 49 |
+
recon = recon.float().detach().cpu()
|
| 50 |
|
| 51 |
recon_image = F.to_pil_image(recon)
|
| 52 |
recon_image.save("reconstituted.png")
|