Diffusers
Safetensors
PixCellPipeline
AlexGraikos commited on
Commit
2654681
·
verified ·
1 Parent(s): 9099de3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +109 -3
README.md CHANGED
@@ -1,3 +1,109 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+ ---
5
+ license: apache-2.0
6
+ ---
7
+
8
+ ### Load PixCell-1024 model
9
+
10
+ ```python
11
+ import torch
12
+
13
+ from diffusers import DiffusionPipeline
14
+ from diffusers import AutoencoderKL
15
+
16
+ device = torch.device('cuda')
17
+
18
+ # We do not host the weights of the SD3 VAE -- load it from StabilityAI
19
+ sd3_vae = AutoencoderKL.from_pretrained("stabilityai/stable-diffusion-3.5-large", subfolder="vae")
20
+
21
+ pipeline = DiffusionPipeline.from_pretrained(
22
+ "StonyBrook-CVLab/pixcell-1024-diffusers",
23
+ vae=sd3_vae,
24
+ custom_pipeline="StonyBrook-CVLab/pixcell-pipeline",
25
+ trust_remote_code=True,
26
+ )
27
+
28
+ pipeline.to(device);
29
+ ```
30
+
31
+ ### Load [UNI2-h](https://huggingface.co/MahmoodLab/UNI2-h) for conditioning
32
+ ```python
33
+ import timm
34
+ from timm.data import resolve_data_config
35
+ from timm.data.transforms_factory import create_transform
36
+
37
+ timm_kwargs = {
38
+ 'img_size': 224,
39
+ 'patch_size': 14,
40
+ 'depth': 24,
41
+ 'num_heads': 24,
42
+ 'init_values': 1e-5,
43
+ 'embed_dim': 1536,
44
+ 'mlp_ratio': 2.66667*2,
45
+ 'num_classes': 0,
46
+ 'no_embed_class': True,
47
+ 'mlp_layer': timm.layers.SwiGLUPacked,
48
+ 'act_layer': torch.nn.SiLU,
49
+ 'reg_tokens': 8,
50
+ 'dynamic_img_size': True
51
+ }
52
+ uni_model = timm.create_model("hf-hub:MahmoodLab/UNI2-h", pretrained=True, **timm_kwargs)
53
+ transform = create_transform(**resolve_data_config(uni_model.pretrained_cfg, model=uni_model))
54
+ uni_model.eval()
55
+ uni_model.to(device);
56
+ ```
57
+
58
+ ### Unconditional generation
59
+ ```python
60
+ uncond = pipeline.get_unconditional_embedding(1)
61
+ samples = pipeline(uni_embeds=uncond, negative_uni_embeds=None, guidance_scale=1.0)
62
+ ```
63
+
64
+ ### Conditional generation
65
+ ```python
66
+ # Load image
67
+ import numpy as np
68
+ import einops
69
+ from PIL import Image
70
+ from huggingface_hub import hf_hub_download
71
+
72
+ # This is an example image we provide
73
+ path = hf_hub_download(repo_id="StonyBrook-CVLab/pixcell-1024-diffusers", filename="test_image.jpg")
74
+ image = Image.open(path)
75
+
76
+ # Extract UNI from random patches in the image
77
+ n_patches = 1
78
+ patches = []
79
+ uni_emb = []
80
+ for k in range(n_patches):
81
+ # Extract random crop
82
+ sz = pipeline.transformer.config.sample_size * pipeline.vae_scale_factor
83
+
84
+ x1 = np.random.randint(0, image.size[0] - sz+1)
85
+ y1 = np.random.randint(0, image.size[1] - sz+1)
86
+ image_patch = image.crop((x1, y1, x1+sz, y1+sz))
87
+ patches.append(image_patch)
88
+ print("Extracted patch:", patches[-1].size)
89
+
90
+ # Rearrange 1024x1024 image into 16 256x256 patches
91
+ uni_patches = np.array(image_patch)
92
+ uni_patches = einops.rearrange(uni_patches, '(d1 h) (d2 w) c -> (d1 d2) h w c', d1=4, d2=4)
93
+
94
+ # Extract UNIs for each patch
95
+ uni_images = torch.cat(
96
+ [transform(Image.fromarray(x)).unsqueeze(0) for x in uni_patches],
97
+ dim=0)
98
+ with torch.inference_mode():
99
+ feature_emb = uni_model(uni_images.to(device))
100
+ uni_emb.append(feature_emb)
101
+
102
+ uni_emb = torch.stack(uni_emb, dim=0)
103
+ print("Extracted UNI:", uni_emb.shape)
104
+
105
+ # Get unconditional embedding for classifier-free guidance
106
+ uncond = pipeline.get_unconditional_embedding(uni_emb.shape[0])
107
+ # Generate new samples
108
+ samples = pipeline(uni_embeds=uni_emb, negative_uni_embeds=uncond, guidance_scale=1.5, num_images_per_prompt=1)
109
+ ```