wendell0218 committed · Commit 3a096c5 (verified) · 1 Parent(s): dfb291f

Update README.md

Files changed (1): README.md (+130, −0)
## ✨️ Quickstart

**1. Prepare Environment**

We recommend using Python 3.10 and setting up a virtual environment:

```bash
# clone our repo
git clone https://github.com/wendell0218/FocusDiff.git
cd FocusDiff

# prepare python environment
conda create -n focus-diff python=3.10
conda activate focus-diff
pip install -r requirements.txt
```
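
After installing, a quick import check confirms that PyTorch is set up and sees your GPU. This is an optional sanity check, not part of the original quickstart:

```python
# Optional sanity check: verify the environment before moving on.
import torch

print("torch version:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
```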

**2. Prepare Pretrained Model**

FocusDiff uses `Janus-Pro-7B` as the pretrained model for subsequent supervised fine-tuning. You can download it with the following commands:

```bash
GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/deepseek-ai/Janus-Pro-7B
cd Janus-Pro-7B
git lfs pull
```
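
Alternatively, if you'd rather avoid git-lfs, the same weights can be fetched with the `huggingface_hub` Python API. A minimal sketch; the `local_dir` below is just an example path:

```python
# Alternative download via the huggingface_hub API.
# The local_dir is an example; any target path works.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="deepseek-ai/Janus-Pro-7B",
    local_dir="Janus-Pro-7B",
)
```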

**3. Start Generation!**

The script below samples images autoregressively with classifier-free guidance: each caption is duplicated into a conditional and an unconditional branch, image tokens are drawn one by one with top-k/top-p sampling, and the resulting token grid is decoded back to pixels by the vision tokenizer.

```python
import os
import torch
import PIL.Image
import numpy as np
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor


@torch.inference_mode()
def generate(
    mmgpt: MultiModalityCausalLM,
    vl_chat_processor: VLChatProcessor,
    prompt: str,
    temperature: float = 1.0,
    parallel_size: int = 4,
    cfg_weight: float = 5.0,
    image_token_num_per_image: int = 576,
    img_size: int = 384,
    patch_size: int = 16,
    img_top_k: int = 1,
    img_top_p: float = 1.0,
):
    images = []
    input_ids = vl_chat_processor.tokenizer.encode(prompt)
    input_ids = torch.LongTensor(input_ids)

    # Duplicate each prompt: even rows keep the condition, odd rows are padded
    # and serve as the unconditional branch for classifier-free guidance.
    tokens = torch.zeros((parallel_size * 2, len(input_ids)), dtype=torch.int).cuda()
    for i in range(parallel_size * 2):
        tokens[i, :] = input_ids
        if i % 2 != 0:
            tokens[i, 1:-1] = vl_chat_processor.pad_id

    inputs_embeds = mmgpt.language_model.get_input_embeddings()(tokens)
    generated_tokens = torch.zeros((parallel_size, image_token_num_per_image), dtype=torch.int).cuda()

    for i in range(image_token_num_per_image):
        outputs = mmgpt.language_model.model(
            inputs_embeds=inputs_embeds,
            use_cache=True,
            past_key_values=outputs.past_key_values if i != 0 else None,
        )
        hidden_states = outputs.last_hidden_state
        logits = mmgpt.gen_head(hidden_states[:, -1, :])

        # Classifier-free guidance: mix conditional and unconditional logits.
        logit_cond = logits[0::2, :]
        logit_uncond = logits[1::2, :]
        logits = logit_uncond + cfg_weight * (logit_cond - logit_uncond)

        # Top-k filtering.
        if img_top_k:
            v, _ = torch.topk(logits, min(img_top_k, logits.size(-1)))
            logits[logits < v[:, [-1]]] = float("-inf")
        probs = torch.softmax(logits / temperature, dim=-1)

        # Top-p (nucleus) sampling.
        if img_top_p:
            probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
            probs_sum = torch.cumsum(probs_sort, dim=-1)
            mask = probs_sum - probs_sort > img_top_p
            probs_sort[mask] = 0.0
            probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
            next_token = torch.multinomial(probs_sort, num_samples=1)
            next_token = torch.gather(probs_idx, -1, next_token)
        else:
            next_token = torch.multinomial(probs, num_samples=1)
        generated_tokens[:, i] = next_token.squeeze(dim=-1)

        # Feed the sampled token back in for both CFG branches.
        next_token = torch.cat([next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1)], dim=1).view(-1)
        img_embeds = mmgpt.prepare_gen_img_embeds(next_token)
        inputs_embeds = img_embeds.unsqueeze(dim=1)

    # Decode the generated image tokens back to pixels with the vision tokenizer.
    dec = mmgpt.gen_vision_model.decode_code(
        generated_tokens.to(dtype=torch.int),
        shape=[parallel_size, 8, img_size // patch_size, img_size // patch_size],
    )
    dec = dec.to(torch.float32).cpu().numpy().transpose(0, 2, 3, 1)
    dec = np.clip((dec + 1) / 2 * 255, 0, 255)
    visual_img = np.zeros((parallel_size, img_size, img_size, 3), dtype=np.uint8)
    visual_img[:, :, :] = dec
    for i in range(parallel_size):
        images.append(PIL.Image.fromarray(visual_img[i]))

    return images


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str, default="deepseek-ai/Janus-Pro-7B")
    parser.add_argument("--ckpt_path", type=str, default=None)
    parser.add_argument("--caption", type=str, default="a brown giraffe and a white stop sign")
    parser.add_argument("--gen_path", type=str, default="results/samples")
    parser.add_argument("--cfg", type=float, default=5.0)
    parser.add_argument("--parallel_size", type=int, default=4)
    args = parser.parse_args()

    vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(args.model_path)
    vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(args.model_path, trust_remote_code=True)
    if args.ckpt_path is not None:
        # Optionally load a FocusDiff fine-tuned checkpoint on top of the base model.
        state_dict = torch.load(args.ckpt_path, map_location="cpu")
        vl_gpt.load_state_dict(state_dict)
    vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()

    prompt = f"<|User|>: {args.caption}\n\n<|Assistant|>:<begin_of_image>"
    images = generate(
        vl_gpt,
        vl_chat_processor,
        prompt,
        parallel_size=args.parallel_size,
        cfg_weight=args.cfg,
    )
    os.makedirs(args.gen_path, exist_ok=True)
    for i in range(args.parallel_size):
        img_name = str(i).zfill(4) + ".png"
        save_path = os.path.join(args.gen_path, img_name)
        images[i].save(save_path)
```
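
Running the script with its defaults generates `parallel_size` samples for the example caption and writes them to `results/samples/0000.png`, `0001.png`, and so on. Pass `--ckpt_path` to load a FocusDiff fine-tuned checkpoint (assumed here to be a plain `state_dict` saved with `torch.save`) on top of the Janus-Pro-7B backbone.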

## 🤝 Acknowledgment