from transformers import GPT2LMHeadModel, AutoTokenizer
import demo_util
import numpy as np
import torch
from PIL import Image

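# TF32 matmuls speed up inference on recent NVIDIA GPUs; the fixed seed keeps runs reproducible.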
torch.backends.cuda.matmul.allow_tf32 = True
torch.manual_seed(0)
device = "cuda:1"
dtype = torch.float16

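# Load the TiTok image tokenizer. The L-32 config represents each image as 32
# discrete tokens drawn from a 4096-entry codebook (hence the range check below).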
config = demo_util.get_config("configs/titok_l32.yaml")
titok_tokenizer = demo_util.get_titok_tokenizer(config).to(device)

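# The text tokenizer is expected to carry the <|startofimage|> and <|image:N|> special
# tokens used below; the GPT-2 checkpoint is one fine-tuned to emit image tokens.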
tokenizer = AutoTokenizer.from_pretrained("./image_tokenizer")
model = GPT2LMHeadModel.from_pretrained("./checkpoint-20000").to(device).to(dtype).eval()

def detokenize(tokens):
    """Decode a sequence of TiTok codebook indices back into a PIL image."""
    encoded_tokens = torch.from_numpy(np.array(tokens).astype(np.int64)).view(1, 1, -1).to(device)
    reconstructed_image = titok_tokenizer.decode_tokens(encoded_tokens)
    reconstructed_image = torch.clamp(reconstructed_image, 0.0, 1.0)
    # Scale to [0, 255], move channels last, and convert to uint8 for PIL.
    reconstructed_image = (reconstructed_image * 255.0).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()[0]
    return Image.fromarray(reconstructed_image)
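# Usage sketch (illustration only, not in the original script): decoding 32 random
# codebook indices should produce a valid, if meaningless, image:
#     detokenize(np.random.randint(0, 4096, size=32))
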
prompt = ""
inputs = tokenizer(f"{prompt}<|startofimage|>", return_tensors="pt").to(device)
input_ids = inputs["input_ids"]
init = input_ids.shape[-1]  # length of the prompt prefix; image tokens start here

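# <|image:0|> is the first image token in the vocabulary; subtracting its id maps
# generated token ids onto TiTok codebook indices.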
soi_token = tokenizer.encode("<|image:0|>")[0]

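# Greedy decoding: 33 steps cover the 32 image tokens plus a trailing end-of-image token.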
with torch.no_grad():
    for _ in range(33):
        logits = model(input_ids).logits[:, -1, :]
        # Softmax is monotonic, so the argmax of the probabilities is plain greedy decoding.
        probas = torch.nn.functional.softmax(logits, dim=-1)
        pred_idx = torch.argmax(probas, dim=-1, keepdim=True)
        input_ids = torch.cat((input_ids, pred_idx), dim=-1)

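# Variation sketch (an assumption, not part of the original script): sampling instead
# of argmax yields different images per run, e.g. with a hypothetical `temperature`:
#     probas = torch.nn.functional.softmax(logits / temperature, dim=-1)
#     pred_idx = torch.multinomial(probas, num_samples=1)
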
print(tokenizer.decode(input_ids[0]))

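# Drop the prompt prefix and the trailing end-of-image token, then shift into codebook range.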
tokens = input_ids[:, init:-1].detach().cpu().squeeze().numpy() - soi_token
if np.any(tokens < 0) or np.any(tokens >= 4096):
    print("Illegal image tokens")
else:
    img = detokenize(tokens)
    img.save("./out.png")