diff --git "a/2025/Spectral Image Tokenizer/layout.json" "b/2025/Spectral Image Tokenizer/layout.json" new file mode 100644--- /dev/null +++ "b/2025/Spectral Image Tokenizer/layout.json" @@ -0,0 +1,10739 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 225, + 103, + 386, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 103, + 386, + 121 + ], + "spans": [ + { + "bbox": [ + 225, + 103, + 386, + 121 + ], + "type": "text", + "content": "Spectral Image Tokenizer" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 100, + 150, + 173, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 150, + 173, + 163 + ], + "spans": [ + { + "bbox": [ + 100, + 150, + 173, + 163 + ], + "type": "text", + "content": "Carlos Esteves" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 250, + 150, + 343, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 150, + 343, + 163 + ], + "spans": [ + { + "bbox": [ + 250, + 150, + 343, + 163 + ], + "type": "text", + "content": "Mohammed Suhail" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 421, + 150, + 507, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 421, + 150, + 507, + 163 + ], + "spans": [ + { + "bbox": [ + 421, + 150, + 507, + 163 + ], + "type": "text", + "content": "Ameesh Makadia" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 200, + 169, + 394, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 169, + 394, + 182 + ], + "spans": [ + { + "bbox": [ + 200, + 169, + 394, + 182 + ], + "type": "text", + "content": "{machc, suhailmhd, makadia}@google.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 255, + 183, + 339, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 183, + 339, + 198 + ], + "spans": [ + { + "bbox": [ + 255, + 183, + 339, + 198 + ], + "type": "text", + "content": "Google Research" + } + ] + } + ], + "index": 7 + }, + { 
+ "bbox": [ + 152, + 243, + 200, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 243, + 200, + 255 + ], + "spans": [ + { + "bbox": [ + 152, + 243, + 200, + 255 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 54, + 269, + 297, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 269, + 297, + 521 + ], + "spans": [ + { + "bbox": [ + 54, + 269, + 297, + 521 + ], + "type": "text", + "content": "Image tokenizers map images to sequences of discrete tokens, and are a crucial component of autoregressive transformer-based image generation. The tokens are typically associated with spatial locations in the input image, arranged in raster scan order, which is not ideal for autoregressive modeling. In this paper, we propose to tokenize the image spectrum instead, obtained from a discrete wavelet transform (DWT), such that the sequence of tokens represents the image in a coarse-to-fine fashion. Our tokenizer brings several advantages: 1) it leverages that natural images are more compressible at high frequencies, 2) it can take and reconstruct images of different resolutions without retraining, 3) it improves the conditioning for next-token prediction – instead of conditioning on a partial line-by-line reconstruction of the image, it takes a coarse reconstruction of the full image, 4) it enables partial decoding where the first few generated tokens can reconstruct a coarse version of the image, 5) it enables autoregressive models to be used for image upsampling. We evaluate the tokenizer reconstruction metrics as well as multiscale image generation, text-guided image upsampling and editing." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 547, + 135, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 547, + 135, + 559 + ], + "spans": [ + { + "bbox": [ + 56, + 547, + 135, + 559 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 568, + 296, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 568, + 296, + 640 + ], + "spans": [ + { + "bbox": [ + 55, + 568, + 296, + 640 + ], + "type": "text", + "content": "In natural language processing, tokenization associates sets of characters to entries in a vocabulary, which is straightforward since language is discrete. The sequence of tokens is suitably modeled as a categorical distribution with autoregressive (AR) transformers, which is the foundation of modern large language models (LLMs) [33, 46, 47]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": "While natural images are represented by discrete pixel values, they exhibit high dimensionality, redundancies, and noise so it's impractical to associate one token per pixel. This motivated a long line of research of learnable image tokenizers [15, 37, 37, 51, 57]. While there are successful autoregressive image generation models [15, 57, 58]," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 243, + 555, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 243, + 555, + 304 + ], + "spans": [ + { + "bbox": [ + 313, + 243, + 555, + 304 + ], + "type": "text", + "content": "images are not sequential like language, which motivated developing alternatives such as denoising diffusion models [11, 38, 43] and masked transformers [3, 4, 60]. Nevertheless, most of these alternatives also operate on the latent space of tokenizers like VQGAN [15] instead of raw pixels." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 304, + 555, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 304, + 555, + 388 + ], + "spans": [ + { + "bbox": [ + 313, + 304, + 555, + 388 + ], + "type": "text", + "content": "In this work, we revisit AR transformer-based image generation. Our main contribution is a tokenizer operating on the image spectrum, specifically on DWT coefficients, where the coarse-to-fine representation lends itself more naturally to a sequential interpretation. Our Spectral Image Tokenizer (SIT) has several useful properties and enables different applications:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 391, + 554, + 714 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 315, + 391, + 554, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 391, + 554, + 450 + ], + "spans": [ + { + "bbox": [ + 315, + 391, + 554, + 450 + ], + "type": "text", + "content": "P.1 Since the power spectrum of natural images decreases with frequency, high frequencies can be more heavily compressed with little effect in visual quality. SIT leverages this by associating tokens to larger patches at higher wavelet scales than at lower scales (see Fig. 2)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 450, + 554, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 450, + 554, + 533 + ], + "spans": [ + { + "bbox": [ + 315, + 450, + 554, + 533 + ], + "type": "text", + "content": "P.2 SIT is transformer-based [52]; by using an attention mask where each scale depends on itself and lower scales (\"Scale-Causal attention\"), SIT can be trained at a single resolution and used to tokenize images of multiple resolutions (any number of scales up to the trained maximum), and detokenize partial token sequences (up to some scale), reconstructing a coarse image." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 534, + 554, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 534, + 554, + 605 + ], + "spans": [ + { + "bbox": [ + 315, + 534, + 554, + 605 + ], + "type": "text", + "content": "P.3 Using SIT, we train an autoregressive generative transformer (AR-SIT) that models images coarse-to-fine. The next-token prediction is then conditioned on a coarse reconstruction of the image given by the partial token sequence, instead of the usual conditioning on the partial reconstruction of the previous rows of the image." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 605, + 553, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 605, + 553, + 653 + ], + "spans": [ + { + "bbox": [ + 315, + 605, + 553, + 653 + ], + "type": "text", + "content": "P.4 AR-SIT can quickly generate only the first few tokens and reconstruct a coarse version of the image, enabling applications like quickly showing multiple coarse generations and letting the user select which ones to refine." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 653, + 553, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 653, + 553, + 700 + ], + "spans": [ + { + "bbox": [ + 315, + 653, + 553, + 700 + ], + "type": "text", + "content": "P.5 AR-SIT can be used for text-based upsampling of an input low resolution image, by starting the decoding process with the few tokens output by SIT, and generating the rest of the sequence up to a desired resolution." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 700, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 700, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 315, + 700, + 553, + 714 + ], + "type": "text", + "content": "P.6 AR-SIT can be used for text-guided image editing, by" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 146, + 0, + 493, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 0, + 493, + 37 + ], + "spans": [ + { + "bbox": [ + 146, + 0, + 493, + 37 + ], + "type": "text", + "content": "This ICCV paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17181" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 68, + 233, + 245 + ], + "blocks": [ + { + "bbox": [ + 56, + 68, + 233, + 245 + ], + "lines": [ + { + "bbox": [ + 56, + 68, + 233, + 245 + ], + "spans": [ + { + "bbox": [ + 56, + 68, + 233, + 245 + ], + "type": "image", + "image_path": "2bfa86016e0efaaf3b1d858e61d41b14be8218e91de76c2783d796f1bade2604.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 252, + 555, + 330 + ], + "lines": [ + { + "bbox": [ + 55, + 252, + 555, + 330 + ], + "spans": [ + { + "bbox": [ + 55, + 252, + 555, + 330 + ], + "type": "text", + "content": "Figure 1. Left: we introduce a Spectral Image Tokenizer (SIT), that learns to encode and decode discrete wavelet transform (DWT) coefficients to and from a small set of discrete tokens, such that the sequence represents the image in a coarse-fine fashion. SIT is naturally multiscale and enables coarse-to-fine autoregressive image generation with our AR-SIT model. SIT also leverages the sparsity of high frequency coefficients in natural images. Right: details of the encoder/decoder transformer layers. The main architectural difference with respect to previous tokenizers is that the distributions of DWT approximation and details coefficients are distinct, hence we use specialized parameters for each in the quantizer codebooks and inner transformer layers. We also introduce a scale-causal attention where each token attends to its own scale and lower scales, which enables encoding, decoding, generating, and upsampling images at different resolutions." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 260, + 68, + 556, + 242 + ], + "blocks": [ + { + "bbox": [ + 260, + 68, + 556, + 242 + ], + "lines": [ + { + "bbox": [ + 260, + 68, + 556, + 242 + ], + "spans": [ + { + "bbox": [ + 260, + 68, + 556, + 242 + ], + "type": "image", + "image_path": "db8c6463a1c65f8aaf7b1abc40644785cb3a11c1ae22bbb2fa368a367c846a68.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 339, + 295, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 339, + 295, + 363 + ], + "spans": [ + { + "bbox": [ + 71, + 339, + 295, + 363 + ], + "type": "text", + "content": "encoding a given image up to a coarse scale, and generating the finer details conditioned on a new caption." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 366, + 296, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 366, + 296, + 535 + ], + "spans": [ + { + "bbox": [ + 55, + 366, + 296, + 535 + ], + "type": "text", + "content": "Currently, image generation is dominated by diffusion models such as Imagen 3 [23], DALL-E-3 [1] and Stable Diffusion 3 [16]. On the other hand, LLMs such as Gemini [46], GPT-4 [33], and Llama 3 [47] are based on autoregressive transformers. We believe autoregressive image generation is still worth pursuing because it might benefit from advances in LLMs, and multimodal applications might benefit of having a single architecture for all modalities. Recent work on image and video generation support this point [45, 48, 59]. Dieleman [12] recently interpreted denoising diffusion models as spectral autoregression, since, when looking at image spectra, the denoising procedure uncovers frequencies from low to high. In contrast, our method does literal spectral autoregression." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 548, + 139, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 548, + 139, + 560 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 139, + 560 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 569, + 295, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 569, + 295, + 688 + ], + "spans": [ + { + "bbox": [ + 55, + 569, + 295, + 688 + ], + "type": "text", + "content": "Image tokenization Several methods have been developed to map images to a small set of discrete tokens suitable for generative modeling. VQ-VAE [51] introduced vector-quantization in the latent space of a Variational AutoEncoder to map images, audio and video to a set of discrete values. VQGAN [15] improved upon VQVAE by using perceptual and adversarial losses. We build on ViT-VQGAN [57], which improved upon VQGAN by using a Vision Transformer (ViT) [13] instead of convolutions, as well as codebook factorization and feature normalization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "content": "In this paper, we are interested in multiscale image representations. VQ-VAE-2 [37] introduced multiscale latents" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 338, + 555, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 338, + 555, + 482 + ], + "spans": [ + { + "bbox": [ + 313, + 338, + 555, + 482 + ], + "type": "text", + "content": "by keeping and quantizing intermediate downsampled convolutional features. RQ-VAE [26] quantized a set of residuals such that the latent vector is represented in a coarse-to-fine fashion and reconstructed by adding the code embeddings for each residual. 
Similarly, VAR [48] and STAR [28] split the latent space in multi-scale quantized residuals, so that it can be reconstructed by upsampling and summing. FAR [56] splits the latents into frequencies to train a next-frequency generative model. DQ-VAE [22] encoded smooth regions with fewer tokens that are generated first, while QG-VAE [14] and SEMANTICIST [53] learn non-local tokens that are ordered by importance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 483, + 555, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 483, + 555, + 554 + ], + "spans": [ + { + "bbox": [ + 313, + 483, + 555, + 554 + ], + "type": "text", + "content": "The crucial difference between our approach and the aforementioned is that we operate on spectral coefficients of the input and not on latent features. This allows our tokenizer to be truly multiscale; it can take inputs at different scales and reconstruct up to some scale, and leverage that higher frequencies are more compressible in natural images." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 555, + 554, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 555, + 554, + 662 + ], + "spans": [ + { + "bbox": [ + 313, + 555, + 554, + 662 + ], + "type": "text", + "content": "Tangentially related to our work, Wave-ViT [55] modified the ViT self-attention by applying a DWT to the input and concatenating coefficients, effectively exchanging space for depth to reduce the sequence length. Zhu and Soricut [62] modified the ViT patchifier in a similar way, and introduced patch embeddings that leverage the sparsity of high frequency coefficients. Both methods are for discriminative tasks such as image classification and segmentation, while we focus on generation." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 666, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 666, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 666, + 556, + 715 + ], + "type": "text", + "content": "Autoregressive image generation Early approaches PixelRNN and PixelCNN [49, 50] generate images pixel by pixel by modeling the conditional distribution of the pixel given the previous pixels with recurrent layers of causal" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17182" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 191 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 191 + ], + "type": "text", + "content": "convolutions. PixelSNAIL [5] improved on this model by introducing self-attention layer to better model long-range dependencies. VQ-VAE [51] introduced a two-stage approach with a tokenizer and a separate stage to model the distribution of tokens. VQGAN [15] greatly improved these results by both improving the tokenizer and using a transformer to model the distribution. Finally, ViT-VQGAN [57] proposed a transformer-based tokenizer, which Parti [58] used with a large autoregressive transformer capable of high-quality text-to-image generation." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 192, + 296, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 192, + 296, + 275 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 296, + 275 + ], + "type": "text", + "content": "MaskGIT [3] and Muse [4] highlighted the disadvantages of the typical autoregressive models raster-order conditioning, and proposed to generate all tokens in parallel iteratively, where each iteration keeps the highest confidence tokens. We address the same problem with a tokenizer whose sequence represents the image in a coarse-to-fine order instead of raster-order." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 277, + 296, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 277, + 296, + 324 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 296, + 324 + ], + "type": "text", + "content": "Multiscale image generation Multiscale image generation ideas have appeared in the context of VAEs [2], GANs [24, 39], and diffusion models [18, 25, 40], but have not been sufficiently explored with AR transformers." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 325, + 296, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 325, + 296, + 492 + ], + "spans": [ + { + "bbox": [ + 55, + 325, + 296, + 492 + ], + "type": "text", + "content": "Nash et al. [32] represented an image as a sequence of quantized and thresholded DCT coefficients, where the compression comes from the fact that many coefficients are zero and are omitted. By sorting the sequence by frequency, the method can model images from coarse-to-fine like we do. However, the compressed representation is handcrafted and results in long sequences. In a similar vein, Mattar et al. [30] handcrafted a tokenizer based on DWT coefficients, introducing tokens to represent large chunks of zeros. It also results in long sequences and is only applied to generations of small grayscale images. 
In contrast with these methods, instead of handcrafting a compressed input representation, our tokenizer learns to encode to, and decode from, a short compressed coarse-to-fine sequence." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 503, + 134, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 503, + 134, + 517 + ], + "spans": [ + { + "bbox": [ + 55, + 503, + 134, + 517 + ], + "type": "text", + "content": "3. Background" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "spans": [ + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "type": "text", + "content": "A discrete wavelet transform (DWT) is based on successive convolutions of the signal " + }, + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "type": "text", + "content": " with a pair of lowpass " + }, + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "type": "text", + "content": " and highpass " + }, + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 55, + 523, + 296, + 559 + ], + "type": "text", + "content": " filters with the first step as follows" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 99, + 568, + 294, + 594 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 568, + 294, + 594 + ], + "spans": [ + { + "bbox": [ + 99, + 568, + 294, + 594 + ], + "type": "interline_equation", + "content": "f _ {\\operatorname {l o w} _ {1}} [ n ] = f \\star g = \\sum_ {k} f [ k ] g [ n - k ], \\tag {1}", + "image_path": "8740d0c8dba5d3a0c82b9e8125881e4cc0416bf0036228da73d5aab684082fe4.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 97, + 596, + 
294, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 596, + 294, + 620 + ], + "spans": [ + { + "bbox": [ + 97, + 596, + 294, + 620 + ], + "type": "interline_equation", + "content": "f _ {\\operatorname {h i g h} _ {1}} [ n ] = f \\star h = \\sum_ {k} f [ k ] h [ n - k ]. \\tag {2}", + "image_path": "91a0b6ce73bcb3e3816add88d171897a71134f924a6ab485520e297b3061e72d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": "The high and low outputs are subsampled by a factor of two, and the operation is repeated for the low channel, such that at level " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": " we compute " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{low}_L} = f_{\\mathrm{low}_{L - 1}} \\star g" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{high}_L} = f_{\\mathrm{low}_{L - 1}} \\star h" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": ", subsample again and drop " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{low}_{L - 1}}" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": ". 
The output at level " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": " comprises the approximation coefficients " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{low}_L}" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": " and the detail coefficients " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\{f_{\\mathrm{high}_k}\\}" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "inline_equation", + "content": "1 \\leq k \\leq L" + }, + { + "bbox": [ + 55, + 629, + 296, + 713 + ], + "type": "text", + "content": ". This output has the same cardinality as the input and the" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 317, + 72, + 434, + 189 + ], + "blocks": [ + { + "bbox": [ + 317, + 72, + 434, + 189 + ], + "lines": [ + { + "bbox": [ + 317, + 72, + 434, + 189 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 434, + 189 + ], + "type": "image", + "image_path": "863d7df0e87b9ef660d64f4c69a21c033d45e2e444801ba5802f9b521fe6710d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 198, + 555, + 320 + ], + "lines": [ + { + "bbox": [ + 313, + 198, + 555, + 320 + ], + "spans": [ + { + "bbox": [ + 313, + 198, + 555, + 320 + ], + "type": "text", + "content": "Figure 2. Input patchification. Left: typical patchification for Vision Transformers (ViT) [13], where the image is split in equal-sized patches. Right: we propose to patchify the coefficients of a discrete wavelet transform (DWT) instead. Each scale is shown in a different color. 
Scales other than the lowest contain three blocks representing horizontal, vertical and diagonal details; we concatenate the spatially correspondent patches of each block such that each scale is represented by the same sequence length. This results in larger patch sizes for higher scales, which are more compressible. The figure shows 3 scales and 16 tokens per scale; in our experiments we use 4 or 5 scales and 256 tokens per scale." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 435, + 72, + 552, + 188 + ], + "blocks": [ + { + "bbox": [ + 435, + 72, + 552, + 188 + ], + "lines": [ + { + "bbox": [ + 435, + 72, + 552, + 188 + ], + "spans": [ + { + "bbox": [ + 435, + 72, + 552, + 188 + ], + "type": "image", + "image_path": "3d53cd99caa83e3e495af4cbd9515dbd49fa7a9103b690f6877a7ea3f9b4392f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 330, + 554, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 330, + 554, + 377 + ], + "spans": [ + { + "bbox": [ + 313, + 330, + 554, + 377 + ], + "type": "text", + "content": "transformation is invertible, where the forward transform is typically called \"analysis\" and the backward \"synthesis\". The simplest wavelet family is the Haar, where " + }, + { + "bbox": [ + 313, + 330, + 554, + 377 + ], + "type": "inline_equation", + "content": "g = [1,1]^{\\top}" + }, + { + "bbox": [ + 313, + 330, + 554, + 377 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 330, + 554, + 377 + ], + "type": "inline_equation", + "content": "h = [1, -1]^{\\top}" + }, + { + "bbox": [ + 313, + 330, + 554, + 377 + ], + "type": "text", + "content": " (optionally scaled to unit norm)." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "spans": [ + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "text", + "content": "For image analysis we use a 2D DWT, which is obtained by simply convolving the rows and columns with " + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "text", + "content": ". The approximation coefficients " + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{low}_1} = f \\star (gg^\\top)" + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "text", + "content": ", and the details are divided into horizontal " + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{H}_1} = f \\star (gh^\\top)" + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "text", + "content": ", vertical " + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{V}_1} = f \\star (hg^\\top)" + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "text", + "content": " and diagonal " + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{D}_1} = f \\star (hh^\\top)" + }, + { + "bbox": [ + 313, + 378, + 554, + 485 + ], + "type": "text", + "content": " details. Subsequent levels apply the same operations to the approximation coefficients. Fig. 2 shows a two-level transform, where we can see that the approximation coefficients correspond to a coarse version of the image." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 486, + 554, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 554, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 554, + 510 + ], + "type": "text", + "content": "We refer to the textbooks by Mallat [29] and Daubechies [9] for more information about wavelets." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 522, + 370, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 522, + 370, + 534 + ], + "spans": [ + { + "bbox": [ + 314, + 522, + 370, + 534 + ], + "type": "text", + "content": "4. Method" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 543, + 554, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 543, + 554, + 615 + ], + "spans": [ + { + "bbox": [ + 313, + 543, + 554, + 615 + ], + "type": "text", + "content": "Our main contribution, SIT, is an image tokenizer that takes discrete wavelet transform (DWT) coefficients. The model follows ViT-VQGAN [57], with important changes that we describe in this section and visualize in Fig. 1. For image generation, we introduce AR-SIT which is based on Parti [58] with minor changes, using SIT as the tokenizer." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 624, + 383, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 624, + 383, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 624, + 383, + 635 + ], + "type": "text", + "content": "4.1. Tokenizer" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 642, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 554, + 713 + ], + "type": "text", + "content": "Patchification and featurization The first step is to map the input to a sequence of patches. We apply the Haar DWT on the input image and patchify each scale separately. 
While Haar is the simplest wavelet and lacks properties found in other wavelets useful for compression, we found no benefits of using other wavelet families such as CDF [8]." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17183" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 191 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 191 + ], + "type": "text", + "content": "Our design choice is to use the same number of patches for each scale. In a DWT, the higher scales correspond to high frequency details which are represented by more coefficients than the lower scales, but contribute less to the spatial pixel values. In other words, in natural images, most of the power spectrum is concentrated on lower frequencies. By representing each scale with the same number of tokens, we are compressing more the higher frequencies (since they are represented by more coefficients), similarly to what is done in image compression methods such as JPEG2000 [7]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "type": "text", + "content": "The approximation (or lowpass) DWT coefficients correspond to a coarse version of the input image, which we consider the first scale. The following scales are divided in three blocks corresponding to horizontal, vertical, and diagonal details, where each coefficient relates to a specific spatial location. 
Thus, we can concatenate the three blocks such that each entry corresponds to the same spatial location. For example, the first scale will typically be split in patches that are " + }, + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "type": "inline_equation", + "content": "32 \\times 32 \\times 3" + }, + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "type": "text", + "content": ", with channels corresponding to RGB, the second scale will be " + }, + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "type": "inline_equation", + "content": "32 \\times 32 \\times 9" + }, + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "type": "text", + "content": ", where the channels correspond to the RGB of horizontal, vertical, and diagonal details, the third will be " + }, + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "type": "inline_equation", + "content": "64 \\times 64 \\times 9" + }, + { + "bbox": [ + 55, + 192, + 295, + 347 + ], + "type": "text", + "content": " and so on. Fig. 2 shows an example of our patchification scheme." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 348, + 295, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 348, + 295, + 408 + ], + "spans": [ + { + "bbox": [ + 55, + 348, + 295, + 408 + ], + "type": "text", + "content": "Since patches have different resolutions (higher scales will have larger patches), the usual ViT linear embedding to map patches to features cannot be shared across all patches so we have different parameters per scale. 
Formally, given an image " + }, + { + "bbox": [ + 55, + 348, + 295, + 408 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 55, + 348, + 295, + 408 + ], + "type": "text", + "content": ", our patchifiers compute" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 418, + 295, + 432 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 418, + 295, + 432 + ], + "spans": [ + { + "bbox": [ + 66, + 418, + 295, + 432 + ], + "type": "interline_equation", + "content": "f_{\\mathrm{low}_{L}}, \\left\\{f_{\\mathrm{H}_{i}}\\right\\}_{i \\leq L}, \\left\\{f_{\\mathrm{V}_{i}}\\right\\}_{i \\leq L}, \\left\\{f_{\\mathrm{D}_{i}}\\right\\}_{i \\leq L} = \\mathrm{DWT}(f), \\tag{3}", + "image_path": "6740c7e5657068b85c7352e7a6f4748ef4596cd6a50a8b486ff7eebd79bcfee0.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 434, + 294, + 446 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 434, + 294, + 446 + ], + "spans": [ + { + "bbox": [ + 66, + 434, + 294, + 446 + ], + "type": "interline_equation", + "content": "c_{1} = P_{0}\\left(f_{\\mathrm{low}_{L}}\\right), \\tag{4}", + "image_path": "a5c47fc5e73378b7d5d3c7a3d5f09a99d052e7180b0d0fcf86c6e656203daa58.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 449, + 294, + 461 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 449, + 294, + 461 + ], + "spans": [ + { + "bbox": [ + 66, + 449, + 294, + 461 + ], + "type": "interline_equation", + "content": "c_{s} = P_{s}\\left(f_{\\mathrm{H}_{L-s+2}}, f_{\\mathrm{V}_{L-s+2}}, f_{\\mathrm{D}_{L-s+2}}\\right), 1 < s \\leq S, \\tag{5}", + "image_path": "dddbf0fd25b8e36f26eef543de56a9c2a9d50be3ad5512345854f1aafaa456e2.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 
471, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "c_{s} = \\{c_{s}^{n}\\}_{1\\leq n\\leq N}" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": " is the sequence of token embeddings at scale " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "c_{s}^{n}\\in \\mathbb{R}^{C}" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": " is the embedding of the " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": "-th token at the " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": "-th scale, while " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": " is the number of DWT levels, " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "S = L + 1" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": " is the number of scales and " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": " the number of tokens per scale. 
For brevity, when there is no ambiguity, we may omit the set indexing and use " + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "inline_equation", + "content": "\\{c_s\\}" + }, + { + "bbox": [ + 55, + 471, + 295, + 555 + ], + "type": "text", + "content": " to denote the set of tokens of all scales, for example." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 555, + 295, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 555, + 295, + 615 + ], + "spans": [ + { + "bbox": [ + 55, + 555, + 295, + 615 + ], + "type": "text", + "content": "The final projection after the decoder needs to map the features back to different sized patches, so it also has different parameters per scale. Those patches still represent DWT coefficients so an inverse DWT (IDWT) is finally applied to obtain an image output." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "type": "text", + "content": "Flexible sequence length The tokenizer encoder and decoder transformers operate on the sequence of patch features of length " + }, + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "type": "inline_equation", + "content": "SN" + }, + { + "bbox": [ + 55, + 617, + 295, + 713 + ], + "type": "text", + "content": ". The sequence length is a major factor in resource utilization so we want to keep it constrained. Our method is flexible since we can choose the number of scales and the patch size per scale, while most ViT-based models such as ViT-VQGAN [57] are more restricted. 
They use the same patch size for the whole image; thus, keeping the" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "content": "same patch size and doubling each image dimension would increase the sequence length by a factor of 4, where our method is capable of including additional scales which only increases the sequence length by multiples of " + }, + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 72, + 553, + 120 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "text", + "content": "For example, for a " + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "text", + "content": " input, ViT-VQGAN uses " + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "inline_equation", + "content": "8 \\times 8" + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "text", + "content": " patches to obtain a sequence of 1024 elements. Our SIT can use 4 scales and 256 tokens per scale for the same sequence length. 
When increasing the resolution to " + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "text", + "content": ", the baseline can either increase the patch size to " + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "text", + "content": ", resulting in the same sequence length, or keep it " + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "inline_equation", + "content": "8 \\times 8" + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "text", + "content": ", resulting in a " + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 313, + 121, + 553, + 228 + ], + "type": "text", + "content": " longer sequence. SIT, for example, can vary the number of scales from 4 to 6, resulting in sequence lengths of 1024, 1280, 1536 which are more manageable." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 233, + 553, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 233, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 313, + 233, + 553, + 293 + ], + "type": "text", + "content": "Transformers After featurization, the single sequence containing all scales passes through a transformer encoder, followed by quantization and a transformer decoder. We propose two optional modifications to the transformers, which are otherwise identical to the ones used in ViT [13]." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "text", + "content": "First, we propose a scale-causal attention mask, where an element at some scale attends to all elements of its own scale and lower scales, represented by a block-triangular mask pattern. With dense attention, we write the application of the encoding transformer " + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{enc}}" + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "inline_equation", + "content": "\\{z_s\\} = T_{\\mathrm{enc}}(\\{c_s\\})" + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "text", + "content": ", and for scale-causal we write " + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "inline_equation", + "content": "z_s = T_{\\mathrm{enc}}(\\{c_k\\}_{1 \\leq k \\leq s})" + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "text", + "content": " for each " + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 313, + 294, + 553, + 510 + ], + "type": "text", + "content": ". The scale-causal attention can be applied independently to the encoder and decoder, enabling different applications. For the multiscale reconstruction experiments in Sec. 5.1, we need to both encode and decode multiple resolutions, so both encoder and decoder use scale-causal masks. For coarse-to-fine image generation in Sec. 5.2, only the decoder needs to be scale-causal in order to decode the partially generated sequence. For the text-guided image upsampling in Sec. 5.3, only the encoder needs to be scale-causal to encode the lower resolution inputs. 
For the image editing experiments in Sec. 5.4, the scale-causal encoder prevents information leaking from high to low scale." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 510, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 554, + 713 + ], + "type": "text", + "content": "Second, we propose to use different transformer parameters for the approximation (first scale) and details coefficients (other scales). In our model, the subsequences corresponding to coefficients of each type of coefficient come from quite distinct distributions, so it makes sense to treat them differently. This contrasts with the spatial representation of images where each patch can be considered as coming from the same distribution. Thus, the parameters of the key, query, and value embeddings, the layer norms, and the MLP on each transformer layer are not shared between the approximation and details coefficients. This Approximation-Details Transformer (ADTransformer) still takes a single sequence composed of all coefficients. We experimented with different transformers per sequence with cross-attention for information sharing, but it performed worse. 
This change leads to more memory utilization to store the extra parameters, but the training/inference speed" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17184" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 71, + 294, + 320 + ], + "blocks": [ + { + "bbox": [ + 57, + 71, + 294, + 320 + ], + "lines": [ + { + "bbox": [ + 57, + 71, + 294, + 320 + ], + "spans": [ + { + "bbox": [ + 57, + 71, + 294, + 320 + ], + "type": "table", + "html": "
LPIPS ↓PSNR ↑L1 ↓FID ↓IS ↑images/s ↑
Resolution: 512 × 512
ViT-VQGAN0.32022.40.0426.92151.5593
SIT-5 (Ours)0.26022.00.0462.65192.0410
SIT-6 (Ours)0.23923.10.0401.74203.7320
Resolution: 256 × 256
ViT-VQGAN (reported)-24.80.0321.99184.4-
ViT-VQGAN (reproduced)0.16725.00.0312.33184.0-
ViT-VQGAN (no LL)0.16323.80.0381.20194.6626
SIT-4 (Ours)0.14424.00.0371.20199.5596
SIT-5 (Ours)0.13524.50.0350.97202.3411
SIT-SC-5 (Ours)0.16124.10.0371.33193.7411
Resolution: 128 × 128
ViT-VQGAN0.18526.30.0303.77117.3626
SIT-SC-5 (ours)0.15927.10.0272.13129.3582
Resolution: 64 × 64
ViT-VQGAN0.12928.80.0233.5321.0627
SIT-SC-5 (ours)0.11131.30.0171.3930.1847
Resolution: 32 × 32
ViT-VQGAN0.21423.30.045-3.7627
SIT-SC-5 (ours)0.02936.80.0100.313.5825
Resolution: 16 × 16
ViT-VQGAN0.12724.90.039-1.7627
SIT-SC-5 (ours)0.01341.30.0060.091.82620
", + "image_path": "94af4ecd39a3b10381c810b05a753d77349002dbaf0731b256169d374b3e1895.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 470, + 290, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 470, + 290, + 483 + ], + "spans": [ + { + "bbox": [ + 55, + 470, + 290, + 483 + ], + "type": "text", + "content": "is similar because the number of operations is unchanged." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": "Quantizer The encoder outputs a sequence of features that are quantized to a fixed-sized codebook, similarly to ViT-VQGAN [57], VQVAE [51], and VQGAN [15]. We modify the quantizer such that approximation and details have different codebooks for the same reasons discussed previously. Thus, codebook sizes and feature dimensions are the same as the ViT-VQGAN baseline, but we have different features for the same code at different positions. 
Formally, we apply " + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "inline_equation", + "content": "q_{s}^{n} = Q_{\\mathrm{approx}}(z_{s}^{n})" + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "inline_equation", + "content": "s = 1" + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "inline_equation", + "content": "q_{s}^{n} = Q_{\\mathrm{details}}(z_{s}^{n})" + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "inline_equation", + "content": "s \\geq 2" + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": " for each pair " + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "inline_equation", + "content": "(s,n)" + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "inline_equation", + "content": "q_{s}^{n}" + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": " is chosen from the codebook for " + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": " and can be associated with its discrete position in the codebook, denoted " + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "inline_equation", + "content": "\\lfloor q_s^n \\rfloor" + }, + { + "bbox": [ + 55, + 485, + 296, + 628 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 629, + 296, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 296, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 296, + 677 + ], + "type": "text", + "content": "Training We follow the ViT-VQGAN training protocol and use the same weighting for the L2, perceptual, adversarial, and quantization losses. We remove the logit-laplace loss that was shown detrimental in follow-up work [58]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": "All losses are applied to the spatial domain images reconstructed by the inverse discrete wavelet transform on the decoder output: " + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\hat{f} = \\mathrm{IDWT}(T_{\\mathrm{dec}}(\\{q_s\\}))" + }, + { + "bbox": [ + 55, + 677, + 296, + 714 + ], + "type": "text", + "content": ". We noticed insta" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 316, + 72, + 553, + 259 + ], + "blocks": [ + { + "bbox": [ + 55, + 327, + 296, + 460 + ], + "lines": [ + { + "bbox": [ + 55, + 327, + 296, + 460 + ], + "spans": [ + { + "bbox": [ + 55, + 327, + 296, + 460 + ], + "type": "text", + "content": "Table 1. Multiscale reconstruction on ImageNet. \"SC\" denotes scale-causal attention, which slightly reduces performance at the highest resolution but enables multiscale reconstruction without downsampling/upsampling or retraining. The ViT-VQGAN values from Yu et al. [57] used a logit-laplace loss (LL) which was later considered harmful [58], so we retrain without it. Our SIT improves reconstruction metrics, is significantly faster at lower resolutions, and robust when increasing resolution. 
The ViT-VQGAN baseline suffered from instability during training on " + }, + { + "bbox": [ + 55, + 327, + 296, + 460 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 327, + 296, + 460 + ], + "type": "text", + "content": " inputs so we selected the best values before divergence. We report test time throughput of an encoding/decoding cycle for the max batch size that fits on a TPU v5e." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 316, + 72, + 553, + 259 + ], + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 259 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 259 + ], + "type": "table", + "html": "
FID ↓IS ↑images/s ↑images/Gb ↑
Resolution: 256 × 256
Parti350M (reported)14.1---
Parti350M12.436.57.812.0
AR-SIT-SCD-4 (Ours)12.637.36.58.0
Resolution: 128 × 128
Parti350M11.233.57.612.0
AR-SIT-SCD-4 (Ours)11.433.212.612.0
Resolution: 64 × 64
Parti350M10.516.97.612.0
AR-SIT-SCD-4 (Ours)11.418.624.516.0
Resolution: 32 × 32
Parti350M5.82.97.77.7
AR-SIT-SCD-4 (Ours)7.63.274.728.0
", + "image_path": "4a11b64ad2dc3a729b5f5fed6063400c38fa36b2e38f86efa3d6d25bf0754280.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 267, + 555, + 368 + ], + "lines": [ + { + "bbox": [ + 313, + 267, + 555, + 368 + ], + "spans": [ + { + "bbox": [ + 313, + 267, + 555, + 368 + ], + "type": "text", + "content": "Table 2. Coarse-to-fine image generation on MS-COCO [27]. With SIT, the autoregressive generation is stopped early for a coarse version of the image. The Parti350M baseline does not have this property, so we generate at full resolution and downsample for comparison. AR-SIT matches the baseline performance at the training resolution but is several times faster and more memory efficient at lower resolutions, even when trained only on higher resolution data. We report throughput and memory utilization during generation given the max batch size that fits on a TPU v6e." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 376, + 555, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 555, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 555, + 425 + ], + "type": "text", + "content": "bility during training due to the adversarial loss, which was fixed by applying spectral normalization following Miyato et al. [31], which simply divides the discriminator weight matrices by their largest singular value." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 430, + 489, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 430, + 489, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 430, + 489, + 443 + ], + "type": "text", + "content": "4.2. 
Autoregressive image generation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 448, + 554, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 448, + 554, + 507 + ], + "spans": [ + { + "bbox": [ + 313, + 448, + 554, + 507 + ], + "type": "text", + "content": "We use our tokenizer for autoregressive image generation, by training a second stage transformer model similar to Parti [58], with some modifications. Formally, the autoregressive transformer " + }, + { + "bbox": [ + 313, + 448, + 554, + 507 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 448, + 554, + 507 + ], + "type": "text", + "content": " models categorical distributions over the discrete codes" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 337, + 514, + 553, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 514, + 553, + 529 + ], + "spans": [ + { + "bbox": [ + 337, + 514, + 553, + 529 + ], + "type": "interline_equation", + "content": "P\\left(\\left\\lfloor q_{s}^{n} \\right\\rfloor \\mid \\left\\{\\left\\lfloor q_{i} \\right\\rfloor \\right\\}_{1 \\leq i < s} \\cup \\left\\{\\left\\lfloor q_{s}^{i} \\right\\rfloor \\right\\}_{1 \\leq i < n}\\right) = T(c) \\tag{6}", + "image_path": "93b268194fac8120de06bff8b968d9275e4e83d3875b90392881ee89227a293.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "spans": [ + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "type": "text", + "content": "which can be sampled one code at a time for generation. " + }, + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "type": "text", + "content": " can be conditioned on a textual description processed by a transformer encoder for text-to-image generation. 
For training, we model the distribution of input codes as " + }, + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "type": "inline_equation", + "content": "P(\\{\\lfloor q_s\\rfloor \\}) = \\prod_{1\\leq s\\leq S}^{1\\leq n\\leq N}P(\\lfloor q_s^n\\rfloor)" + }, + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "type": "text", + "content": " and minimize the negative log-likelihood " + }, + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "type": "inline_equation", + "content": "-\\log P(\\{\\lfloor q_s\\rfloor \\})" + }, + { + "bbox": [ + 313, + 533, + 554, + 606 + ], + "type": "text", + "content": " over the training set." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 606, + 554, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 606, + 554, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 606, + 554, + 665 + ], + "type": "text", + "content": "With different codebooks for approximation and details, the same token id might have a different meaning depending on its position. Thus, the AR model has different token embeddings for approximation and details tokens. The last layer for logit prediction is also different." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "type": "text", + "content": "For the generative applications in Secs. 5.2 and 5.3, we introduce mild changes in order to interrupt the generation after all tokens up to a certain scale are generated, and to start the generation with given tokens up to a certain scale." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17185" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 70, + 299, + 130 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 299, + 130 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 299, + 130 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 299, + 130 + ], + "type": "image", + "image_path": "dc43ffe6e96642fcda4ec765bcd42878e8704676603cf14fde48296916c4cb76.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 131, + 252, + 138 + ], + "lines": [ + { + "bbox": [ + 104, + 131, + 252, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 131, + 252, + 138 + ], + "type": "text", + "content": "\"a zebra standing on dirty area with trees in the background.\"" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 58, + 140, + 299, + 201 + ], + "blocks": [ + { + "bbox": [ + 58, + 140, + 299, + 201 + ], + "lines": [ + { + "bbox": [ + 58, + 140, + 299, + 201 + ], + "spans": [ + { + "bbox": [ + 58, + 140, + 299, + 201 + ], + "type": "image", + "image_path": "e248b9896775e4fae5bad5b558a325a77445733fe2d81b49fc5fd2019b6f9431.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 201, + 256, + 207 + ], + "lines": [ + { + "bbox": [ + 100, + 201, + 256, + 207 + ], + "spans": [ + { + "bbox": [ + 100, + 201, + 256, + 207 + ], + "type": "text", + "content": "\"an elaborate metal vase holds a decorative bouquet of flowers.\"" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + 
"index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 209, + 299, + 270 + ], + "blocks": [ + { + "bbox": [ + 58, + 209, + 299, + 270 + ], + "lines": [ + { + "bbox": [ + 58, + 209, + 299, + 270 + ], + "spans": [ + { + "bbox": [ + 58, + 209, + 299, + 270 + ], + "type": "image", + "image_path": "de3eb6007c31642354f152e1bd512c4eea36619650b3f8dfad3b853f0ee1e278.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 270, + 252, + 277 + ], + "lines": [ + { + "bbox": [ + 104, + 270, + 252, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 252, + 277 + ], + "type": "text", + "content": "\"the wing of a plane located in the sky above a sea and city.\"" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "lines": [ + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "spans": [ + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "text", + "content": "Figure 3. Coarse-to-fine text-to-image on MS-COCO [27] prompts. 
Each quadruple shows generations from AR-SIT-SCD for the given prompt, with only the first " + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "text", + "content": " of tokens generated, corresponding to resolutions of " + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "text", + "content": ". Training uses only " + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 54, + 286, + 555, + 319 + ], + "type": "text", + "content": " images. Our model enables quick generation of coarse image candidates that can be further improved if needed." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 70, + 553, + 131 + ], + "blocks": [ + { + "bbox": [ + 310, + 70, + 553, + 131 + ], + "lines": [ + { + "bbox": [ + 310, + 70, + 553, + 131 + ], + "spans": [ + { + "bbox": [ + 310, + 70, + 553, + 131 + ], + "type": "image", + "image_path": "25862973e1bf70f60bd7575e0f87c543fe55309df8ecab772e174e9998dc63f2.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 372, + 131, + 492, + 138 + ], + "lines": [ + { + "bbox": [ + 372, + 131, + 492, + 138 + ], + "spans": [ + { + "bbox": [ + 372, + 131, + 492, + 138 + ], + "type": "text", + "content": "\"a modern bathroom is shown with a square sink.\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 311, + 140, + 553, + 201 + ], + "blocks": [ + { + "bbox": [ + 311, + 140, + 553, + 201 + ], + "lines": [ + { + "bbox": [ + 311, + 140, + 553, + 201 + ], + "spans": [ + { + "bbox": [ + 311, + 140, + 553, + 201 + ], + "type": "image", + "image_path": "f33eabf0788eab36bb4215f991b3e7ffd68e526e0ff44c2644f486c2dba10819.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 395, + 201, + 470, + 208 + ], + "lines": [ + { + "bbox": [ + 395, + 201, + 470, + 208 + ], + "spans": [ + { + "bbox": [ + 395, + 201, + 470, + 208 + ], + "type": "text", + "content": "\"a toy boat sits on the ground.\"" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 311, + 209, + 553, + 270 + ], + "blocks": [ + { + "bbox": [ + 311, + 209, + 553, + 270 + ], + "lines": [ + { + "bbox": [ + 311, + 209, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 311, + 209, + 553, + 270 + ], + "type": "image", + "image_path": "710941c16491f7e36dfe7d85aeba52cd9c1e9fd64813e64988728e31bbccaf38.jpg" 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 347, + 270, + 517, + 277 + ], + "lines": [ + { + "bbox": [ + 347, + 270, + 517, + 277 + ], + "spans": [ + { + "bbox": [ + 347, + 270, + 517, + 277 + ], + "type": "text", + "content": "\"This brown and white cat is standing on a desk in front of a computer.\"" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 325, + 136, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 325, + 136, + 339 + ], + "spans": [ + { + "bbox": [ + 55, + 325, + 136, + 339 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 346, + 296, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 346, + 296, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 346, + 296, + 441 + ], + "type": "text", + "content": "We focus our experiments on demonstrating the properties and applications enumerated in P.1-P.6 (Sec. 1). Our ideas build on ViT-based tokenizers, so the most fair comparisons are against ViT tokenizers and AR generative models; namely ViT-VQGAN [57] and Parti [58]. In these comparisons we are able to match the architectures and training protocol exactly. Nevertheless we also compare with a broader set of methods in Sec. 5.5." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 453, + 198, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 453, + 198, + 464 + ], + "spans": [ + { + "bbox": [ + 55, + 453, + 198, + 464 + ], + "type": "text", + "content": "5.1. 
Multiscale reconstruction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 471, + 295, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 471, + 295, + 507 + ], + "spans": [ + { + "bbox": [ + 55, + 471, + 295, + 507 + ], + "type": "text", + "content": "We train our tokenizer on ImageNet [10] and evaluate its reconstruction performance. We follow the ViT-VQGAN [57] \"Small\" architecture and training protocol." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "text", + "content": "The model \"SIT-5\" is the Spectral Image Tokenizer with 5 scales from " + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "text", + "content": " resolutions, where each scale is represented by 256 tokens. The variation \"SIT-SC\" uses scale-causal attention on both the encoder and decoder, which enables handling inputs and outputs of different resolutions, even though it is only trained at " + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "text", + "content": ". 
For example, when the input image is " + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 55, + 508, + 295, + 628 + ], + "type": "text", + "content": ", only the first three scales are used, resulting in shorter sequence lengths which reduces memory utilization and processing time. Both models employ the ADTransformer described in Sec. 4.1." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 628, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 628, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 628, + 295, + 689 + ], + "type": "text", + "content": "The ViT-VQGAN baseline only works for " + }, + { + "bbox": [ + 55, + 628, + 295, + 689 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 628, + 295, + 689 + ], + "type": "text", + "content": " inputs, so to evaluate against it fairly, we upsampled the low-resolution inputs to that resolution. This brings the input patches away from the training distribution, which might explain the drop in reconstruction quality." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "We also evaluate at " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": ". 
The ViT-VQGAN baseline increases its patch size from " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "8 \\times 8" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": ", keep-" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 326, + 553, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 326, + 553, + 386 + ], + "spans": [ + { + "bbox": [ + 313, + 326, + 553, + 386 + ], + "type": "text", + "content": "ing the sequence length constant. Interestingly, the baseline suffered heavy instability during training, which was not resolved by reducing the learning rate, using spectral normalization, or the logit-laplace loss. In contrast, our SIT variations trained successfully with no hyperparameter changes." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 387, + 554, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 387, + 554, + 422 + ], + "spans": [ + { + "bbox": [ + 313, + 387, + 554, + 422 + ], + "type": "text", + "content": "Tab. 1 shows the reconstruction metrics; Fig. 7 shows reconstruction samples at multiple resolutions. Results provide evidence for P.1 and demonstrate P.2 (Sec. 1)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 431, + 522, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 431, + 522, + 445 + ], + "spans": [ + { + "bbox": [ + 313, + 431, + 522, + 445 + ], + "type": "text", + "content": "5.2. 
Coarse-to-fine text-to-image generation" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 449, + 554, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 554, + 533 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 554, + 533 + ], + "type": "text", + "content": "We tackle text-to-image generation by using an autoregressive transformer to model the distribution of discrete tokens output by our tokenizer. Since the SIT sequence of tokens represents an image in a coarse-fine fashion, the autoregressive model generation has the same property, which means that we can interrupt the generation after a certain number of tokens and decode a coarse version of the generation." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 533, + 554, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 533, + 554, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 533, + 554, + 594 + ], + "type": "text", + "content": "For this to work, the SIT decoder must be scale-causal, while there is no constraint for the encoder so we use dense attention (we denote this variant Scale-Causal Decoder, or \"SIT-SCD\"). It uses a \"small\" encoder and \"large\" decoder as described in ViT-VQGAN [57], and 4 scales." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": "We follow the Parti350M [58] architecture and training protocol and name our generative model auto-regressive SIT, or \"AR-SIT\". SIT and AR-SIT are trained on a subset of WebLI [6] of around 128M images, where each image is seen at most once during training. Evaluation is on 30k examples of MS-COCO [27]. We show metrics and generations at different resolutions in Fig. 3 and Tab. 2. 
AR-SIT matches the baseline performance but is several times faster and more memory efficient at lower resolutions. These results demonstrate the property P.4 (Sec. 1)." + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17186" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 70, + 136, + 150 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 136, + 150 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 136, + 150 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 136, + 150 + ], + "type": "image", + "image_path": "f6682868634b64cec23c47bc161205192494d27dd60c2e7eb70a3346c8aad53c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 80, + 151, + 276, + 160 + ], + "lines": [ + { + "bbox": [ + 80, + 151, + 276, + 160 + ], + "spans": [ + { + "bbox": [ + 80, + 151, + 276, + 160 + ], + "type": "text", + "content": "\"an assortment of some colorful vases on display on a table\"" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 138, + 71, + 217, + 150 + ], + "blocks": [ + { + "bbox": [ + 138, + 71, + 217, + 150 + ], + "lines": [ + { + "bbox": [ + 138, + 71, + 217, + 150 + ], + "spans": [ + { + "bbox": [ + 138, + 71, + 217, + 150 + ], + "type": "image", + "image_path": "6864d3bfd29c46ef47320f2be74f6fdfb48f5d5df51d90f044cab6eda868f28a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 219, + 71, + 298, + 150 + ], + "blocks": [ + { + "bbox": [ + 219, + 71, + 298, + 150 + ], + "lines": [ + { + "bbox": [ + 219, + 
71, + 298, + 150 + ], + "spans": [ + { + "bbox": [ + 219, + 71, + 298, + 150 + ], + "type": "image", + "image_path": "8b439c1c4445e8bb03f450c3747720ece9b8ca68277d231315d84b1d2d995342.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 163, + 137, + 243 + ], + "blocks": [ + { + "bbox": [ + 58, + 163, + 137, + 243 + ], + "lines": [ + { + "bbox": [ + 58, + 163, + 137, + 243 + ], + "spans": [ + { + "bbox": [ + 58, + 163, + 137, + 243 + ], + "type": "image", + "image_path": "517ebeae81c58a9d8c5eff6784c15ff05c0996997375cf294898fd3a0f22c118.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 86, + 243, + 270, + 253 + ], + "lines": [ + { + "bbox": [ + 86, + 243, + 270, + 253 + ], + "spans": [ + { + "bbox": [ + 86, + 243, + 270, + 253 + ], + "type": "text", + "content": "\"one train riding on the multiple train tracks side by side.\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 138, + 163, + 217, + 243 + ], + "blocks": [ + { + "bbox": [ + 138, + 163, + 217, + 243 + ], + "lines": [ + { + "bbox": [ + 138, + 163, + 217, + 243 + ], + "spans": [ + { + "bbox": [ + 138, + 163, + 217, + 243 + ], + "type": "image", + "image_path": "83ed571691f19876c737ebdc8743125915e5cd1d2239ea506d6fe5a93bb4bb8f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 219, + 163, + 298, + 243 + ], + "blocks": [ + { + "bbox": [ + 219, + 163, + 298, + 243 + ], + "lines": [ + { + "bbox": [ + 219, + 163, + 298, + 243 + ], + "spans": [ + { + "bbox": [ + 219, + 163, + 298, + 243 + ], + "type": "image", + "image_path": "1a458d4fa8ab0ab3e5ed942ae2f094554c544dd10955bb5af9dcdb9da4d57813.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + 
"bbox": [ + 58, + 256, + 137, + 335 + ], + "blocks": [ + { + "bbox": [ + 58, + 256, + 137, + 335 + ], + "lines": [ + { + "bbox": [ + 58, + 256, + 137, + 335 + ], + "spans": [ + { + "bbox": [ + 58, + 256, + 137, + 335 + ], + "type": "image", + "image_path": "3dd72cc01a64562a44a1ef23d88bfbc31770459cf258ae88a315876675ba6fcd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 336, + 246, + 346 + ], + "lines": [ + { + "bbox": [ + 110, + 336, + 246, + 346 + ], + "spans": [ + { + "bbox": [ + 110, + 336, + 246, + 346 + ], + "type": "text", + "content": "\"an image of a fired hydrant on the street\"" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 354, + 555, + 389 + ], + "lines": [ + { + "bbox": [ + 54, + 354, + 555, + 389 + ], + "spans": [ + { + "bbox": [ + 54, + 354, + 555, + 389 + ], + "type": "text", + "content": "Figure 4. Text-guided image upsampling on MS-COCO [27]. Our coarse-to-fine generative models can take a low-resolution image, encode it as the first few tokens of a sequence, and generate the rest of sequence, which, when decoded, effectively upsamples the input. Each triplet shows the given " + }, + { + "bbox": [ + 54, + 354, + 555, + 389 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 54, + 354, + 555, + 389 + ], + "type": "text", + "content": " image, our " + }, + { + "bbox": [ + 54, + 354, + 555, + 389 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 54, + 354, + 555, + 389 + ], + "type": "text", + "content": " reconstruction and the ground truth." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 138, + 256, + 217, + 335 + ], + "blocks": [ + { + "bbox": [ + 138, + 256, + 217, + 335 + ], + "lines": [ + { + "bbox": [ + 138, + 256, + 217, + 335 + ], + "spans": [ + { + "bbox": [ + 138, + 256, + 217, + 335 + ], + "type": "image", + "image_path": "841d651e744429d21dc2987e5397e2c7a88724fd690efd869b771cbe44f39aa7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 219, + 256, + 298, + 335 + ], + "blocks": [ + { + "bbox": [ + 219, + 256, + 298, + 335 + ], + "lines": [ + { + "bbox": [ + 219, + 256, + 298, + 335 + ], + "spans": [ + { + "bbox": [ + 219, + 256, + 298, + 335 + ], + "type": "image", + "image_path": "ac38bdf79291ffd3ab1d63ed72d49ddcd4bdc91df8eaf45bdd23a839dc5e1b20.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 312, + 70, + 391, + 150 + ], + "blocks": [ + { + "bbox": [ + 312, + 70, + 391, + 150 + ], + "lines": [ + { + "bbox": [ + 312, + 70, + 391, + 150 + ], + "spans": [ + { + "bbox": [ + 312, + 70, + 391, + 150 + ], + "type": "image", + "image_path": "7d60852496ac37397b718e1b30f498f7a70098acac0efb310597ad1dab13ca2a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 338, + 151, + 527, + 160 + ], + "lines": [ + { + "bbox": [ + 338, + 151, + 527, + 160 + ], + "spans": [ + { + "bbox": [ + 338, + 151, + 527, + 160 + ], + "type": "text", + "content": "\"a large elephant standing on top of a grass covered field.\"" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 392, + 71, + 472, + 149 + ], + "blocks": [ + { + "bbox": [ + 392, + 71, + 472, + 149 + ], + "lines": [ + { + "bbox": [ + 392, + 71, + 472, + 149 + ], + "spans": [ + 
{ + "bbox": [ + 392, + 71, + 472, + 149 + ], + "type": "image", + "image_path": "7147839eb4f3185aab90b428d0b70471f566d9b819fde23f2cde8f026ed5c2d3.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 473, + 71, + 553, + 150 + ], + "blocks": [ + { + "bbox": [ + 473, + 71, + 553, + 150 + ], + "lines": [ + { + "bbox": [ + 473, + 71, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 473, + 71, + 553, + 150 + ], + "type": "image", + "image_path": "7ca9cd0198cae27ae5efdec7451fd2e87c474908da76f7885961a5e3d24e50d4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 312, + 163, + 392, + 243 + ], + "blocks": [ + { + "bbox": [ + 312, + 163, + 392, + 243 + ], + "lines": [ + { + "bbox": [ + 312, + 163, + 392, + 243 + ], + "spans": [ + { + "bbox": [ + 312, + 163, + 392, + 243 + ], + "type": "image", + "image_path": "09890d806c9622da8906b588b07da60bbe2e63f1dbd4ee7306ab2a57116e4b3d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 340, + 243, + 525, + 253 + ], + "lines": [ + { + "bbox": [ + 340, + 243, + 525, + 253 + ], + "spans": [ + { + "bbox": [ + 340, + 243, + 525, + 253 + ], + "type": "text", + "content": "\"three tropical bird perched on top of high bare branches\"" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 392, + 163, + 472, + 243 + ], + "blocks": [ + { + "bbox": [ + 392, + 163, + 472, + 243 + ], + "lines": [ + { + "bbox": [ + 392, + 163, + 472, + 243 + ], + "spans": [ + { + "bbox": [ + 392, + 163, + 472, + 243 + ], + "type": "image", + "image_path": "59ac6e58680042f11f5f3a0119b5af6091a722258fd001e25739170221fc02b0.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 473, + 163, + 
553, + 243 + ], + "blocks": [ + { + "bbox": [ + 473, + 163, + 553, + 243 + ], + "lines": [ + { + "bbox": [ + 473, + 163, + 553, + 243 + ], + "spans": [ + { + "bbox": [ + 473, + 163, + 553, + 243 + ], + "type": "image", + "image_path": "3587c8b075c0638cfffa457a21b7a195201df8f66d0c9cf958a65b203f57af7a.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 312, + 256, + 391, + 335 + ], + "blocks": [ + { + "bbox": [ + 312, + 256, + 391, + 335 + ], + "lines": [ + { + "bbox": [ + 312, + 256, + 391, + 335 + ], + "spans": [ + { + "bbox": [ + 312, + 256, + 391, + 335 + ], + "type": "image", + "image_path": "bea17c977521287e89bcf60654bbe228b5d095d91272b5755f17af6822cf67d5.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 336, + 515, + 346 + ], + "lines": [ + { + "bbox": [ + 350, + 336, + 515, + 346 + ], + "spans": [ + { + "bbox": [ + 350, + 336, + 515, + 346 + ], + "type": "text", + "content": "\"a chair with a pink blanket is sitting on a red floor.\"" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 392, + 256, + 472, + 335 + ], + "blocks": [ + { + "bbox": [ + 392, + 256, + 472, + 335 + ], + "lines": [ + { + "bbox": [ + 392, + 256, + 472, + 335 + ], + "spans": [ + { + "bbox": [ + 392, + 256, + 472, + 335 + ], + "type": "image", + "image_path": "843e97f846207e282a99007ba5204d3856f4d925c573a79c3727719130dec0b0.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 473, + 256, + 553, + 335 + ], + "blocks": [ + { + "bbox": [ + 473, + 256, + 553, + 335 + ], + "lines": [ + { + "bbox": [ + 473, + 256, + 553, + 335 + ], + "spans": [ + { + "bbox": [ + 473, + 256, + 553, + 335 + ], + "type": "image", + "image_path": 
"f367404a2ed76834b55032af67c3781a729a479906a959078f6746c6164bdd31.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 55, + 394, + 222, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 394, + 222, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 394, + 222, + 407 + ], + "type": "text", + "content": "5.3. Text-guided image upsampling" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 55, + 411, + 295, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 411, + 295, + 448 + ], + "spans": [ + { + "bbox": [ + 55, + 411, + 295, + 448 + ], + "type": "text", + "content": "We leverage the coarse-to-fine nature of SIT to apply a pretrained AR-SIT for text-guided image upsampling, where we are given a low resolution image and a caption." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 55, + 448, + 295, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 448, + 295, + 543 + ], + "spans": [ + { + "bbox": [ + 55, + 448, + 295, + 543 + ], + "type": "text", + "content": "The idea is to encode the low-resolution image, which will give the first tokens of our high resolution output. AR-SIT then takes these tokens and generates the rest of the sequence. For this to work, the SIT encoder must be scale-causal to properly tokenize low-resolution inputs, while there is no constraint for the decoder so we use dense attention. We denote this variant Scale-Causal Encoder, or \"SIT-SCE\". The model is otherwise as the one used in Sec. 5.2." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 55, + 544, + 295, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 295, + 604 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 295, + 604 + ], + "type": "text", + "content": "Here the inputs are " + }, + { + "bbox": [ + 55, + 544, + 295, + 604 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 55, + 544, + 295, + 604 + ], + "type": "text", + "content": ". Fig. 4 shows text-guided upsampling examples on MS-COCO [27]. We obtain an FID of 6.2 when evaluating these generations, compared to 12.6 when given only the prompts. Fig. 8 and Tab. 4 show additional results upsampling from " + }, + { + "bbox": [ + 55, + 544, + 295, + 604 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 55, + 544, + 295, + 604 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 55, + 611, + 200, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 611, + 200, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 611, + 200, + 624 + ], + "type": "text", + "content": "5.4. Text-guided image editing" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 55, + 629, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 295, + 677 + ], + "type": "text", + "content": "Our coarse-to-fine AR-SIT enables a text-guided image editing application, where we want to change image details while keeping the same overall appearance, which corresponds to freezing lower scales while generating higher." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "content": "We apply an AR-SIT trained only on the maximum likelihood objective as follows. The given image is tokenized only up to the first scale, corresponding to a coarse repre" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 395, + 555, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 395, + 555, + 442 + ], + "spans": [ + { + "bbox": [ + 313, + 395, + 555, + 442 + ], + "type": "text", + "content": "sentation. The tokenizer encoder must be scale-causal in order to avoid leakage from high to low scales, so we use SIT-SCE here. Now we use AR-SIT to generate the rest of the sequence, conditioning on the textual caption." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 313, + 444, + 555, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 444, + 555, + 528 + ], + "spans": [ + { + "bbox": [ + 313, + 444, + 555, + 528 + ], + "type": "text", + "content": "In this experiment, we use a 5-scale model such that the lowest resolution is " + }, + { + "bbox": [ + 313, + 444, + 555, + 528 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 313, + 444, + 555, + 528 + ], + "type": "text", + "content": "; we found that starting with higher resolutions limits the changes the model can generate. Fig. 5 shows some examples of text-guided editing on MS-COCO [27], where we lightly modify the original captions, for example by swapping \"elephants\" with \"cows\". We show additional results in Fig. 6 in the supplemental."
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 313, + 538, + 501, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 538, + 501, + 551 + ], + "spans": [ + { + "bbox": [ + 313, + 538, + 501, + 551 + ], + "type": "text", + "content": "5.5. Class-conditional image generation" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 313, + 556, + 555, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 556, + 555, + 628 + ], + "spans": [ + { + "bbox": [ + 313, + 556, + 555, + 628 + ], + "type": "text", + "content": "Here we evaluate class-conditional generation on " + }, + { + "bbox": [ + 313, + 556, + 555, + 628 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 556, + 555, + 628 + ], + "type": "text", + "content": " ImageNet. We use the SIT-4 tokenizer from Tab. 1 and follow ViT-VQGAN's [57] VIM-Base AR architecture and training protocol. Tab. 3 shows we outperform the baseline in this fair comparison which is evidence for the improved conditioning described in P.3. Fig. 9 shows samples." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 313, + 630, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 555, + 713 + ], + "type": "text", + "content": "AR-SIT-4* departs from ViT-VQGAN and Parti hyperparameters to compare with more recent works, by increasing the perceptual and adversarial loss weights, doubling the tokenizer codebook, reducing its latent dimension, training with a constant learning rate, including 2D RoPE [44] and GeGLU [42], in both the tokenizer and generator. 
We train the AR model longer and since there is no text encoder, we" + } + ] + } + ], + "index": 36 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17187" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 70, + 137, + 150 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 137, + 150 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 137, + 150 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 137, + 150 + ], + "type": "image", + "image_path": "36ecb5f118fe41bcc2c59633b2689ae5406defc5a4d24fa7a3a57028d53d54c0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 151, + 221, + 160 + ], + "lines": [ + { + "bbox": [ + 135, + 151, + 221, + 160 + ], + "spans": [ + { + "bbox": [ + 135, + 151, + 221, + 160 + ], + "type": "text", + "content": "\"a close-up of a dog face.\"" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 137, + 71, + 217, + 150 + ], + "blocks": [ + { + "bbox": [ + 137, + 71, + 217, + 150 + ], + "lines": [ + { + "bbox": [ + 137, + 71, + 217, + 150 + ], + "spans": [ + { + "bbox": [ + 137, + 71, + 217, + 150 + ], + "type": "image", + "image_path": "eaa16f39af62397d6cb9924f8124b28531b5cf0c7837c487bb5b87773e0227e3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 217, + 71, + 298, + 150 + ], + "blocks": [ + { + "bbox": [ + 217, + 71, + 298, + 150 + ], + "lines": [ + { + "bbox": [ + 217, + 71, + 298, + 150 + ], + "spans": [ + { + "bbox": [ + 217, + 71, + 298, + 150 + ], + "type": "image", + "image_path": 
"8b7a5d722eedde08ddd90ded0d73e568da830eff1aa825e3f59c8feafb447751.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 167, + 137, + 243 + ], + "blocks": [ + { + "bbox": [ + 58, + 167, + 137, + 243 + ], + "lines": [ + { + "bbox": [ + 58, + 167, + 137, + 243 + ], + "spans": [ + { + "bbox": [ + 58, + 167, + 137, + 243 + ], + "type": "image", + "image_path": "e1bdf2e9ecfc4fc44af3a82b3207598dc95b2058bc453fd53d75c2e575b26e44.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 112, + 243, + 244, + 253 + ], + "lines": [ + { + "bbox": [ + 112, + 243, + 244, + 253 + ], + "spans": [ + { + "bbox": [ + 112, + 243, + 244, + 253 + ], + "type": "text", + "content": "\"a couple of cows are standing in a field\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 137, + 167, + 217, + 243 + ], + "blocks": [ + { + "bbox": [ + 137, + 167, + 217, + 243 + ], + "lines": [ + { + "bbox": [ + 137, + 167, + 217, + 243 + ], + "spans": [ + { + "bbox": [ + 137, + 167, + 217, + 243 + ], + "type": "image", + "image_path": "4b686611eccd2b3eb076ee9970ff9de5fa2e2ee260cf20e00d6c7b996ea6c511.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 217, + 167, + 298, + 243 + ], + "blocks": [ + { + "bbox": [ + 217, + 167, + 298, + 243 + ], + "lines": [ + { + "bbox": [ + 217, + 167, + 298, + 243 + ], + "spans": [ + { + "bbox": [ + 217, + 167, + 298, + 243 + ], + "type": "image", + "image_path": "fba81ff6c5e4cb8a1d079101361afbc84072e7398dc85afe295e398c9d708fd4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 58, + 256, + 137, + 335 + ], + "blocks": [ + { + "bbox": [ + 58, + 256, + 137, + 335 + ], + "lines": [ + { + "bbox": [ + 
58, + 256, + 137, + 335 + ], + "spans": [ + { + "bbox": [ + 58, + 256, + 137, + 335 + ], + "type": "image", + "image_path": "17dafd257af776ffd48630fd378c65e9aa0eae746acc11a8c705d00d85eeb144.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 337, + 233, + 346 + ], + "lines": [ + { + "bbox": [ + 123, + 337, + 233, + 346 + ], + "spans": [ + { + "bbox": [ + 123, + 337, + 233, + 346 + ], + "type": "text", + "content": "\"a robot on a grass covered field.\"" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 354, + 555, + 399 + ], + "lines": [ + { + "bbox": [ + 55, + 354, + 555, + 399 + ], + "spans": [ + { + "bbox": [ + 55, + 354, + 555, + 399 + ], + "type": "text", + "content": "Figure 5. Text-guided image editing on MS-COCO [27]. Our coarse-to-fine generative models can do text-guided editing by encoding a given image but keeping only the lower scales, and using a pre-trained AR-SIT to re-generate the higher scales conditioned on the textual prompt. Each triplet shows the given image, its " + }, + { + "bbox": [ + 55, + 354, + 555, + 399 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 55, + 354, + 555, + 399 + ], + "type": "text", + "content": " reconstruction using only the coefficients used to start the generation, and the edited image after generating the whole sequence." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 137, + 256, + 217, + 335 + ], + "blocks": [ + { + "bbox": [ + 137, + 256, + 217, + 335 + ], + "lines": [ + { + "bbox": [ + 137, + 256, + 217, + 335 + ], + "spans": [ + { + "bbox": [ + 137, + 256, + 217, + 335 + ], + "type": "image", + "image_path": "15650ebe8f5fb1e14f5cbff4550b8905037edd0aa3d798bc3ff42b08b442e51b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 217, + 256, + 298, + 335 + ], + "blocks": [ + { + "bbox": [ + 217, + 256, + 298, + 335 + ], + "lines": [ + { + "bbox": [ + 217, + 256, + 298, + 335 + ], + "spans": [ + { + "bbox": [ + 217, + 256, + 298, + 335 + ], + "type": "image", + "image_path": "5ee81e4ebd6fea49160ac75d835979d3bf656adeb5b1e4f03fcd0468c8bb7029.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 312, + 71, + 391, + 150 + ], + "blocks": [ + { + "bbox": [ + 312, + 71, + 391, + 150 + ], + "lines": [ + { + "bbox": [ + 312, + 71, + 391, + 150 + ], + "spans": [ + { + "bbox": [ + 312, + 71, + 391, + 150 + ], + "type": "image", + "image_path": "a4f5f01bcb766309cac7140a75cfd6400b2de4c276273946f37403fb1790c353.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 391, + 71, + 472, + 150 + ], + "blocks": [ + { + "bbox": [ + 391, + 71, + 472, + 150 + ], + "lines": [ + { + "bbox": [ + 391, + 71, + 472, + 150 + ], + "spans": [ + { + "bbox": [ + 391, + 71, + 472, + 150 + ], + "type": "image", + "image_path": "a269ec68a99d50ebee0f6707b77c78d00284cdcb065d5851444d20dad58912dd.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 394, + 151, + 471, + 159 + ], + "lines": [ + { + "bbox": [ + 394, + 151, + 471, + 159 + ], + 
"spans": [ + { + "bbox": [ + 394, + 151, + 471, + 159 + ], + "type": "text", + "content": "\"some boats on a lake.\"" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 472, + 71, + 553, + 150 + ], + "blocks": [ + { + "bbox": [ + 472, + 71, + 553, + 150 + ], + "lines": [ + { + "bbox": [ + 472, + 71, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 472, + 71, + 553, + 150 + ], + "type": "image", + "image_path": "ea23faefa17f90054e42473d5aea37b128c8b8013a982acaeddc599c646f77d1.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 312, + 163, + 391, + 243 + ], + "blocks": [ + { + "bbox": [ + 312, + 163, + 391, + 243 + ], + "lines": [ + { + "bbox": [ + 312, + 163, + 391, + 243 + ], + "spans": [ + { + "bbox": [ + 312, + 163, + 391, + 243 + ], + "type": "image", + "image_path": "0d30afa8e30a582f30149e98842e7a669a3e05b5821eb8de5f7de5dd36265abf.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 368, + 243, + 496, + 253 + ], + "lines": [ + { + "bbox": [ + 368, + 243, + 496, + 253 + ], + "spans": [ + { + "bbox": [ + 368, + 243, + 496, + 253 + ], + "type": "text", + "content": "\"a green pasture filled with wildflowers.\"" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 391, + 163, + 472, + 243 + ], + "blocks": [ + { + "bbox": [ + 391, + 163, + 472, + 243 + ], + "lines": [ + { + "bbox": [ + 391, + 163, + 472, + 243 + ], + "spans": [ + { + "bbox": [ + 391, + 163, + 472, + 243 + ], + "type": "image", + "image_path": "ff89ab629159c0c08a685fa6f925aba8900b484de8f29dc646b6ec676b76173c.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 472, + 163, + 553, + 243 + ], + "blocks": [ + { + "bbox": [ + 
472, + 163, + 553, + 243 + ], + "lines": [ + { + "bbox": [ + 472, + 163, + 553, + 243 + ], + "spans": [ + { + "bbox": [ + 472, + 163, + 553, + 243 + ], + "type": "image", + "image_path": "b5f7951518fe619b80069c74c9078348110e9869631ac65634c35b482c067907.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 312, + 256, + 391, + 335 + ], + "blocks": [ + { + "bbox": [ + 312, + 256, + 391, + 335 + ], + "lines": [ + { + "bbox": [ + 312, + 256, + 391, + 335 + ], + "spans": [ + { + "bbox": [ + 312, + 256, + 391, + 335 + ], + "type": "image", + "image_path": "acb38a9ac3421cb4c5aa1397614270a07ffe99a30a75b4517d1a1f8d2e534564.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 336, + 504, + 346 + ], + "lines": [ + { + "bbox": [ + 361, + 336, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 361, + 336, + 504, + 346 + ], + "type": "text", + "content": "\"a very cute dog laying in a very small sink.\"" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 391, + 256, + 472, + 335 + ], + "blocks": [ + { + "bbox": [ + 391, + 256, + 472, + 335 + ], + "lines": [ + { + "bbox": [ + 391, + 256, + 472, + 335 + ], + "spans": [ + { + "bbox": [ + 391, + 256, + 472, + 335 + ], + "type": "image", + "image_path": "1a1b11602e9329617c11e9f5b02cf0445c2342b98f5671840a234d1a3534b577.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 472, + 256, + 553, + 335 + ], + "blocks": [ + { + "bbox": [ + 472, + 256, + 553, + 335 + ], + "lines": [ + { + "bbox": [ + 472, + 256, + 553, + 335 + ], + "spans": [ + { + "bbox": [ + 472, + 256, + 553, + 335 + ], + "type": "image", + "image_path": "caa571ccd6daeecd2b24d2945ccbd8468bc354ee21baf542d4cdf8af06cfa34d.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 22 + }, + { + "type": "table", + "bbox": [ + 57, + 404, + 294, + 514 + ], + "blocks": [ + { + "bbox": [ + 57, + 404, + 294, + 514 + ], + "lines": [ + { + "bbox": [ + 57, + 404, + 294, + 514 + ], + "spans": [ + { + "bbox": [ + 57, + 404, + 294, + 514 + ], + "type": "table", + "html": "
paramsseq lenCFGFID ↓IS ↑
AR-ViT-VQGAN (reported)650M1024-8.81110.8
AR-ViT-VQGAN (reproduced)650M1024-8.37111.8
AR-SIT-4 (Ours)650M1024-6.95138.3
RQ-VAE [26]480M256-15.7286.8
DQ-VAE [22]355M640-7.34152.8
LlamaGen-L [45]343M2561.54.08198.5
VAR [48]310M6802.03.30274.4
AR-SIT-4* (Ours)350M10241.54.06190.9
", + "image_path": "fe7dc44f4c4262e5cf1e48adcced02f419151f31341155e9b61616f04ff51987.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "table_body" + } + ], + "index": 25 + }, + { + "bbox": [ + 55, + 523, + 295, + 612 + ], + "lines": [ + { + "bbox": [ + 55, + 523, + 295, + 612 + ], + "spans": [ + { + "bbox": [ + 55, + 523, + 295, + 612 + ], + "type": "text", + "content": "Table 3. ImageNet " + }, + { + "bbox": [ + 55, + 523, + 295, + 612 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 523, + 295, + 612 + ], + "type": "text", + "content": " class conditional generation. Top half shows a fair comparison against ViT-VQGAN, which we clearly outperform. AR-SIT-4* uses different hyperparameters for comparison against a broader class of methods. While we do not outperform all of them, there are differences in the architectures and training schedules that influence the results and are orthogonal to our contributions. Moreover, none of the baselines has the multiscale capabilities described in P.2 and P.4 (Sec. 1)." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 714 + ], + "type": "text", + "content": "double the number of decoder layers. The bottom half of Tab. 3 compares with similarly-sized AR models including some with multiscale/residual latents (although none exhibit properties P.2 and P.4). 
AR-SIT-4* performance matches LlamaGen but not VAR, but the models are not exactly comparable since they use convolutional tokenizers and other improvements such as AdaLN [34] and attention normalization; VAR also trains the tokenizer on larger" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 406, + 553, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 406, + 553, + 430 + ], + "spans": [ + { + "bbox": [ + 313, + 406, + 553, + 430 + ], + "type": "text", + "content": "data than ImageNet. These are orthogonal to our contributions and can be incorporated in future work." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 431, + 554, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 431, + 554, + 480 + ], + "spans": [ + { + "bbox": [ + 313, + 431, + 554, + 480 + ], + "type": "text", + "content": "We also train an AR-SIT-5* on " + }, + { + "bbox": [ + 313, + 431, + 554, + 480 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 313, + 431, + 554, + 480 + ], + "type": "text", + "content": " ImageNet and obtain an FID of 5.97 and IS of 215.22, showing robustness at higher resolutions, while most prior work requires separate stages for upsampling. Fig. 10 shows samples." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 313, + 497, + 467, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 497, + 467, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 497, + 467, + 510 + ], + "type": "text", + "content": "6. Conclusion and limitations" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 313, + 520, + 554, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 520, + 554, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 520, + 554, + 604 + ], + "type": "text", + "content": "We presented a spectral image tokenizer (SIT), and an autoregressive generative transformer trained with it (AR-SIT). 
SIT is naturally multiscale and leverages spectral properties of natural images for improved reconstruction quality. AR-SIT enables applications such as rapid generation of coarse images that can be refined later, and text-guided image upsampling and editing." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 605, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 555, + 714 + ], + "type": "text", + "content": "While our methods improved tokenizer reconstruction accuracy and class-conditional generation, the text-to-image metrics were similar to the fair baseline. Chang et al. [4] similarly observed that a better tokenizer does not necessarily lead to a better generative model. Nevertheless, our method has multiscale properties and enables new applications not possible with prior work. We only experiment with relatively small AR-SITs of 350-650M parameters, while the Parti [58] baseline goes up to 22B parameters." + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17188" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 165, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 165, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 165, + 85 + ], + "type": "text", + "content": "7. 
Acknowledgments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 296, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 128 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 128 + ], + "type": "text", + "content": "We thank Leonardo Zepeda-Nuñez for reviewing this manuscript and offering interesting discussions and suggestions, and Jon Barron for sharing useful code we relied on." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 140, + 115, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 115, + 152 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 115, + 152 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 159, + 296, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 61, + 159, + 296, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 159, + 296, + 215 + ], + "spans": [ + { + "bbox": [ + 61, + 159, + 296, + 215 + ], + "type": "text", + "content": "[1] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, Wesam Manassra, Prafulla Dhariwal, Casey Chu, Yunxin Jiao, and Aditya Ramesh. Improving image generation with better captions. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 217, + 296, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 217, + 296, + 258 + ], + "spans": [ + { + "bbox": [ + 61, + 217, + 296, + 258 + ], + "type": "text", + "content": "[2] Lei Cai, Hongyang Gao, and Shuiwang Ji. Multi-Stage Variational Auto-Encoders for Coarse-to-Fine Image Generation, page 630–638. Society for Industrial and Applied Mathematics, 2019. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 261, + 295, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 261, + 295, + 293 + ], + "spans": [ + { + "bbox": [ + 61, + 261, + 295, + 293 + ], + "type": "text", + "content": "[3] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T. Freeman. Maskgit: Masked generative image transformer. In CVPR, 2022. 1, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 296, + 296, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 296, + 296, + 361 + ], + "spans": [ + { + "bbox": [ + 61, + 296, + 296, + 361 + ], + "type": "text", + "content": "[4] Huiwen Chang, Han Zhang, Jarred Barber, Aaron Maschinot, Jose Lezama, Lu Jiang, Ming-Hsuan Yang, Kevin Patrick Murphy, William T. Freeman, Michael Rubinstein, Yuanzhen Li, and Dilip Krishnan. Muse: Text-to-image generation via masked generative transformers. In ICML, 2023. 1, 3, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 363, + 296, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 363, + 296, + 396 + ], + "spans": [ + { + "bbox": [ + 62, + 363, + 296, + 396 + ], + "type": "text", + "content": "[5] XI Chen, Nikhil Mishra, Mostafa Rohaninejad, and Pieter Abbeel. PixelSNAIL: An improved autoregressive generative model. In ICML, 2018. 
3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 61, + 397, + 296, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 397, + 296, + 506 + ], + "spans": [ + { + "bbox": [ + 61, + 397, + 296, + 506 + ], + "type": "text", + "content": "[6] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, Alexander Kolesnikov, Joan Puigcerver, Nan Ding, Keran Rong, Hassan Akbari, Gaurav Mishra, Linting Xue, Ashish Thapliyal, James Bradbury, Weicheng Kuo, Mojtaba Seyedhosseini, Chao Jia, Burcu Karagol Ayan, Carlos Riquelme, Andreas Steiner, Anelia Angelova, Xiaohua Zhai, Neil Houlsby, and Radu Soricut. Pali: A jointly-scaled multilingual language-image model, 2022. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 509, + 296, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 509, + 296, + 563 + ], + "spans": [ + { + "bbox": [ + 62, + 509, + 296, + 563 + ], + "type": "text", + "content": "[7] C. A. Christopoulos, T. Ebrahimi, and A. N. Skodras. Jpeg2000: the new still picture compression standard. In Proceedings of the 2000 ACM Workshops on Multimedia, page 45-49, New York, NY, USA, 2000. Association for Computing Machinery. 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 565, + 296, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 565, + 296, + 608 + ], + "spans": [ + { + "bbox": [ + 62, + 565, + 296, + 608 + ], + "type": "text", + "content": "[8] A. Cohen, Ingrid Daubechies, and J.-C. Feauveau. Biorthogonal bases of compactly supported wavelets. Communications on Pure and Applied Mathematics, 45(5):485-560, 1992. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 610, + 295, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 610, + 295, + 632 + ], + "spans": [ + { + "bbox": [ + 62, + 610, + 295, + 632 + ], + "type": "text", + "content": "[9] Ingrid Daubechies. Ten lectures on wavelets. Society for Industrial and Applied Mathematics, USA, 1992. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 634, + 295, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 634, + 295, + 666 + ], + "spans": [ + { + "bbox": [ + 57, + 634, + 295, + 666 + ], + "type": "text", + "content": "[10] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009. 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 668, + 295, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 668, + 295, + 690 + ], + "spans": [ + { + "bbox": [ + 57, + 668, + 295, + 690 + ], + "type": "text", + "content": "[11] Prafulla Dhariwal and Alex Nichol. Diffusion models beat gans on image synthesis. In NeurIPS, 2021. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 692, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 692, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 692, + 294, + 712 + ], + "type": "text", + "content": "[12] Sander Dieleman. Diffusion is spectral autoregression, 2024. 
2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 554, + 702 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 316, + 73, + 554, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 554, + 138 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 554, + 138 + ], + "type": "text", + "content": "[13] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 2, 3, 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 140, + 553, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 140, + 553, + 182 + ], + "spans": [ + { + "bbox": [ + 316, + 140, + 553, + 182 + ], + "type": "text", + "content": "[14] Tim Elsner, Paula Usinger, Victor Czech, Gregor Kobsik, Yanjiang He, Isaak Lim, and Leif Kobbelt. Quantised global autoencoder: A holistic approach to representing visual data, 2024. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 184, + 553, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 184, + 553, + 215 + ], + "spans": [ + { + "bbox": [ + 317, + 184, + 553, + 215 + ], + "type": "text", + "content": "[15] Patrick Esser, Robin Rombach, and Björn Ommer. Taming transformers for high-resolution image synthesis. In CVPR, 2021. 
1, 2, 3, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 217, + 553, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 217, + 553, + 281 + ], + "spans": [ + { + "bbox": [ + 316, + 217, + 553, + 281 + ], + "type": "text", + "content": "[16] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, Dustin Podell, Tim Dockhorn, Zion English, Kyle Lacey, Alex Goodwin, Yannik Marek, and Robin Rombach. Scaling rectified flow transformers for high-resolution image synthesis, 2024. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 283, + 553, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 283, + 553, + 315 + ], + "spans": [ + { + "bbox": [ + 316, + 283, + 553, + 315 + ], + "type": "text", + "content": "[17] Rinon Gal, Dana Cohen Hochberg, Amit Bermano, and Daniel Cohen-Or. Swagan: a style-based wavelet-driven generative model. ACM TOG, 2021. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 316, + 553, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 316, + 553, + 347 + ], + "spans": [ + { + "bbox": [ + 316, + 316, + 553, + 347 + ], + "type": "text", + "content": "[18] Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Joshua M. Susskind, and Navdeep Jaitly. Matryoshka diffusion models. In ICLR, 2024. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 350, + 553, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 350, + 553, + 402 + ], + "spans": [ + { + "bbox": [ + 316, + 350, + 553, + 402 + ], + "type": "text", + "content": "[19] Philippe Hansen-Estruch, David Yan, Ching-Yao Chung, Orr Zohar, Jialiang Wang, Tingbo Hou, Tao Xu, Sriram Vishwanath, Peter Vajda, and Xinlei Chen. Learnings from scaling visual tokenizers for reconstruction and generation, 2025. 
1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 404, + 553, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 404, + 553, + 447 + ], + "spans": [ + { + "bbox": [ + 316, + 404, + 553, + 447 + ], + "type": "text", + "content": "[20] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In NeurIPS, 2017. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 449, + 553, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 449, + 553, + 470 + ], + "spans": [ + { + "bbox": [ + 316, + 449, + 553, + 470 + ], + "type": "text", + "content": "[21] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance, 2022. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 472, + 553, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 472, + 553, + 514 + ], + "spans": [ + { + "bbox": [ + 317, + 472, + 553, + 514 + ], + "type": "text", + "content": "[22] Mengqi Huang, Zhendong Mao, Zhuowei Chen, and Yongdong Zhang. Towards accurate image coding: Improved autoregressive image generation with dynamic vector quantization. In CVPR, 2023. 2, 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 515, + 488, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 515, + 488, + 525 + ], + "spans": [ + { + "bbox": [ + 316, + 515, + 488, + 525 + ], + "type": "text", + "content": "[23]Imagen-Team-Google. Imagen 3, 2024. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 526, + 553, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 526, + 553, + 557 + ], + "spans": [ + { + "bbox": [ + 317, + 526, + 553, + 557 + ], + "type": "text", + "content": "[24] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. 
Progressive growing of GANs for improved quality, stability, and variation. In ICLR, 2018. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 559, + 553, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 559, + 553, + 592 + ], + "spans": [ + { + "bbox": [ + 317, + 559, + 553, + 592 + ], + "type": "text", + "content": "[25] Vladimir Kulikov, Shahar Yadin, Matan Kleiner, and Tomer Michaeli. Sinddm: A single image denoising diffusion model. In ICML, pages 17920-17930, 2023. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 593, + 553, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 593, + 553, + 624 + ], + "spans": [ + { + "bbox": [ + 317, + 593, + 553, + 624 + ], + "type": "text", + "content": "[26] Doyup Lee, Chiheon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. Autoregressive image generation using residual quantization. In CVPR, 2022. 2, 8" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 625, + 553, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 625, + 553, + 668 + ], + "spans": [ + { + "bbox": [ + 317, + 625, + 553, + 668 + ], + "type": "text", + "content": "[27] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO: common objects in context. In ECCV. 5, 6, 7, 8, 2, 3" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 670, + 553, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 702 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 702 + ], + "type": "text", + "content": "[28] Xiaoxiao Ma, Mohan Zhou, Tao Liang, Yalong Bai, Tiejun Zhao, Huaian Chen, and Yi Jin. Star: Scale-wise text-to-image generation via auto-regressive representations, 2024."
+ } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17189" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 296, + 712 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 56, + 73, + 296, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 296, + 106 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 296, + 106 + ], + "type": "text", + "content": "[29] Stephane Mallat. A Wavelet Tour of Signal Processing, Third Edition: The Sparse Way. Academic Press, Inc., USA, 3rd edition, 2008. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 106, + 296, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 106, + 296, + 139 + ], + "spans": [ + { + "bbox": [ + 56, + 106, + 296, + 139 + ], + "type": "text", + "content": "[30] Wael Mattar, Idan Levy, Nir Sharon, and Shai Dekel. Wavelets are all you need for autoregressive image generation, 2024. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 140, + 296, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 296, + 173 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 296, + 173 + ], + "type": "text", + "content": "[31] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral normalization for generative adversarial networks. In ICLR, 2018. 
5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 174, + 296, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 174, + 296, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 174, + 296, + 206 + ], + "type": "text", + "content": "[32] Charlie Nash, Jacob Menick, Sander Dieleman, and Peter W. Battaglia. Generating images with sparse representations. In ICML, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 207, + 235, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 207, + 235, + 218 + ], + "spans": [ + { + "bbox": [ + 56, + 207, + 235, + 218 + ], + "type": "text", + "content": "[33] OpenAI. Gpt-4 technical report, 2023. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 219, + 296, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 219, + 296, + 240 + ], + "spans": [ + { + "bbox": [ + 56, + 219, + 296, + 240 + ], + "type": "text", + "content": "[34] William Peebles and Saining Xie. Scalable diffusion models with transformers. 2023. 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 241, + 296, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 241, + 296, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 241, + 296, + 274 + ], + "type": "text", + "content": "[35] Hao Phung, Quan Dao, and Anh Tran. Wavelet diffusion models are fast and scalable image generators. In CVPR, 2023. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 276, + 296, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 276, + 296, + 319 + ], + "spans": [ + { + "bbox": [ + 56, + 276, + 296, + 319 + ], + "type": "text", + "content": "[36] Vivek Ramanujan, Kushal Tirumala, Armen Aghajanyan, Luke Zettlemoyer, and Ali Farhadi. When worse is better: Navigating the compression-generation tradeoff in visual tokenization, 2024. 
1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 320, + 296, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 320, + 296, + 352 + ], + "spans": [ + { + "bbox": [ + 56, + 320, + 296, + 352 + ], + "type": "text", + "content": "[37] Ali Razavi, Aaron van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. In NeurIPS, 2019. 1, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 354, + 296, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 354, + 296, + 387 + ], + "spans": [ + { + "bbox": [ + 56, + 354, + 296, + 387 + ], + "type": "text", + "content": "[38] Robin Rombach, A. Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2021. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 388, + 296, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 296, + 420 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 296, + 420 + ], + "type": "text", + "content": "[39] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. Singan: Learning a generative model from a single natural image. In ICCV, 2019. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 421, + 296, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 421, + 296, + 443 + ], + "spans": [ + { + "bbox": [ + 56, + 421, + 296, + 443 + ], + "type": "text", + "content": "[40] Dohoon Ryu and Jong Chul Ye. Pyramidal denoising diffusion probabilistic models, 2022. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 444, + 296, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 444, + 296, + 477 + ], + "spans": [ + { + "bbox": [ + 56, + 444, + 296, + 477 + ], + "type": "text", + "content": "[41] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, Xi Chen, and Xi Chen. 
Improved techniques for training gans. In NeurIPS, 2016. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 478, + 288, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 478, + 288, + 488 + ], + "spans": [ + { + "bbox": [ + 56, + 478, + 288, + 488 + ], + "type": "text", + "content": "[42] Noam Shazeer. Glu variants improve transformer, 2020. 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 489, + 296, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 489, + 296, + 522 + ], + "spans": [ + { + "bbox": [ + 56, + 489, + 296, + 522 + ], + "type": "text", + "content": "[43] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In ICML, 2015. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 522, + 296, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 522, + 296, + 555 + ], + "spans": [ + { + "bbox": [ + 56, + 522, + 296, + 555 + ], + "type": "text", + "content": "[44] Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding, 2021. 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 556, + 296, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 556, + 296, + 601 + ], + "spans": [ + { + "bbox": [ + 56, + 556, + 296, + 601 + ], + "type": "text", + "content": "[45] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024. 
2, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 601, + 296, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 601, + 296, + 623 + ], + "spans": [ + { + "bbox": [ + 56, + 601, + 296, + 623 + ], + "type": "text", + "content": "[46] Gemini Team. Gemini: A family of highly capable multimodal models, 2023. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 624, + 267, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 267, + 635 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 267, + 635 + ], + "type": "text", + "content": "[47] Llama team. The llama 3 herd of models, 2024. 1, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 635, + 296, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 635, + 296, + 669 + ], + "spans": [ + { + "bbox": [ + 56, + 635, + 296, + 669 + ], + "type": "text", + "content": "[48] Keyu Tian, Yi Jiang, Zehuan Yuan, Bingyue Peng, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. In NeurIPS, 2024. 2, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 670, + 296, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 670, + 296, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 670, + 296, + 712 + ], + "type": "text", + "content": "[49] Aaron van den Oord, Nal Kalchbrenner, Lasse Espeholt, kory kavukcuoglu, Oriol Vinyals, and Alex Graves. Conditional image generation with pixelCNN decoders. In NeurIPS, 2016. 
2" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 599 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "type": "text", + "content": "[50] Aäron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel recurrent neural networks. In ICML, 2016. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 107, + 553, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 107, + 553, + 139 + ], + "spans": [ + { + "bbox": [ + 316, + 107, + 553, + 139 + ], + "type": "text", + "content": "[51] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In NeurIPS, 2017. 1, 2, 3, 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 140, + 553, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 140, + 553, + 174 + ], + "spans": [ + { + "bbox": [ + 316, + 140, + 553, + 174 + ], + "type": "text", + "content": "[52] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 175, + 553, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 553, + 207 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 553, + 207 + ], + "type": "text", + "content": "[53] Xin Wen, Bingchen Zhao, Ismail Elezi, Jiankang Deng, and Xiaojuan Qi. \"principal components\" enable a new language of images, 2025. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 208, + 553, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 208, + 553, + 241 + ], + "spans": [ + { + "bbox": [ + 316, + 208, + 553, + 241 + ], + "type": "text", + "content": "[54] Jingfeng Yao, Bin Yang, and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. In CVPR, 2025. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 242, + 553, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 242, + 553, + 276 + ], + "spans": [ + { + "bbox": [ + 316, + 242, + 553, + 276 + ], + "type": "text", + "content": "[55] Ting Yao, Yingwei Pan, Yehao Li, Chong-Wah Ngo, and Tao Mei. Wave-vit: Unifying wavelet and transformers for visual representation learning. In ECCV, 2022. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 276, + 553, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 276, + 553, + 308 + ], + "spans": [ + { + "bbox": [ + 316, + 276, + 553, + 308 + ], + "type": "text", + "content": "[56] Hu Yu, Hao Luo, Hangjie Yuan, Yu Rong, and Feng Zhao. Frequency autoregressive image generation with continuous tokens, 2025. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 309, + 553, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 309, + 553, + 354 + ], + "spans": [ + { + "bbox": [ + 316, + 309, + 553, + 354 + ], + "type": "text", + "content": "[57] Jiahui Yu, Xin Li, Jing Yu Koh, Han Zhang, Ruoming Pang, James Qin, Alexander Ku, Yuanzhong Xu, Jason Baldridge, and Yonghui Wu. Vector-quantized image modeling with improved VQGAN. In ICLR, 2022. 
1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 355, + 553, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 355, + 553, + 431 + ], + "spans": [ + { + "bbox": [ + 316, + 355, + 553, + 431 + ], + "type": "text", + "content": "[58] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Ben Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. Transactions on Machine Learning Research, 2022. Featured Certification. 1, 3, 5, 6, 8" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 433, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 433, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 316, + 433, + 553, + 498 + ], + "type": "text", + "content": "[59] Lijun Yu, Jose Lezama, Nitesh Bharadwaj Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Agrim Gupta, Xiuye Gu, Alexander G Hauptmann, Boqing Gong, Ming-Hsuan Yang, Irfan Essa, David A Ross, and Lu Jiang. Language model beats diffusion - tokenizer is key to visual generation. In ICLR, 2024. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 316, + 499, + 553, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 499, + 553, + 542 + ], + "spans": [ + { + "bbox": [ + 316, + 499, + 553, + 542 + ], + "type": "text", + "content": "[60] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. In NeurIPS, 2024. 
1" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 316, + 544, + 553, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 544, + 553, + 578 + ], + "spans": [ + { + "bbox": [ + 316, + 544, + 553, + 578 + ], + "type": "text", + "content": "[61] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 2" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 316, + 578, + 553, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 578, + 553, + 599 + ], + "spans": [ + { + "bbox": [ + 316, + 578, + 553, + 599 + ], + "type": "text", + "content": "[62] Zhenhai Zhu and Radu Soricut. Wavelet-based image tokenizer for vision transformers, 2024. 2" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17190" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file