diff --git "a/2025/TerraMind_ Large-Scale Generative Multimodality for Earth Observation/layout.json" "b/2025/TerraMind_ Large-Scale Generative Multimodality for Earth Observation/layout.json" new file mode 100644--- /dev/null +++ "b/2025/TerraMind_ Large-Scale Generative Multimodality for Earth Observation/layout.json" @@ -0,0 +1,7824 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 77, + 103, + 533, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 103, + 533, + 121 + ], + "spans": [ + { + "bbox": [ + 77, + 103, + 533, + 121 + ], + "type": "text", + "content": "TerraMind: Large-Scale Generative Multimodality for Earth Observation" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 61, + 141, + 556, + 201 + ], + "blocks": [ + { + "bbox": [ + 61, + 141, + 556, + 201 + ], + "lines": [ + { + "bbox": [ + 61, + 141, + 556, + 201 + ], + "spans": [ + { + "bbox": [ + 61, + 141, + 556, + 201 + ], + "type": "image", + "image_path": "6a508ecc15dcc21c6803d3b04d4f20a8dcabf19832964f2fe4ee1a9d82220c68.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "spans": [ + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "text", + "content": "IBM Research - Europe " + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "text", + "content": "ETH Zurich " + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "text", + "content": "Forschungszentrum Jülich " + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "text", + "content": "European Space Agency " + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "text", + "content": "-Lab " + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "text", + "content": "NASA IMPACT " + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 122, + 203, + 495, + 233 + ], + "type": "text", + "content": "University of Iceland" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 241, + 235, + 378, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 235, + 378, + 246 + ], + "spans": [ + { + "bbox": [ + 241, + 235, + 378, + 246 + ], + "type": "text", + "content": "johnannes.jakubikl@ibm.com" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 55, + 279, + 399, + 514 + ], + "blocks": [ + { + "bbox": [ + 55, + 279, + 399, + 514 + ], + "lines": [ + { + "bbox": [ + 55, + 279, + 399, + 514 + ], + "spans": [ + { + "bbox": [ + 55, + 279, + 399, + 514 + ], + "type": "image", + "image_path": "c2719cca28b3707036b1e21c1c8178c88eade472b909cf3ef0639ccd403e1524.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 523, + 555, 
+ 559 + ], + "lines": [ + { + "bbox": [ + 55, + 523, + 555, + 559 + ], + "spans": [ + { + "bbox": [ + 55, + 523, + 555, + 559 + ], + "type": "text", + "content": "Figure 1. TerraMind represents the first any-to-any generative, and large-scale multimodal model for Earth observation pre-trained on 500 billion tokens from global geospatial data. The model digests multi-scale representations at pixel-level and token-level simultaneously. TerraMindv1 unlocks (i) generation, (ii) zero-shot and finetuning applications, and (iii) \"Thinking-in-Modalities\" finetuning and inference." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 409, + 279, + 554, + 365 + ], + "blocks": [ + { + "bbox": [ + 409, + 279, + 554, + 365 + ], + "lines": [ + { + "bbox": [ + 409, + 279, + 554, + 365 + ], + "spans": [ + { + "bbox": [ + 409, + 279, + 554, + 365 + ], + "type": "image", + "image_path": "b6bb9405779306eee986642cf6d11a4df535fdd5d3976835632bf50bec8e86bd.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 409, + 369, + 554, + 513 + ], + "blocks": [ + { + "bbox": [ + 409, + 369, + 554, + 513 + ], + "lines": [ + { + "bbox": [ + 409, + 369, + 554, + 513 + ], + "spans": [ + { + "bbox": [ + 409, + 369, + 554, + 513 + ], + "type": "image", + "image_path": "f7c9a63048871fd77a2eec6e0ea9896413b1fa97b6cae2bce0e8cf481bc98f2a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 152, + 567, + 200, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 567, + 200, + 580 + ], + "spans": [ + { + "bbox": [ + 152, + 567, + 200, + 580 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 594, + 298, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 298, + 678 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 298, + 678 + ], + "type": "text", + "content": "We present TerraMind, the first any-to-any generative, multimodal deep learning model for Earth observation (EO). Unlike other approaches, TerraMind is pretrained on dual-scale representations combining both token-level and pixel-level data across modalities. On a token level, TerraMind encodes high-level contextual information to learn cross-modal relationships, while on a pixel level, TerraMind lever" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 569, + 556, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 569, + 556, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 569, + 556, + 700 + ], + "type": "text", + "content": "ages fine-grained representations to capture critical spatial nuances. In this paper, we demonstrate that (i) TerraMind achieves beyond state-of-the-art performance in community-standard benchmarks, (ii) TerraMind can leverage \"thinking in modalities\" (TiM)—the capability of generating additional artificial data during finetuning and inference to improve the model output—and (iii) TerraMind's dual-scale early fusion approach results in well-structured embedding spaces. Models and code have been open-sourced at https://huggingface.co.ibm-esa-geospatialandhttps://github.com.ibm/terramind." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "text", + "content": "This ICCV paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 693, + 125, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 693, + 125, + 703 + ], + "spans": [ + { + "bbox": [ + 56, + 693, + 125, + 703 + ], + "type": "text", + "content": "* Equal contribution" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 704, + 122, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 704, + 122, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 704, + 122, + 712 + ], + "type": "text", + "content": "† Equal supervision" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7383" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 136, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 136, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 136, + 83 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 95, + 297, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 95, + 297, + 228 + ], + "spans": [ + { + "bbox": [ + 55, + 95, + 297, + 228 + ], + "type": "text", + "content": "Earth observation (EO) increasingly benefits from multimodality because of the important integration of complementary information from different data sources. This becomes particularly relevant as EO is spatiotemporally sparse due to low revisiting times or weather phenomena like cloud coverage. Vice versa, for computer vision, EO data is an important playground for the development of new approaches as there is significant publicly available data of very high quality and complexity. The available modalities range from sensors of different satellite missions to relevant complementary information like digital elevation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 231, + 296, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 231, + 296, + 542 + ], + "spans": [ + { + "bbox": [ + 56, + 231, + 296, + 542 + ], + "type": "text", + "content": "In this work, we introduce TerraMind as the first any-to-any generative multimodal model for EO. With TerraMind, we introduce a dual-scale pretraining on pixel-level and token-level and demonstrate benefits over training primarily on tokens. 
TerraMind encodes high-level contextual information in tokens to enable correlation learning and scaling, while, additionally capturing important fine-grained representations using pixel-level inputs. During pretraining, TerraMind predicts masked target tokens so that our pretraining objective boils down to a cross-modal patch classification problem that results in high-quality latent spaces. TerraMind is pretrained on a custom global-scale geospatial dataset named TerraMesh with nine million samples that have been aligned spatiotemporally and across modalities [7]. In addition to radar and optical satellite images of the Copernicus Sentinel-1 (S-1) and Sentinel-2 (S-2) missions, our dataset contains task-specific modalities such as land use/land cover (LULC) and normalized difference vegetation index (NDVI) maps, metadata like digital elevation models (DEM) and geographic coordinates, and natural language in the form of captions. To the best of our knowledge, TerraMind represents the first truly generative, multimodal deep learning model for EO. Additionally, in contrast to other recent models that utilize masked autoencoders like [54], contrastive learning, or diffusion techniques, TerraMind uniquely demonstrates benefits of leveraging token-based pretraining for EO." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 546, + 298, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 546, + 298, + 714 + ], + "spans": [ + { + "bbox": [ + 54, + 546, + 298, + 714 + ], + "type": "text", + "content": "We provide an overview of TerraMind's performance in a community-standard benchmark [49] in Figure 2 and highlight the any-to-any generative capabilities of TerraMind in Figure 3. Our key contributions are as follows: (i) We introduce a dual-scale approach for generative multimodal pre-training leveraging data on pixel-level and token-level, which outperforms other fusion approaches and enhances embedding space structures. (ii) We introduce thinking in modalities - similar to chain-of-thought approaches in LLMs - for multi-modal models in EO, demonstrating that infusing generated data during finetuning improves the performance. (iii) We demonstrate that TerraMind outperforms other geospatial foundation models both in unimodal and multimodal settings." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 314, + 71, + 402, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 71, + 402, + 84 + ], + "spans": [ + { + "bbox": [ + 314, + 71, + 402, + 84 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 91, + 555, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 91, + 555, + 426 + ], + "spans": [ + { + "bbox": [ + 313, + 91, + 555, + 426 + ], + "type": "text", + "content": "Computer vision in Earth observation. Computer vision (CV) has significantly advanced EO [76]. Many CV techniques, originally developed for natural image processing, have been adapted to EO [62], often with minimal modifications. A wide range of tasks benefit from these methods, including classification [16], semantic segmentation [72] (e.g., land cover mapping [20, 21]), change detection [59] (e.g., disaster response [19]), object detection [39] (e.g., vessel identification [55]), and regression (e.g., biomass estimation [53]). 
Deep learning architectures like CNNs [75] and Vision Transformers (ViTs) [17] have demonstrated strong performance, often surpassing traditional remote sensing (RS) methods. However, EO presents unique challenges, including diverse sensor modalities [4] and geospatial heterogeneity [46]. An emerging paradigm in EO is self-supervised learning (SSL) [64] and geospatial foundation models (GFMs) [45], which aim to leverage vast amounts of unlabeled RS data to develop general purpose task models [32]. While off-the-shelf CV models have shown promising results [36], they do not fully exploit the unique characteristics of geospatial data. Many GFMs still rely on generic CV architectures [50], which were not explicitly designed to handle the complexities of EO, such as heterogeneous sensor sources (e.g., optical, radar, DEM) [29], integrated with auxiliary data (e.g., text) [42, 47], and expert knowledge (e.g., prioritizing specific bands or indexes). In this direction, TerraMind better integrates domain-specific properties, developing a customized and expandable multimodal learning strategy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 427, + 556, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 427, + 556, + 477 + ], + "spans": [ + { + "bbox": [ + 313, + 427, + 556, + 477 + ], + "type": "text", + "content": "Multimodality in CV. Multimodal CV is driven by the integration of diverse data streams [69], such as natural images [74], natural language text [10], temporal video data [58], and weather [70], within large foundation models [8]." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 316, + 495, + 551, + 668 + ], + "blocks": [ + { + "bbox": [ + 316, + 495, + 551, + 668 + ], + "lines": [ + { + "bbox": [ + 316, + 495, + 551, + 668 + ], + "spans": [ + { + "bbox": [ + 316, + 495, + 551, + 668 + ], + "type": "image", + "image_path": "89ab3f4a24a8f0f22c145b98d7aa131473829751dc280f48d7fdfe81a559cdff.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 677, + 557, + 711 + ], + "lines": [ + { + "bbox": [ + 313, + 677, + 557, + 711 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 557, + 711 + ], + "type": "text", + "content": "Figure 2. TerraMind outperforms other geospatial foundation models on PANGAEA benchmark [49] in finetuning. Performance is measured in mIoU and min-max scaled per dataset." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7384" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 70, + 556, + 214 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 556, + 214 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 556, + 214 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 556, + 214 + ], + "type": "image", + "image_path": "26ad38ec634ed36b3b55e5dbf6b8398dd71b67db16bdc57353ddbf06d8c0c07f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 220, + 555, + 243 + ], + "lines": [ + { + "bbox": [ + 55, + 220, + 555, + 243 + ], + "spans": [ + { + "bbox": [ + 55, + 220, + 555, + 243 + ], + "type": "text", + "content": "Figure 3. Chained generation example of TerraMindv1-B starting from either optical, radar, or digital elevation data. Left is input, middle is artificially generated data by TerraMind, right represents ground truths and tokenizer reconstructions, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 264, + 296, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 264, + 296, + 407 + ], + "spans": [ + { + "bbox": [ + 54, + 264, + 296, + 407 + ], + "type": "text", + "content": "Starting from the alignment of images and texts [57], these models moved beyond simple feature extraction, towards nuanced contextual understanding. The ability to combine several modalities allows for unprecedented capabilities in complex tasks [30], evidenced by the rapid advancement of multimodal Large Language Models (MLLMs) [30], that excel in tasks such as scene understanding [12], visual question answering [18], and video analysis [24]. Recent advances in architectures [31] and large scale pre-training [11] have enabled the development of models that learn highly effective cross-modal representations [41], which can then be adapted to a wide variety of downstream tasks [66]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 408, + 296, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 408, + 296, + 659 + ], + "spans": [ + { + "bbox": [ + 55, + 408, + 296, + 659 + ], + "type": "text", + "content": "Multimodality in EO. Multimodality in EO originates from data fusion and is typically understood as the integration of SAR and optical data [13, 25, 28, 38] or the combination of optical data with vector data [5]. Some studies have explored alternative combinations of data. In [15], the authors introduce a contrastive framework for comparing RS images and street views. Even different optical sensors can be considered different modalities [48, 61]. Similarly, several multi-view images (i.e. multimodal) datasets [26, 44, 54] are introduced. More recent approaches combined text and images [40], both for discriminative [42] and generative [34] purposes. Lately, different GFMs are trained in a multimodal way [4, 54, 68], still focusing either on a specific set of modalities (e.g., vision [68], [3]) or tasks (e.g., generative [34]). 
Compared to multi-scale high-quality generation models for optical data, like MetaEarth [71], our approach allows to generate any modality from any other pretraining modality. To the best of our knowledge, no existing model has combined a wide and diverse amount of modalities both for discriminative and generative purposes, as TerraMind does. We provide a comparison in Table 1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 669, + 111, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 669, + 111, + 681 + ], + "spans": [ + { + "bbox": [ + 55, + 669, + 111, + 681 + ], + "type": "text", + "content": "3. Dataset" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "content": "For the pretraining of TerraMind and its tokenizers, we create a multimodal dataset called TerraMesh [7], which will" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 315, + 261, + 555, + 475 + ], + "blocks": [ + { + "bbox": [ + 315, + 261, + 555, + 475 + ], + "lines": [ + { + "bbox": [ + 315, + 261, + 555, + 475 + ], + "spans": [ + { + "bbox": [ + 315, + 261, + 555, + 475 + ], + "type": "table", + "html": "
<table><tr><th>Model</th><th>Modalities</th><th>Any-to-Any Generation</th><th>Multi-Scale Features</th></tr>
<tr><td>RemoteCLIP</td><td>optical, text</td><td>X</td><td>X</td></tr>
<tr><td>CROMA</td><td>optical, radar</td><td>X</td><td>X</td></tr>
<tr><td>AnySat</td><td>aerial, optical, radar, NAIP</td><td>X</td><td>X</td></tr>
<tr><td>DeCUR</td><td>optical, radar</td><td>X</td><td>X</td></tr>
<tr><td>DOFA</td><td>optical, radar, hyperspectral, NAIP</td><td>X</td><td>X</td></tr>
<tr><td>MetaEarth</td><td>optical (unimodal)</td><td>X</td><td>✓</td></tr>
<tr><td>Galileo</td><td>optical, radar, elevation, weather, location, population, ...</td><td>X</td><td>✓</td></tr>
<tr><td>TerraMind</td><td>optical, radar, land use, elevation, vegetation index, location, text</td><td>✓</td><td>✓</td></tr></table>
", + "image_path": "e99accac7b0552322e0145e2b418e3c374d12d577dbd6d217a88426840df1874.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 483, + 555, + 507 + ], + "lines": [ + { + "bbox": [ + 313, + 483, + 555, + 507 + ], + "spans": [ + { + "bbox": [ + 313, + 483, + 555, + 507 + ], + "type": "text", + "content": "Table 1. Comparison of TerraMind to other model architectures. TerraMind represents a first-of-its-kind generative, multimodal model." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 532, + 555, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 532, + 555, + 592 + ], + "spans": [ + { + "bbox": [ + 313, + 532, + 555, + 592 + ], + "type": "text", + "content": "be open-sourced to the community. TerraMesh builds on existing datasets, which we expand by adding modalities from external data sources or by applying pseudo-labeling. We provide an overview of the aligned image modalities and a detailed dataset description in the supplementary material." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 594, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 556, + 713 + ], + "type": "text", + "content": "Base datasets. TerraMesh is based on SSL4EO-S12 [6, 65] and MajorTOM-Core [23], two unlabeled remote sensing datasets containing co-aligned radar and optical imagery from Sentinel-1 and Sentinel-2 satellites. SSL4EO-S12 has lower geographic coverage but is multi-seasonal. MajorTOM-Core covers most of the Earth's land surface at a single timestamp. For MajorTOM-Core, we apply a subsampling scheme based on LULC classes and ecoregions. TerraMesh includes a total of approximately 9 million globally distributed samples from both Sentinel-1 and Sentinel-2," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7385" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 262, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 262, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 262, + 83 + ], + "type": "text", + "content": "each measuring " + }, + { + "bbox": [ + 55, + 72, + 262, + 83 + ], + "type": "inline_equation", + "content": "264 \\times 264" + }, + { + "bbox": [ + 55, + 72, + 262, + 83 + ], + "type": "text", + "content": " pixels at " + }, + { + "bbox": [ + 55, + 72, + 262, + 83 + ], + "type": "inline_equation", + "content": "10\\mathrm{m}" + }, + { + "bbox": [ + 55, + 72, + 262, + 83 + ], + "type": "text", + "content": " resolution." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 84, + 297, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 84, + 297, + 277 + ], + "spans": [ + { + "bbox": [ + 55, + 84, + 297, + 277 + ], + "type": "text", + "content": "Additional modalities. We obtain co-aligned yearly LULC maps by ESRI with nine land use classes. Additionally, we leverage SEnSeI v2 [22] as a cloud and ice annotation model and update the ESRI LULC classes for better spatiotemporal alignment. 
NDVI maps are computed using the corresponding spectral bands from Sentinel-2. DEM is extracted from the Copernicus DEM 30m dataset [2], which provides global coverage of the Earth's elevation at a 30m resolution. Captions are generated synthetically by constructing RGB images from Sentinel-2 patches using the corresponding spectral bands and processing them with LLaVANext [37]. A tailored prompt guides the model to describe the content of each image as described in [47]. For geolocations, we round latitude and longitude from the center of each patch to the nearest quarter degree and store the discretized coordinates as strings in a pre-defined format." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 288, + 117, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 288, + 117, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 288, + 117, + 300 + ], + "type": "text", + "content": "4. Methods" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 308, + 296, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 308, + 296, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 308, + 296, + 369 + ], + "type": "text", + "content": "TerraMind pretraining is two-staged following [52]. We first pretrain unimodal tokenizer models, tokenize the modalities, and then leverage token-level and pixel-level input to pretrain the TerraMind encoder-decoder architecture. We describe those individual stages in the following." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 376, + 139, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 376, + 139, + 388 + ], + "spans": [ + { + "bbox": [ + 55, + 376, + 139, + 388 + ], + "type": "text", + "content": "4.1. Tokenization" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 394, + 296, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 394, + 296, + 478 + ], + "spans": [ + { + "bbox": [ + 55, + 394, + 296, + 478 + ], + "type": "text", + "content": "We develop modality-specific tokenizers to encode each modality into a sequence of discrete tokens for pretraining and decode token sequences back to images. Thus, TerraMind is in principle compatible with any modality, as long as it can be tokenized and aligned with other modalities. For reasons of space, we delegate most experiments related to the tokenizer performances to the supplementary material." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "type": "text", + "content": "Image-like modalities. We train autoencoder-based architectures with a quantization step in the bottleneck for image-like modalities such as S-1, S-2, LULC, NDVI, and DEM. Tokenizer encoders process an input image and generate a latent representation for each " + }, + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "type": "text", + "content": " patch, which is then discretized with finite-scalar-quantization (FSQ) [51] into one of " + }, + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "type": "text", + "content": " codewords. 
All tokenizers use a vocabulary size of 16K besides the simpler LULC modality for which we use 4K. These codewords are then used by the diffusion decoder to reconstruct the original image. The benefit of leveraging diffusion decoders lies in facilitating cross-modal generation in TerraMind by transforming tokens back into images. By mapping each codeword to a unique integer in " + }, + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "type": "inline_equation", + "content": "\\{0, 1, \\dots, N - 1\\}" + }, + { + "bbox": [ + 55, + 479, + 296, + 694 + ], + "type": "text", + "content": ", we obtain discrete tokens for each image patch. We pretrain the tokenizers in a self-supervised setting. FSQ as quantization method enhances training stability [51] compared to vector quantization [63] by eliminating the need for codebook-related loss terms. Notably, FSQ is" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 556, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 556, + 240 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 556, + 240 + ], + "type": "text", + "content": "heavily influenced by ideas of neural compression [27]. For example, on 12-band S-2 images, we achieve compression rates of over " + }, + { + "bbox": [ + 313, + 72, + 556, + 240 + ], + "type": "inline_equation", + "content": "3000\\mathrm{x}" + }, + { + "bbox": [ + 313, + 72, + 556, + 240 + ], + "type": "text", + "content": " by applying quantization. We summarize the architecture of our tokenizers in Figure 4. The main objective of the overall tokenizer is to encode image patches consistently into discrete tokens based on semantic similarity to enable cross-modal correlation learning. Therefore, the loss of some details is an expected trade-off, since the focus is on grouping similar patches rather than preserving all fine-grained features. Naturally, more accurate reconstructions facilitate cross-modal generation, however the main focus of the pretraining lies on consistent cross-modal correlation learning. We provided further details on the pretraining of the tokenizers in the supplementary material." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 318, + 252, + 555, + 330 + ], + "blocks": [ + { + "bbox": [ + 318, + 252, + 555, + 330 + ], + "lines": [ + { + "bbox": [ + 318, + 252, + 555, + 330 + ], + "spans": [ + { + "bbox": [ + 318, + 252, + 555, + 330 + ], + "type": "image", + "image_path": "71da8b1dfb813f0f4d5e280206571aa38f59c2eb458f021352749f9465058d54.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 342, + 556, + 365 + ], + "lines": [ + { + "bbox": [ + 313, + 342, + 556, + 365 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 556, + 365 + ], + "type": "text", + "content": "Figure 4. Tokenizer for image-like modalities combining finite-scalar quantization [51] with diffusion decoding." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 377, + 556, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 377, + 556, + 475 + ], + "spans": [ + { + "bbox": [ + 313, + 377, + 556, + 475 + ], + "type": "text", + "content": "Sequence-like modalities. We treat both captions and geolocations as text and use a single text tokenizer to process both modalities. By discretizing the geographic coordinates and representing them as strings, we introduce special coordinate tokens into the vocabulary. 
This allows us to encode geolocations as a sequence of discrete tokens, beginning with a latitude token followed by a longitude token. For textual data, we modify the existing WordPiece tokenizer [33]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 480, + 395, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 480, + 395, + 493 + ], + "spans": [ + { + "bbox": [ + 313, + 480, + 395, + 493 + ], + "type": "text", + "content": "4.2. Pre-training" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 498, + 556, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 556, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 556, + 594 + ], + "type": "text", + "content": "Architecture. TerraMind uses a symmetric Transformer-based encoder-decoder architecture proposed in [52], which is designed to process sequences of multimodal tokens. In addition to discrete tokens, TerraMind accepts pixel-level inputs, specifically satellite imagery and digital elevation maps. For pixel-level inputs, we apply learnable patch-wise linear projections to generate patch embeddings for each " + }, + { + "bbox": [ + 313, + 498, + 556, + 594 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 313, + 498, + 556, + 594 + ], + "type": "text", + "content": " patch, similar to the approach used in ViT [17]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 594, + 557, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 557, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 557, + 713 + ], + "type": "text", + "content": "Dual-scale early fusion. In contrast to [52], we not only embed token-level data but additionally leverage pixel-level data across a range of input modalities to introduce a dual-scale feature representation to support the structuring of the embedding space. Both tokens and patches represent a 16x16 pixel area. Tokens represent this area via a single discrete integer value, while the image patches describe the same area with the actual floating point sensor data. Thus, during pretraining, the model not only learns a correlation between modalities (i.e., cross-modal learning) but also between dif" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 56, + 704, + 259, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 704, + 259, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 704, + 259, + 712 + ], + "type": "text", + "content": "https://planetarycomputer.microsoft.com/dataset/io-lulc-annual-v02" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "7386" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 193 + ], + "type": "text", + "content": "ferent levels of abstraction within the same modality. The low-level token information enables cross-modal correlation learning, while adding pixel level input accounts for spatial nuances. 
Based on dual-scale features the model further learns to better structure pixel-level data in the embedding space via the corresponding information from the discrete token. We illustrate the pretraining paradigm in Figure 5. The model is agnostic to processing tokens or patches in the input space, while the target is generally token-level data. We use six pixel-level modalities and eight token-level modalities." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 56, + 203, + 289, + 290 + ], + "blocks": [ + { + "bbox": [ + 56, + 203, + 289, + 290 + ], + "lines": [ + { + "bbox": [ + 56, + 203, + 289, + 290 + ], + "spans": [ + { + "bbox": [ + 56, + 203, + 289, + 290 + ], + "type": "image", + "image_path": "e0986ca258c6aa8b5d527085e8ce35c951950eed3bc2982fada04e93f0661922.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 303, + 296, + 336 + ], + "lines": [ + { + "bbox": [ + 55, + 303, + 296, + 336 + ], + "spans": [ + { + "bbox": [ + 55, + 303, + 296, + 336 + ], + "type": "text", + "content": "Figure 5. Illustration of the pre-training task. Given an encoded multimodal sample of random subsets of patches and input tokens, the decoder predicts target tokens for the masked input." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 347, + 296, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 347, + 296, + 418 + ], + "spans": [ + { + "bbox": [ + 55, + 347, + 296, + 418 + ], + "type": "text", + "content": "Masking strategy. TerraMind applies a masked modeling approach in the token space following [52]. The model leverages a set of randomly selected target tokens that have to be reconstructed from a randomly selected set of input tokens and pixel-level data. During pre-training, we sample input and target data from a Dirichlet distribution." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 419, + 296, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 419, + 296, + 598 + ], + "spans": [ + { + "bbox": [ + 55, + 419, + 296, + 598 + ], + "type": "text", + "content": "We opt for masked token reconstruction to familiarize the model with the absence of entire modalities, which is crucial for a high usability of a multimodal model in Earth observation. During pre-training, the model learns an internal representation of unseen modalities which is expected to benefit a range of downstream applications. In addition, sampling input and target tokens improves the computational efficiency of the pre-training, as each token is a compressed representation of a patch with compression factors of between 250x and 3000x depending on the modality. Finally, without tokenized representations of the image-like modalities, it is challenging to learn the correlation to sequence-like modalities. 
The overall training objective of TerraMind boils down to a cross-modal patch-level classification problem optimized via a cross entropy loss:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 125, + 605, + 296, + 637 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 605, + 296, + 637 + ], + "spans": [ + { + "bbox": [ + 125, + 605, + 296, + 637 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {C E}} = - \\sum_ {i = 1} ^ {N} y _ {i} \\log \\left(p _ {i}\\right), \\tag {1}", + "image_path": "5c964073da44bbe5bd363bfad93b2c657fb846153d402f0c333cc6ceb4c0c0de.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "content": " is the one-hot encoded true class of token " + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "inline_equation", + "content": "p_{i}" + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "content": " is the predicted probability for token " + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "content": " is the total number of possible tokens. Interestingly, we can infer an upper bound loss for a random model where the cross entropy loss will collapse to the natural logarithm of the vocabulary size " + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{CE,random}} = -\\sum_{i=1}^{N} y_{i} \\log \\left( \\frac{1}{N} \\right) = \\log(N)" + }, + { + "bbox": [ + 55, + 642, + 296, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 555, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 348 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 348 + ], + "type": "text", + "content": "Scaling. We trained three versions of TerraMind scaling across model size, compute, and data. In addition, we pretrain different versions of TerraMind with respect to the number of dual-scale features. TerraMindv1-B is pre-trained on 500B tokens for 6 days on 32 NVIDIA A100 GPUs. The model uses dual-scale features from both token-level and pixel-level. During initial experiments, we observed significant improvements from scaling model size when switching from a tiny backbone to a small backbone to a base backbone. Therefore, we pre-trained TerraMindv1-L on a large backbone with 500B tokens on 32 NVIDIA A100 GPUs trained for 10 days. 
Finally, to better understand the effect of scaling across the dual-scale feature representation, we pre-train TerraMindv1-B-single as a single-scale model on primarily token-level data with optical S-2 L2A data as only pixel-level input (compared to pixel-level S-2 L1C, S-2 RGB, S-1 GRD, S-1 RTC, and DEM in TerraMindv1-B and -L). TerraMindv1-B-single is pretrained on 500B tokens from over one million samples for 6 days on 32 NVIDIA A100 GPUs. We summarize the scaling behavior in model size, compute, and data in Figure 9 of the supplementary material. We additionally provide final validation losses in Table 9 comparing v1-B and v1-L with the theoretical random loss." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 357, + 390, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 390, + 369 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 390, + 369 + ], + "type": "text", + "content": "4.3. Generation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 374, + 556, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 374, + 556, + 578 + ], + "spans": [ + { + "bbox": [ + 313, + 374, + 556, + 578 + ], + "type": "text", + "content": "Once pretrained, TerraMind can generate tokens for any modality, conditioned on any subset of input modalities. The generative capabilities unlock various zero-shot tasks, such as water body segmentation. For the generation of image-like modalities, the decoder receives mask tokens for the modality to be generated and predicts the corresponding tokens based on the encoded input. For sequence-like modalities, the decoder generates the output autoregressively. After generating tokens from the target modality, the corresponding tokenizer decoder allows to map from token-space to image or text space. TerraMind further supports chained generation which ensures consistency across generated modalities. The chained generation represents a conditional probability distribution where the prior probability distribution is determined by the input modality, and all subsequent modalities are generated conditioned on the input modality and potentially other generated modalities." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 599, + 446, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 599, + 446, + 612 + ], + "spans": [ + { + "bbox": [ + 313, + 599, + 446, + 612 + ], + "type": "text", + "content": "4.4. Thinking-in-Modalities" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "content": "Thinking in Modalities (TiM) is a recursive fine-tuning and inference technique designed to enhance multimodal learning by leveraging the generative capabilities of the model itself. 
Given an input " + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{X}" + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "content": " (e.g., an optical satellite image), the model first generates additional synthetic modalities " + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "inline_equation", + "content": "\\tilde{x} = f_{\\mathrm{gen}}(x)" + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "content": " on a token-level using a learned generative function " + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{gen}}" + }, + { + "bbox": [ + 313, + 617, + 556, + 713 + ], + "type": "text", + "content": ". These generated tokens are then concatenated with the original input and jointly processed by the downstream" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7387" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "content": "model " + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "content": " (e.g., TerraMind encoder with a segmentation head), yielding the final output " + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "inline_equation", + "content": "y = f(x, f_{\\mathrm{gen}}(x))" + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "content": ". This formulation allows the model to reason over both observed and inferred modalities, effectively enriching the input space. TiM can leverage multiple generated modalities which are then generated in a chained approach. 
For example, for " + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "content": " modalities, the input is augmented with newly generated modalities:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 163, + 296, + 178 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 163, + 296, + 178 + ], + "spans": [ + { + "bbox": [ + 118, + 163, + 296, + 178 + ], + "type": "interline_equation", + "content": "\\tilde {x} ^ {(k + 1)} = \\tilde {x} ^ {(k)} \\cup f _ {\\text {g e n}} (\\tilde {x} ^ {(k)}), \\tag {2}", + "image_path": "60b6eb23b3dd22e279e951a2d476553e023a1c300c717e47891d033a65012a8c.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 186, + 228, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 186, + 228, + 198 + ], + "spans": [ + { + "bbox": [ + 55, + 186, + 228, + 198 + ], + "type": "text", + "content": "and the final model output is described by:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 146, + 205, + 296, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 205, + 296, + 220 + ], + "spans": [ + { + "bbox": [ + 146, + 205, + 296, + 220 + ], + "type": "interline_equation", + "content": "y = f \\left(\\tilde {x} ^ {(K)}\\right). \\tag {3}", + "image_path": "0ccb2554b06baa9d62c6a74f70c7943a8df9804907704db9b3a3219bda8f6be8.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 227, + 297, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 227, + 297, + 264 + ], + "spans": [ + { + "bbox": [ + 55, + 227, + 297, + 264 + ], + "type": "text", + "content": "This recursive augmentation mimics a chain-of-thought process, enabling the model to iteratively refine its internal representation, particularly in scenarios with missing modalities." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 276, + 137, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 276, + 137, + 289 + ], + "spans": [ + { + "bbox": [ + 55, + 276, + 137, + 289 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 296, + 296, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 296, + 296, + 333 + ], + "spans": [ + { + "bbox": [ + 55, + 296, + 296, + 333 + ], + "type": "text", + "content": "In this section, we describe the performance gains resulting from TerraMind and experiment with the unlocked capabilities of any-to-any generation and Thinking-in-Modalities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 339, + 201, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 339, + 201, + 352 + ], + "spans": [ + { + "bbox": [ + 55, + 339, + 201, + 352 + ], + "type": "text", + "content": "5.1. Foundational experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 356, + 296, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 356, + 296, + 487 + ], + "spans": [ + { + "bbox": [ + 55, + 356, + 296, + 487 + ], + "type": "text", + "content": "Multimodality vs. unimodality. As a first motivational experiment, we outline the benefit of using multimodal data in Earth observation at the example of water body mapping. 
Specifically, we leverage the ViT-B encoders from the unimodal tokenizer models for S-1, S-2, and LULC, concatenate their embeddings, and train a segmentation head with four ConvNeXt [43] blocks as a late fusion approach. The results in Table 2 (left) suggest that regardless of which modalities we combine, the combination of two modalities always outperforms each unimodal model. Combining all three modalities achieves the best overall performance." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 78, + 497, + 273, + 591 + ], + "blocks": [ + { + "bbox": [ + 78, + 497, + 273, + 591 + ], + "lines": [ + { + "bbox": [ + 78, + 497, + 273, + 591 + ], + "spans": [ + { + "bbox": [ + 78, + 497, + 273, + 591 + ], + "type": "table", + "html": "
<table><tr><th>Input</th><th>Late fusion</th><th>Token-level fusion</th></tr>
<tr><td>S-1</td><td>61.01</td><td>63.94 (2.93pp↑)</td></tr>
<tr><td>S-2</td><td>72.70</td><td>76.32 (3.62pp↑)</td></tr>
<tr><td>LULC</td><td>71.77</td><td>70.96 (0.81pp↓)</td></tr>
<tr><td>S-1 + S-2</td><td>73.83</td><td>76.74 (2.91pp↑)</td></tr>
<tr><td>S-1 + LULC</td><td>73.86</td><td>73.76 (0.10pp↓)</td></tr>
<tr><td>S-2 + LULC</td><td>75.65</td><td>77.04 (1.39pp↑)</td></tr>
<tr><td>S-1 + S-2 + LULC</td><td>76.00</td><td>76.88 (0.88pp↑)</td></tr></table>
", + "image_path": "4a94b084cb60ebef806fd521fdeb71037ea6eccbcf9986eac6b25c653d4d8bbd.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "Token-level fusion vs. late fusion. In Table 2 (right), we investigate the effects of fusing the inputs on a token level" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 72, + 556, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 556, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 556, + 168 + ], + "type": "text", + "content": "through masked token reconstruction. We observe that token-level fusion outperforms late fusion. The performance gains are particularly high when LULC data is not available. This suggests that early fusion captures an internal representation of the multimodal state—especially pronounced for LULC—that benefits fine-tuning. With those findings in mind, we will explore the effects of using additional multi-modal pixel-level input in a dual-scale pretraining in Section 5.5." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 186, + 450, + 199 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 186, + 450, + 199 + ], + "spans": [ + { + "bbox": [ + 313, + 186, + 450, + 199 + ], + "type": "text", + "content": "5.2. Generation experiments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 207, + 556, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 207, + 556, + 507 + ], + "spans": [ + { + "bbox": [ + 313, + 207, + 556, + 507 + ], + "type": "text", + "content": "TerraMind supports any-to-any generation. In the following, we provide examples of the generation performance starting from: (i) an information-rich modality, like optical S-2 L2A data, and (ii) minimal information based on the geolocation. In Figure 3, we observe that TerraMind performs strongly in generating image-like modalities like S-1, LULC, and DEM from optical S-2 L2A data. We provide a quantitative overview on the quality of the generations on unseen validation data in Table 3. Overall, we observe an interesting asymmetry in the generative performance of TerraMind where (a) radar-to-optical generation achieves reasonable quality in terms of SSIM and PSNR – indicating structural and visual fidelity with some perceptual degradation – and (b) optical-to-radar generation yields higher PSNR values but lower SSIM, suggesting visually plausible outputs that lack strong structural alignment. The quality of generated DEM suggests to be structurally very strong, but noisy. The errors for DEM generations suggest that the level of altitude is difficult to infer for the model. We compare these scores with the reconstruction quality of the auto-encoding tokenizers in the supplementary material that can serve as upper bounds. Additionally, we provide experiments on the generation quality using token-level instead of pixel-level inputs. Finally, we demonstrate the quality of generations at kilometer scale in Figures 19 and 20." 
+ } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 318, + 523, + 552, + 642 + ], + "blocks": [ + { + "bbox": [ + 55, + 600, + 296, + 677 + ], + "lines": [ + { + "bbox": [ + 55, + 600, + 296, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 600, + 296, + 677 + ], + "type": "text", + "content": "Table 2. Water body mapping on Sen1Floods11 [9] measured in IoU on water class. Model sizes and architectures are comparable. Left column: Late fusion of tokenizers. The average improvement of full multimodality over the individual unimodal performance is 7.5pp IoU. Right column: Finetuning results of TerraMindv1-B-single as a mid fusion approach based on masked correlation learning. Gains over late fusion in percentage points in parentheses." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 523, + 552, + 642 + ], + "lines": [ + { + "bbox": [ + 318, + 523, + 552, + 642 + ], + "spans": [ + { + "bbox": [ + 318, + 523, + 552, + 642 + ], + "type": "table", + "html": "
<table><tr><th>Modalities</th><th>MAE↓</th><th>RMSE↓</th><th>SSIM↑</th><th>PSNR↑</th></tr>
<tr><td>S-1 GRD → S-2 L2A</td><td>0.074</td><td>0.116</td><td>0.750</td><td>26.210</td></tr>
<tr><td>S-1 GRD → DEM</td><td>163.0</td><td>320.8</td><td>0.878</td><td>20.694</td></tr>
<tr><td>S-1 GRD → NDVI</td><td>0.180</td><td>0.225</td><td>0.438</td><td>18.990</td></tr>
<tr><td>S-1 RTC → S-2 L2A</td><td>0.113</td><td>0.194</td><td>0.695</td><td>24.251</td></tr>
<tr><td>S-1 RTC → DEM</td><td>298.8</td><td>799.2</td><td>0.873</td><td>20.009</td></tr>
<tr><td>S-1 RTC → NDVI</td><td>0.172</td><td>0.211</td><td>0.465</td><td>19.529</td></tr>
<tr><td>S-2 L2A → S-1 GRD</td><td>2.942</td><td>3.877</td><td>0.531</td><td>28.678</td></tr>
<tr><td>S-2 L2A → S-1 RTC</td><td>2.636</td><td>3.391</td><td>0.430</td><td>28.993</td></tr>
<tr><td>S-2 L2A → DEM</td><td>215.8</td><td>745.5</td><td>0.942</td><td>20.616</td></tr></table>
", + "image_path": "8d44d87fb7279771d4eb4bbf789b50b91b744118f08d738966d3b9c0de1324f7.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 650, + 556, + 684 + ], + "lines": [ + { + "bbox": [ + 313, + 650, + 556, + 684 + ], + "spans": [ + { + "bbox": [ + 313, + 650, + 556, + 684 + ], + "type": "text", + "content": "Table 3. Quantitative evaluation of generations on unseen global validation dataset using 10 diffusion steps. MAE and RMSE metrics are in physical units: meter (DEM), reflectance (S-2), and db (S-1)." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7388" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 72, + 171, + 131 + ], + "blocks": [ + { + "bbox": [ + 59, + 72, + 171, + 131 + ], + "lines": [ + { + "bbox": [ + 59, + 72, + 171, + 131 + ], + "spans": [ + { + "bbox": [ + 59, + 72, + 171, + 131 + ], + "type": "image", + "image_path": "ecd65734e16cd2da1375f189a89942b655bab4f6d4ea51b928ebdd4441055148.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 134, + 172, + 153 + ], + "lines": [ + { + "bbox": [ + 56, + 134, + 172, + 153 + ], + "spans": [ + { + "bbox": [ + 56, + 134, + 172, + 153 + ], + "type": "text", + "content": "(a) Input: S-2 L2A data capturing Singapore in January 2025." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 181, + 72, + 292, + 131 + ], + "blocks": [ + { + "bbox": [ + 181, + 72, + 292, + 131 + ], + "lines": [ + { + "bbox": [ + 181, + 72, + 292, + 131 + ], + "spans": [ + { + "bbox": [ + 181, + 72, + 292, + 131 + ], + "type": "image", + "image_path": "93cf79200582ee94491ba9ee7aa21762717149e729068fe33c90012c0c793f3d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 134, + 295, + 153 + ], + "lines": [ + { + "bbox": [ + 179, + 134, + 295, + 153 + ], + "spans": [ + { + "bbox": [ + 179, + 134, + 295, + 153 + ], + "type": "text", + "content": "(b) Generation: S-1 RTC composition generated by TerraMind." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 165, + 171, + 234 + ], + "blocks": [ + { + "bbox": [ + 58, + 165, + 171, + 234 + ], + "lines": [ + { + "bbox": [ + 58, + 165, + 171, + 234 + ], + "spans": [ + { + "bbox": [ + 58, + 165, + 171, + 234 + ], + "type": "image", + "image_path": "a9582e25b72bba7331bcf2355f6ee19a7a7832cdd841268bd8932a766d10c44f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 237, + 172, + 256 + ], + "lines": [ + { + "bbox": [ + 56, + 237, + 172, + 256 + ], + "spans": [ + { + "bbox": [ + 56, + 237, + 172, + 256 + ], + "type": "text", + "content": "(c) Input: S-2 L2A data capturing Northern Spain in January 2025." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 181, + 166, + 292, + 233 + ], + "blocks": [ + { + "bbox": [ + 181, + 166, + 292, + 233 + ], + "lines": [ + { + "bbox": [ + 181, + 166, + 292, + 233 + ], + "spans": [ + { + "bbox": [ + 181, + 166, + 292, + 233 + ], + "type": "image", + "image_path": "2339bc758ec8ffc0eb95784cdb7350fbdf51c620f199fe7eeeba6e7f859f7db4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 237, + 295, + 256 + ], + "lines": [ + { + "bbox": [ + 179, + 237, + 295, + 256 + ], + "spans": [ + { + "bbox": [ + 179, + 237, + 295, + 256 + ], + "type": "text", + "content": "(d) Generation: S-1 GRD composition generated by TerraMind." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 266, + 295, + 289 + ], + "lines": [ + { + "bbox": [ + 55, + 266, + 295, + 289 + ], + "spans": [ + { + "bbox": [ + 55, + 266, + 295, + 289 + ], + "type": "text", + "content": "Figure 6. Generated S-1 imagery using TerraMind. We provide large-scale visualizations in the supplementary material." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 308, + 184, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 308, + 184, + 320 + ], + "spans": [ + { + "bbox": [ + 55, + 308, + 184, + 320 + ], + "type": "text", + "content": "5.3. Zero-shot experiments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 326, + 296, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 326, + 296, + 385 + ], + "spans": [ + { + "bbox": [ + 55, + 326, + 296, + 385 + ], + "type": "text", + "content": "Based on its generative capabilities, TerraMind unlocks several zero-shot applications, like land-use segmentation, water body mapping, geo-localization, and vegetation mapping. In the following, we focus on water body mapping and geo-localization as image- and sequence-level zero-shot tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "spans": [ + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "text", + "content": "Water body mapping. In Table 4, we compare the zero-shot performance of TerraMind with its fine-tuned performance and other finetuned benchmarks for water body mapping. Overall, TerraMindv1-B achieves a zero-shot IoU of " + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "inline_equation", + "content": "45.4\\%" + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "text", + "content": " compared to SOTA-level fine-tuning performance of " + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "inline_equation", + "content": "82.2\\%" + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "text", + "content": " of DeCUR. 
In ablations with TerraMindv1-B-single trained on DynamicWorld LULC data, we boost this to up to " + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "inline_equation", + "content": "69.8\\%" + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "text", + "content": " suggesting that TerraMind harnesses up to over " + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 55, + 386, + 296, + 529 + ], + "type": "text", + "content": " of the SOTA performance in zero-shot setting. Additionally, it's notable that none of the benchmarking model can be applied in a zero-shot context, highlighting the relevance of TerraMind's capabilities." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 80, + 538, + 272, + 634 + ], + "blocks": [ + { + "bbox": [ + 80, + 538, + 272, + 634 + ], + "lines": [ + { + "bbox": [ + 80, + 538, + 272, + 634 + ], + "spans": [ + { + "bbox": [ + 80, + 538, + 272, + 634 + ], + "type": "table", + "html": "
<tr><td>Model</td><td>Input</td><td>Type</td><td>IoU Water</td></tr>
<tr><td>TerraMindv1-B</td><td>S-2</td><td>zero-shot</td><td>45.40</td></tr>
<tr><td>TerraMindv1-B-single</td><td>S-2</td><td>zero-shot</td><td>69.75</td></tr>
<tr><td>Prithvi 2.0 / DeCUR / ...</td><td></td><td>zero-shot</td><td>N/A</td></tr>
<tr><td>Baseline [9]</td><td>S-2</td><td>finetune</td><td>31.25</td></tr>
<tr><td>Prithvi 2.0 300M</td><td>S-2</td><td>finetune</td><td>80.97</td></tr>
<tr><td>DeCUR</td><td>S-2</td><td>finetune</td><td>82.17</td></tr>
", + "image_path": "5e41b8de076f12a79156ced1ee4defed1e0191d11046bc88d737cdc73d98364e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 713 + ], + "type": "text", + "content": "Geo-localization. TerraMind is able to predict the geolocation of a specific data instance. To better visualize the geolocation capabilities, we prompt the model for the most" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 72, + 554, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 554, + 131 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 554, + 131 + ], + "type": "text", + "content": "likely locations of the land use class \"bare land\" (deserts etc.) in a Monte-Carlo-sampling in Figure 7. The probability distribution of the model fits the expectation of where to find bare land, highlighting the Sahara region and middle-east, as well as Mexico and Southern California." + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 345, + 142, + 523, + 230 + ], + "blocks": [ + { + "bbox": [ + 345, + 142, + 523, + 230 + ], + "lines": [ + { + "bbox": [ + 345, + 142, + 523, + 230 + ], + "spans": [ + { + "bbox": [ + 345, + 142, + 523, + 230 + ], + "type": "image", + "image_path": "442bc09a3bff1182ff8bb275d44d364622dcffd33c27729448ba71c7b3172b1d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 241, + 555, + 285 + ], + "lines": [ + { + "bbox": [ + 313, + 241, + 555, + 285 + ], + "spans": [ + { + "bbox": [ + 313, + 241, + 555, + 285 + ], + "type": "text", + "content": "Figure 7. Prediction distribution of the land use class \"bare land\" with a sampling temperature of " + }, + { + "bbox": [ + 313, + 241, + 555, + 285 + ], + "type": "inline_equation", + "content": "T = 1.0" + }, + { + "bbox": [ + 313, + 241, + 555, + 285 + ], + "type": "text", + "content": " using TerraMindv1-B-single. TerraMind has an accurate internal representation of the geolocation of specific contexts, like land use classes." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 308, + 440, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 308, + 440, + 320 + ], + "spans": [ + { + "bbox": [ + 313, + 308, + 440, + 320 + ], + "type": "text", + "content": "5.4. Few-shot experiments" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 326, + 555, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 326, + 555, + 530 + ], + "spans": [ + { + "bbox": [ + 313, + 326, + 555, + 530 + ], + "type": "text", + "content": "TerraMind is trained via a cross-modal patch classification objective. Thus, we expect a well-structured latent space that clusters different concepts accurately. To investigate our hypothesis, we apply 1-Nearest-Neighbor (1-NN) classification experiments in the community-standard setting of 1-shot 5-way on two datasets: EuroSAT and METER-ML. In those experiments, there are no weight updates of any kind, so that we can assess the quality of the embedding space structure. In Table 5, we observe that TerraMind outperforms several other benchmarks from both the CV and EO domain on the EuroSAT dataset by at least 10pp in accuracy. 
Our results further show that for methane source classification on METER-ML, TerraMind outperforms benchmark models and generalizes to high-resolution NAIP data with one order of magnitude higher resolution than the pre-training data. We present additional experiments with other few-shot settings in the supplementary material." + } + ] + } + ], + "index": 19 + }, + { + "type": "table", + "bbox": [ + 321, + 540, + 547, + 645 + ], + "blocks": [ + { + "bbox": [ + 55, + 642, + 295, + 665 + ], + "lines": [ + { + "bbox": [ + 55, + 642, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 295, + 665 + ], + "type": "text", + "content": "Table 4. Zero-shot results of TerraMind on water body mapping compared to fine-tuned performance of benchmarks." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 321, + 540, + 547, + 645 + ], + "lines": [ + { + "bbox": [ + 321, + 540, + 547, + 645 + ], + "spans": [ + { + "bbox": [ + 321, + 540, + 547, + 645 + ], + "type": "table", + "html": "
<tr><td>Model</td><td>Input</td><td>EuroSAT</td><td>METER-ML</td></tr>
<tr><td>CLIP-ViT-B/16</td><td>S-2 RGB</td><td>57.00</td><td>29.15</td></tr>
<tr><td>CLIP-ViT-B/16</td><td>NAIP</td><td>-</td><td>32.01</td></tr>
<tr><td>DeCUR</td><td>S-2 L1C</td><td>50.54</td><td>27.87</td></tr>
<tr><td>Prithvi 1.0 100M</td><td>S-2 L1C</td><td>60.11</td><td>26.08</td></tr>
<tr><td>Prithvi 2.0 300M</td><td>S-2 L1C</td><td>61.06</td><td>28.26</td></tr>
<tr><td>TerraMindv1-B</td><td>S-2 L1C</td><td>70.83</td><td>33.90</td></tr>
<tr><td>TerraMindv1-B</td><td>NAIP</td><td>-</td><td>32.23</td></tr>
", + "image_path": "d53fc5b399a97b4b6fdef0fd3e89d1a0c2c282ff6812aac36344421d2cf4f603.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 654, + 555, + 698 + ], + "lines": [ + { + "bbox": [ + 313, + 654, + 555, + 698 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 555, + 698 + ], + "type": "text", + "content": "Table 5. 1-shot 5-way classification results on EuroSAT and METER-ML measured in mean accuracy " + }, + { + "bbox": [ + 313, + 654, + 555, + 698 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 313, + 654, + 555, + 698 + ], + "type": "text", + "content": ", averaged over 200 runs. TerraMind outperforms benchmarks from CV and EO domain, suggesting a well-structured latent space." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7389" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 553, + 259 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 553, + 259 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 553, + 259 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 553, + 259 + ], + "type": "table", + "html": "
<tr><td>Model</td><td>BurnSr*</td><td>MADOS*</td><td>PASTIS</td><td>Sen1Fl11</td><td>FBP*</td><td>DEN*</td><td>CTM-SS</td><td>SN7*</td><td>AI4Farms*</td><td>Avg. mIoU</td><td>Avg. Rank</td></tr>
<tr><td>CROMA</td><td>82.42</td><td>67.55</td><td>32.32</td><td>90.89</td><td>51.83</td><td>38.29</td><td>49.38</td><td>59.28</td><td>25.65</td><td>55.29</td><td>6.61</td></tr>
<tr><td>DOFA</td><td>80.63</td><td>59.58</td><td>30.02</td><td>89.37</td><td>43.18</td><td>39.29</td><td>51.33</td><td>61.84</td><td>27.07</td><td>53.59</td><td>8.22</td></tr>
<tr><td>GFM-Swin</td><td>76.90</td><td>64.71</td><td>21.24</td><td>72.60</td><td>67.18</td><td>34.09</td><td>46.98</td><td>60.89</td><td>27.19</td><td>52.42</td><td>10.00</td></tr>
<tr><td>Prithvi 1.0 100M</td><td>83.62</td><td>49.98</td><td>33.93</td><td>90.37</td><td>46.81</td><td>27.86</td><td>43.07</td><td>56.54</td><td>26.86</td><td>51.00</td><td>11.00</td></tr>
<tr><td>RemoteCLIP</td><td>76.59</td><td>60.00</td><td>18.23</td><td>74.26</td><td>69.19</td><td>31.78</td><td>52.05</td><td>57.76</td><td>25.12</td><td>51.66</td><td>11.22</td></tr>
<tr><td>SatlasNet</td><td>79.96</td><td>55.86</td><td>17.51</td><td>90.30</td><td>50.97</td><td>36.31</td><td>46.97</td><td>61.88</td><td>25.13</td><td>51.65</td><td>10.67</td></tr>
<tr><td>Scale-MAE</td><td>76.68</td><td>57.32</td><td>24.55</td><td>74.13</td><td>67.19</td><td>35.11</td><td>25.42</td><td>62.96</td><td>21.47</td><td>49.43</td><td>11.44</td></tr>
<tr><td>SpectralGPT</td><td>80.47</td><td>57.99</td><td>35.44</td><td>89.07</td><td>33.42</td><td>37.85</td><td>46.95</td><td>58.86</td><td>26.75</td><td>51.87</td><td>10.11</td></tr>
<tr><td>S.-S12-MoCo</td><td>81.58</td><td>51.76</td><td>34.49</td><td>89.26</td><td>53.02</td><td>35.44</td><td>48.58</td><td>57.64</td><td>25.38</td><td>53.02</td><td>10.06</td></tr>
<tr><td>S.-S12-DINO</td><td>81.72</td><td>49.37</td><td>36.18</td><td>88.61</td><td>51.15</td><td>34.81</td><td>48.66</td><td>56.47</td><td>25.62</td><td>52.51</td><td>10.89</td></tr>
<tr><td>S.-S12-MAE</td><td>81.91</td><td>49.90</td><td>32.03</td><td>87.79</td><td>51.92</td><td>34.08</td><td>45.80</td><td>57.13</td><td>24.69</td><td>51.69</td><td>12.39</td></tr>
<tr><td>S.-S12-Data2Vec</td><td>81.91</td><td>44.36</td><td>34.32</td><td>88.15</td><td>48.82</td><td>35.90</td><td>54.03</td><td>58.23</td><td>24.23</td><td>52.22</td><td>10.72</td></tr>
<tr><td>UNet Baseline</td><td>84.51</td><td>54.79</td><td>31.60</td><td>91.42</td><td>60.47</td><td>39.46</td><td>47.57</td><td>62.09</td><td>46.34</td><td>57.58</td><td>4.89</td></tr>
<tr><td>ViT Baseline</td><td>81.58</td><td>48.19</td><td>38.53</td><td>87.66</td><td>59.32</td><td>36.83</td><td>44.08</td><td>52.57</td><td>38.37</td><td>54.13</td><td>10.28</td></tr>
<tr><td>TerraMindv1-B</td><td>82.42</td><td>69.52</td><td>40.51</td><td>90.62</td><td>59.72</td><td>37.87</td><td>55.80</td><td>60.61</td><td>28.12</td><td>58.35</td><td>3.94</td></tr>
<tr><td>TerraMindv1-L</td><td>82.93</td><td>75.57</td><td>43.13</td><td>90.78</td><td>63.38</td><td>37.89</td><td>55.04</td><td>59.98</td><td>27.47</td><td>59.57</td><td>3.44</td></tr>
", + "image_path": "4412d721907d41d67686f46428339dc69d078241be78a050b87e7eaf63439ffb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 266, + 555, + 301 + ], + "lines": [ + { + "bbox": [ + 55, + 266, + 555, + 301 + ], + "spans": [ + { + "bbox": [ + 55, + 266, + 555, + 301 + ], + "type": "text", + "content": "Table 6. Performance evaluation of TerraMind using the PANGAEA evaluation protocol indicates higher mIoU values (↑) and lower rank values (↓). The best model per column is highlighted in bold, the second best is underscored. We indicate unimodal datasets with *. Encoders are frozen for pretrained models, while U-Net and ViT baselines are trained from scratch for each specific task." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 320, + 194, + 334 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 320, + 194, + 334 + ], + "spans": [ + { + "bbox": [ + 55, + 320, + 194, + 334 + ], + "type": "text", + "content": "5.5. Fine-tuning experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 339, + 297, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 339, + 297, + 592 + ], + "spans": [ + { + "bbox": [ + 54, + 339, + 297, + 592 + ], + "type": "text", + "content": "Besides the novel capabilities that TerraMind introduces, we benchmark the fine-tuning performance of TerraMind in both unimodal and multimodal settings following the community-standard PANGAEA benchmark [49]. We summarize the results in Table 6. Overall, TerraMindv1-B outperforms all other GeoFMs by at least 3pp avg. mIoU. Importantly, we observe that TerraMind is the only foundation model approach in EO that across the PANGAEA benchmark outperforms task-specific U-Net models. Performance increases by approximately 2pp avg. mIoU for TerraMindv1-L, with a peak of 5pp in multimodal datasets. Furthermore, TerraMindv1-L outperforms also specialised ViT baselines by 5pp avg. mIoU. Note that per suggestion of the PANGAEA authors, we exclude the xView2 and BioMassters task as we could not reproduce the reported performances. Finally, we assess the impact of leveraging multimodal data as input to TerraMindv1-B compared to utilizing either optical or radar data as unimodal input to better understand the effect of leveraging multimodal data in finetuning. We observe that across all three multimodal tasks, TerraMindv1-B performs best with access to both optical and radar data." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 85, + 604, + 267, + 666 + ], + "blocks": [ + { + "bbox": [ + 85, + 604, + 267, + 666 + ], + "lines": [ + { + "bbox": [ + 85, + 604, + 267, + 666 + ], + "spans": [ + { + "bbox": [ + 85, + 604, + 267, + 666 + ], + "type": "table", + "html": "
<tr><td></td><td>PASTIS</td><td>Sen1Fl11</td><td>CTM-SS</td></tr>
<tr><td>S-1</td><td>20.04</td><td>80.39</td><td>24.45</td></tr>
<tr><td>S-2</td><td>40.20</td><td>89.57</td><td>50.90</td></tr>
<tr><td>S-1 + S-2</td><td>40.51</td><td>90.62</td><td>55.80</td></tr>
", + "image_path": "76d0dcb5a3b001a3aee1560a4be356fce95718ddbaf9e018da73acd5d653d386.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 674, + 296, + 696 + ], + "lines": [ + { + "bbox": [ + 55, + 674, + 296, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 674, + 296, + 696 + ], + "type": "text", + "content": "Table 7. Benefit of using multimodal input in the PANGAEA benchmark reported in mIoU " + }, + { + "bbox": [ + 55, + 674, + 296, + 696 + ], + "type": "inline_equation", + "content": "(\\%)\\uparrow" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 320, + 443, + 334 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 320, + 443, + 334 + ], + "spans": [ + { + "bbox": [ + 313, + 320, + 443, + 334 + ], + "type": "text", + "content": "5.6. Thinking in modalities" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 338, + 555, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 338, + 555, + 435 + ], + "spans": [ + { + "bbox": [ + 313, + 338, + 555, + 435 + ], + "type": "text", + "content": "We additionally evaluate the value of TiM tuning on water body mapping. We use S-1 or S-2 to generate artificial LULC data as additional input. Our results in Table 8 indicate a superior performance of TiM tuning compared to leveraging uni-modal data by up to 2pp mIoU. This finding points us in the direction of TerraMind being able to generate data that improve downstream task performance. We provide additional results in the appendix." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 315, + 445, + 555, + 518 + ], + "blocks": [ + { + "bbox": [ + 315, + 445, + 555, + 518 + ], + "lines": [ + { + "bbox": [ + 315, + 445, + 555, + 518 + ], + "spans": [ + { + "bbox": [ + 315, + 445, + 555, + 518 + ], + "type": "table", + "html": "
<tr><td>Fine-Tuning</td><td>Input</td><td>IoU Water</td><td>mIoU</td></tr>
<tr><td>TerraMindv1-B</td><td>S-1</td><td>68.00</td><td>81.06</td></tr>
<tr><td>TerraMindv1-B</td><td>S-2</td><td>82.26</td><td>89.70</td></tr>
<tr><td>TerraMindv1-B TiM</td><td>S-1 + gen. LULC</td><td>72.25</td><td>83.65</td></tr>
<tr><td>TerraMindv1-B TiM</td><td>S-2 + gen. LULC</td><td>84.75</td><td>91.14</td></tr>
", + "image_path": "e57d722a55f43bd2375e5d31b0f63658d104c5a8813a596746414f0e63eeafbb.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 313, + 526, + 556, + 549 + ], + "lines": [ + { + "bbox": [ + 313, + 526, + 556, + 549 + ], + "spans": [ + { + "bbox": [ + 313, + 526, + 556, + 549 + ], + "type": "text", + "content": "Table 8. Thinking-in-modalities (TiM) tuning compared with standard full fine-tuning approaches on the Sen1Floods11 dataset." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 573, + 388, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 573, + 388, + 586 + ], + "spans": [ + { + "bbox": [ + 313, + 573, + 388, + 586 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 594, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 556, + 713 + ], + "type": "text", + "content": "TerraMind's approach of combining token-level and pixel-level data has unlocked a range of new model capabilities in EO. TerraMind demonstrates not only beyond state-of-the-art performance in community-standard benchmarks, it also represents the first fully generative multimodal model in the domain. Because of the ability of integrating heterogeneous data sources, we expect that TerraMind-like models will expand to multi-temporal, multi-resolution, and hyperspectral data to fully leverage the data rich ecosystem available in the Earth Observation domain." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7390" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 91, + 297, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 297, + 123 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 297, + 123 + ], + "type": "text", + "content": "[1] A. Hore and D. Ziou. Image quality metrics: PSNR vs. SSIM. In Proc. 20th International Conference on Pattern Recognition (ICPR), pp. 2366-2369, 2010. 16" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 125, + 296, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 125, + 296, + 145 + ], + "spans": [ + { + "bbox": [ + 61, + 125, + 296, + 145 + ], + "type": "text", + "content": "[2] European Space Agency. Copernicus dem. 
http://dx.doi.org/10.5270/ESA-c5d3d65, 2022.4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 148, + 295, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 148, + 295, + 190 + ], + "spans": [ + { + "bbox": [ + 62, + 148, + 295, + 190 + ], + "type": "text", + "content": "[3] Guillaume Astruc, Nicolas Gonthier, Clement Mallet, and Loic Landrieu. Anysat: An earth observation model for any resolutions, scales, and modalities. arXiv preprint arXiv:2412.14123, 2024. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 192, + 295, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 192, + 295, + 224 + ], + "spans": [ + { + "bbox": [ + 62, + 192, + 295, + 224 + ], + "type": "text", + "content": "[4] Guillaume Astruc, Nicolas Gonthier, Clement Mallet, and Loic Landrieu. Omnisat: Self-supervised modality fusion for earth observation, 2024. 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 225, + 296, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 225, + 296, + 290 + ], + "spans": [ + { + "bbox": [ + 62, + 225, + 296, + 290 + ], + "type": "text", + "content": "[5] Nicolas Audebert, Bertrand Le Saux, and Sébastien Lefèvre. Joint Learning from Earth Observation and OpenStreetMap Data to Get Faster Better Semantic Maps. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1552-1560, 2017. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 292, + 296, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 292, + 296, + 345 + ], + "spans": [ + { + "bbox": [ + 62, + 292, + 296, + 345 + ], + "type": "text", + "content": "[6] Benedikt Blumenstiel, Nassim Ait Ali Braham, Conrad M Albrecht, Stefano Maurogiovanni, and Paolo Fraccaro. SSL4EOS12 v1.1 - A Multimodal, Multiseasonal Dataset for Pretraining. arXiv preprint arXiv:2503.00168, 2025. 3, 13" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 347, + 296, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 347, + 296, + 413 + ], + "spans": [ + { + "bbox": [ + 62, + 347, + 296, + 413 + ], + "type": "text", + "content": "[7] Benedikt Blumenstiel, Paolo Fraccaro, Valerio Marsocci, Johannes Jakubik, Stefano Maurogiovanni, Mikolaj Czerkawski, Rocco Sedona, Gabriele Cavallaro, Thomas Brunschwiler, Juan Bernabe-Moreno, and Nicolas Longépé. Terramesh: A planetary mosaic of multimodal earth observation data. arXiv preprint arXiv:2504.11172, 2025. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 415, + 296, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 415, + 296, + 468 + ], + "spans": [ + { + "bbox": [ + 62, + 415, + 296, + 468 + ], + "type": "text", + "content": "[8] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 470, + 296, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 470, + 296, + 525 + ], + "spans": [ + { + "bbox": [ + 62, + 470, + 296, + 525 + ], + "type": "text", + "content": "[9] Derrick Bonafilia, Beth Tellman, Tyler Anderson, and Erica Issenberg. 
Sen1floods11: A georeferenced dataset to train and test deep learning flood algorithms for sentinel-1. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020. 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 525, + 296, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 525, + 296, + 579 + ], + "spans": [ + { + "bbox": [ + 57, + 525, + 296, + 579 + ], + "type": "text", + "content": "[10] Florian Bordes, Richard Yuanzhe Pang, Anurag Ajay, Alexander C Li, Adrien Bardes, Suzanne Petryk, Oscar Manas, Zhiqiu Lin, Anas Mahmoud, Bargav Jayaraman, et al. An introduction to vision-language modeling. arXiv preprint arXiv:2405.17247, 2024. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 581, + 296, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 581, + 296, + 647 + ], + "spans": [ + { + "bbox": [ + 57, + 581, + 296, + 647 + ], + "type": "text", + "content": "[11] Jize Cao, Zhe Gan, Yu Cheng, Licheng Yu, Yen-Chun Chen, and Jingjing Liu. Behind the scene: Revealing the secrets of pre-trained vision-and-language models. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VI 16, pages 565-580. Springer, 2020. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 647, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 647, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 647, + 296, + 713 + ], + "type": "text", + "content": "[12] Xu Cao, Tong Zhou, Yunsheng Ma, Wenqian Ye, Can Cui, Kun Tang, Zhipeng Cao, Kaizhao Liang, Ziran Wang, James M Rehg, et al. Maplm: A real-world large-scale vision-language benchmark for map and traffic scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21819-21830, 2024. 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 316, + 73, + 554, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 554, + 106 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 554, + 106 + ], + "type": "text", + "content": "[13] Yuxing Chen and Lorenzo Bruzzone. Self-supervised change detection in multi-view remote sensing images. arXiv preprint arXiv:2103.05969, 2021. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 108, + 555, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 108, + 555, + 174 + ], + "spans": [ + { + "bbox": [ + 317, + 108, + 555, + 174 + ], + "type": "text", + "content": "[14] Chenwei Wang, et al. SAR Target Image Generation Method Using Azimuth-Controllable Generative Adversarial Network. IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing (JSTARS), Vol. 15, 2022. Online: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9933645&tag=1.16" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 176, + 555, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 176, + 555, + 209 + ], + "spans": [ + { + "bbox": [ + 316, + 176, + 555, + 209 + ], + "type": "text", + "content": "[15] Fabian Deuser, Konrad Habel, and Norbert Oswald. Sample4geo: Hard negative sampling for cross-view geolocation. arXiv preprint arXiv:2303.11851, 2023. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 211, + 555, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 211, + 555, + 264 + ], + "spans": [ + { + "bbox": [ + 317, + 211, + 555, + 264 + ], + "type": "text", + "content": "[16] Ivica Dimitrovski, Ivan Kitanovski, Dragi Kocev, and Nikola Simidjievski. Current trends in deep learning for earth observation: An open-source benchmark arena for image classification. ISPRS Journal of Photogrammetry and Remote Sensing, 197:18-35, 2023. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 267, + 555, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 267, + 555, + 332 + ], + "spans": [ + { + "bbox": [ + 316, + 267, + 555, + 332 + ], + "type": "text", + "content": "[17] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale, 2021. 2, 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 335, + 554, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 335, + 554, + 379 + ], + "spans": [ + { + "bbox": [ + 316, + 335, + 554, + 379 + ], + "type": "text", + "content": "[18] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, Wenlong Huang, et al. Palm-e: An embodied multimodal language model. 2023. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 381, + 496, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 381, + 496, + 392 + ], + "spans": [ + { + "bbox": [ + 316, + 381, + 496, + 392 + ], + "type": "text", + "content": "[19] Victor Durnov. xview2 1st place solution. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 395, + 555, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 395, + 555, + 426 + ], + "spans": [ + { + "bbox": [ + 317, + 395, + 555, + 426 + ], + "type": "text", + "content": "[20] Adam Van Etten, Dave Lindenbaum, and Todd M. Bacastow. Spacenet: A remote sensing dataset and challenge series, 2019. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 429, + 554, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 429, + 554, + 473 + ], + "spans": [ + { + "bbox": [ + 316, + 429, + 554, + 473 + ], + "type": "text", + "content": "[21] Casper Fibaek, Luke Camilleri, Andreas Luyts, Nikolaos Dionelis, and Bertrand Le Saux. PhilEO Bench: Evaluating Geo-Spatial Foundation Models, In Proc. Int Geoscience and Remote Sensing Symposium (IGARSS), 2024. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 475, + 555, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 475, + 555, + 507 + ], + "spans": [ + { + "bbox": [ + 316, + 475, + 555, + 507 + ], + "type": "text", + "content": "[22] Alistair Francis. Sensor independent cloud and shadow masking with partial labels and multimodal inputs. IEEE Transactions on Geoscience and Remote Sensing, 2024. 
4, 13" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 510, + 555, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 510, + 555, + 542 + ], + "spans": [ + { + "bbox": [ + 316, + 510, + 555, + 542 + ], + "type": "text", + "content": "[23] Alistair Francis and Mikolaj Czerkawski. Major tom: Expandable datasets for earth observation. arXiv preprint arXiv:2402.12095, 2024. 3, 13" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 544, + 554, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 544, + 554, + 599 + ], + "spans": [ + { + "bbox": [ + 317, + 544, + 554, + 599 + ], + "type": "text", + "content": "[24] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 601, + 554, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 601, + 554, + 633 + ], + "spans": [ + { + "bbox": [ + 316, + 601, + 554, + 633 + ], + "type": "text", + "content": "[25] Anthony Fuller, Korean Millard, and James R. Green. Croma: Remote sensing representations with contrastive radar-optical masked autoencoders, 2023. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 636, + 554, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 636, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 317, + 636, + 554, + 712 + ], + "type": "text", + "content": "[26] Anatol Garioud, Nicolas Gonthier, Loic Landrieu, Apolline De Wit, Marion Valette, Marc Poupee, Sebastien Giordano, and Boris Wattrelos. FLAIR: a country-scale land cover semantic segmentation dataset from multi-source optical imagery. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. 3" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "7391" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 296, + 714 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 73, + 296, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 296, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 296, + 127 + ], + "type": "text", + "content": "[27] Carlos Gomes, Isabelle Wittmann, Damien Robert, Johannes Jakubik, Tim Reichelt, Michele Martone, Stefano Maurogiovanni, Rikard Vinge, Jonas Hurst, Erik Scheurer, et al. Lossy neural compression for geospatial analytics: A review. arXiv preprint arXiv:2503.01505, 2025. 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 128, + 296, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 128, + 296, + 171 + ], + "spans": [ + { + "bbox": [ + 56, + 128, + 296, + 171 + ], + "type": "text", + "content": "[28] Sebastian Hafner, Yifang Ban, and Andrea Nascetti. Unsupervised domain adaptation for global urban extraction using sentinel-1 sar and sentinel-2 msi data. 
Remote Sensing of Environment, 280:113192, 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 172, + 296, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 172, + 296, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 172, + 296, + 205 + ], + "type": "text", + "content": "[29] Boran Han, Shuai Zhang, Xingjian Shi, and Markus Reichstein. Bridging remote sensors with multisensor geospatial foundation models, 2024. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 205, + 296, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 205, + 296, + 260 + ], + "spans": [ + { + "bbox": [ + 56, + 205, + 296, + 260 + ], + "type": "text", + "content": "[30] Soyeon Caren Han, Feiqi Cao, Josiah Poon, and Roberto Navigli. Multimodal large language models and tunings: Vision, language, sensors, audio, and beyond. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 11294-11295, 2024. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 261, + 296, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 261, + 296, + 304 + ], + "spans": [ + { + "bbox": [ + 56, + 261, + 296, + 304 + ], + "type": "text", + "content": "[31] Jitesh Jain, Jianwei Yang, and Humphrey Shi. Vcoder: Versatile vision encoders for multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 27992-28002, 2024. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 305, + 296, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 305, + 296, + 426 + ], + "spans": [ + { + "bbox": [ + 56, + 305, + 296, + 426 + ], + "type": "text", + "content": "[32] Johannes Jakubik, Sujit Roy, C. E. Phillips, Paolo Fraccaro, Denys Godwin, Bianca Zadrozny, Daniela Szwarcman, Carlos Gomes, Gabby Nyirjesy, Blair Edwards, Daiki Kimura, Naomi Simumba, Linsong Chu, S. Karthik Mikkavilli, Devyani Lambhate, Kamal Das, Ranjini Bangalore, Dario Oliveira, Michal Muszynski, Kumar Ankur, Muthukumaran Ramasubramanian, Iksha Gurung, Sam Khallaghi, Hanxi, Li, Michael Cecil, Maryam Ahmadi, Fatemeh Kordi, Hamed Alemohammad, Manil Maskey, Raghu Ganti, Kommy Weldemariam, and Rahul Ramachandran. Foundation models for generalist geospatial artificial intelligence, 2023. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 426, + 296, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 426, + 296, + 470 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 296, + 470 + ], + "type": "text", + "content": "[33] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, page 2. Minneapolis, Minnesota, 2019. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 471, + 296, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 471, + 296, + 514 + ], + "spans": [ + { + "bbox": [ + 56, + 471, + 296, + 514 + ], + "type": "text", + "content": "[34] Samar Khanna, Patrick Liu, Linqi Zhou, Chenlin Meng, Robin Rombach, Marshall Burke, David Lobell, and Stefano Ermon. Diffusionsat: A generative foundation model for satellite imagery, 2023. 
3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 514, + 296, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 514, + 296, + 602 + ], + "spans": [ + { + "bbox": [ + 56, + 514, + 296, + 602 + ], + "type": "text", + "content": "[35] Kohei Arai, Michihiro Mikamo, and Shunsuke Onishi. Method for Image Quality Evaluation of Satellite-based SAR Data. International Journal of Advanced Computer Science and Applications (IJACSA), Vol. 14, No. 7, 2023. Online: http://thesai.org/Downloads/Volume14No7/Paper_13-Method_for/Image_Quality_Evaluation_of_Satellite_based_SAR_Data.pdf.16" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 602, + 296, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 602, + 296, + 647 + ], + "spans": [ + { + "bbox": [ + 56, + 602, + 296, + 647 + ], + "type": "text", + "content": "[36] Saad Lahrichi, Zion Sheng, Shufan Xia, Kyle Bradbury, and Jordan Malof. Is self-supervised pre-training on satellite imagery better than imagenet? a systematic study with sentinel-2. arXiv preprint arXiv:2502.10669, 2025. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 647, + 296, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 296, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 296, + 689 + ], + "type": "text", + "content": "[37] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llavanext: Stronger llms supercharge multimodal capabilities in the wild, 2024. 4, 13" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 690, + 296, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 690, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 690, + 296, + 714 + ], + "type": "text", + "content": "[38] Jiaxin Li, Danfeng Hong, Lianru Gao, Jing Yao, Ke Zheng, Bing Zhang, and Jocelyn Chanussot. Deep learning in mul" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 333, + 73, + 555, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 555, + 106 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 555, + 106 + ], + "type": "text", + "content": "timodal remote sensing data fusion: A comprehensive review. International Journal of Applied Earth Observation and Geoinformation, 112:102926, 2022. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 108, + 555, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 108, + 555, + 152 + ], + "spans": [ + { + "bbox": [ + 316, + 108, + 555, + 152 + ], + "type": "text", + "content": "[39] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS journal of photogrammetry and remote sensing, 159:296-307, 2020. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 153, + 555, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 153, + 555, + 186 + ], + "spans": [ + { + "bbox": [ + 316, + 153, + 555, + 186 + ], + "type": "text", + "content": "[40] Xiang Li, Congcong Wen, Yuan Hu, Zhenghang Yuan, and Xiao Xiang Zhu. Vision-language models in remote sensing: Current progress and future trends, 2024. 
3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 189, + 555, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 189, + 555, + 243 + ], + "spans": [ + { + "bbox": [ + 316, + 189, + 555, + 243 + ], + "type": "text", + "content": "[41] Zhiqiu Lin, Samuel Yu, Zhiyi Kuang, Deepak Pathak, and Deva Ramanan. Multimodality helps unimodality: Cross-modal few-shot learning with multimodal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19325-19337, 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 246, + 555, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 246, + 555, + 289 + ], + "spans": [ + { + "bbox": [ + 316, + 246, + 555, + 289 + ], + "type": "text", + "content": "[42] Fan Liu, Delong Chen, Zhangqingyun Guan, Xiaocong Zhou, Jiale Zhu, Qiaolin Ye, Liyong Fu, and Jun Zhou. Remoteclip: A vision language foundation model for remote sensing, 2024. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 292, + 555, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 292, + 555, + 324 + ], + "spans": [ + { + "bbox": [ + 316, + 292, + 555, + 324 + ], + "type": "text", + "content": "[43] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s, 2022. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 327, + 555, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 327, + 555, + 392 + ], + "spans": [ + { + "bbox": [ + 316, + 327, + 555, + 392 + ], + "type": "text", + "content": "[44] Gabriel Machado, Edemir Ferreira, Keiller Nogueira, Hugo Oliveira, Matheus Brito, Pedro Henrique Targino Gama, and Jefersson Alex dos Santos. Airround and cv-brct: Novel multiview datasets for scene classification. IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, 14:488-503, 2020. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 395, + 555, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 395, + 555, + 461 + ], + "spans": [ + { + "bbox": [ + 316, + 395, + 555, + 461 + ], + "type": "text", + "content": "[45] Gengchen Mai, Chris Cundy, Kristy Choi, Yingjie Hu, Ni Lao, and Stefano Ermon. Towards a foundation model for geospatial artificial intelligence (vision paper). In Proceedings of the 30th International Conference on Advances in Geographic Information Systems, New York, NY, USA, 2022. Association for Computing Machinery. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 463, + 555, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 463, + 555, + 518 + ], + "spans": [ + { + "bbox": [ + 316, + 463, + 555, + 518 + ], + "type": "text", + "content": "[46] Oscar Manas, Alexandre Lacoste, Xavier Giró-i Nieto, David Vazquez, and Pau Rodriguez. Seasonal contrast: Unsupervised pre-training from uncurated remote sensing data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9414-9423, 2021. 
2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 520, + 555, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 520, + 555, + 574 + ], + "spans": [ + { + "bbox": [ + 316, + 520, + 555, + 574 + ], + "type": "text", + "content": "[47] Clive Tinashe Marimo, Benedikt Blumenstiel, Maximilian Nitsche, Johannes Jakubik, and Thomas Brunschwiler. Beyond the visible: Multispectral vision-language learning for earth observation. arXiv preprint arXiv:2503.15969, 2025. 2, 4, 13" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 577, + 555, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 577, + 555, + 609 + ], + "spans": [ + { + "bbox": [ + 316, + 577, + 555, + 609 + ], + "type": "text", + "content": "[48] Valerio Marsocci and Nicolas Audebert. Cross-sensor self-supervised training and alignment for remote sensing, 2024. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 612, + 555, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 612, + 555, + 667 + ], + "spans": [ + { + "bbox": [ + 316, + 612, + 555, + 667 + ], + "type": "text", + "content": "[49] Valerio Marsocci, Yuru Jia, Georges Le Bellier, David Kerekes, Liang Zeng, Sebastian Hafner, Sebastian Gerard, Eric Brune, Ritu Yadav, Ali Shibli, et al. Pangaea: A global and inclusive benchmark for geospatial foundation models. arXiv preprint arXiv:2412.04204, 2024. 2, 8, 18" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 669, + 555, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 669, + 555, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 669, + 555, + 712 + ], + "type": "text", + "content": "[50] Matias Mendieta, Boran Han, Xingjian Shi, Yi Zhu, Chen Chen, and Mu Li. Gfm: Building geospatial foundation models via continual pretraining. arXiv preprint arXiv:2302.04476, 2023. 2" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7392" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 106 + ], + "type": "text", + "content": "[51] Fabian Mentzer, David Minnen, Eirikur Agustsson, and Michael Tschannen. Finite scalar quantization: Vq-vae made simple. arXiv preprint arXiv:2309.15505, 2023. 4, 15" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 107, + 296, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 296, + 140 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 296, + 140 + ], + "type": "text", + "content": "[52] David Mizrahi, Roman Bachmann, Oğuzhan Fatih Kar, Teresa Yeo, Mingfei Gao, Afshin Dehghan, and Amir Zamir. 4m: Massively multimodal masked modeling, 2023. 
4, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 141, + 296, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 141, + 296, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 141, + 296, + 206 + ], + "type": "text", + "content": "[53] Andrea Nascetti, RITU YADAV, Kirill Brodt, Qixun Qu, Hongwei Fan, Yuri Shendryk, Isha Shah, and Christine Chung. Biomasssters: A benchmark dataset for forest biomass estimation using multi-modal satellite time-series. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 209, + 294, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 209, + 294, + 252 + ], + "spans": [ + { + "bbox": [ + 56, + 209, + 294, + 252 + ], + "type": "text", + "content": "[54] Vishal Nedungadi, Ankit Kariryaa, Stefan Oehmcke, Serge Belongie, Christian Igel, and Nico Lang. Mmearth: Exploring multi-modal pretext tasks for geospatial representation learning. arXiv preprint arXiv:2405.02771, 2024. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 254, + 296, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 254, + 296, + 297 + ], + "spans": [ + { + "bbox": [ + 56, + 254, + 296, + 297 + ], + "type": "text", + "content": "[55] Fernando Paolo, Tsu ting Tim Lin, Ritwik Gupta, Bryce Goodman, Nirav Patel, Daniel Kuster, David Kroodsma, and Jared Dunnmon. xview3-sar: Detecting dark fishing activity using synthetic aperture radar imagery, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 299, + 296, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 299, + 296, + 364 + ], + "spans": [ + { + "bbox": [ + 56, + 299, + 296, + 364 + ], + "type": "text", + "content": "[56] Prabhishek Singh and Raj Shree. Analysis and effects of speckle noise in SAR images. In Proc. International Conference on Advances in Computing, Communication, & Automation (ICACCA), 2016. DOI: 10.1109/ICAC-CAF.2016.7748978. Online: http://ieeexplore.ieee.org/document/7748978.16" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 365, + 296, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 296, + 431 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 296, + 431 + ], + "type": "text", + "content": "[57] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PmLR, 2021. 3, 17" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 433, + 296, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 433, + 296, + 487 + ], + "spans": [ + { + "bbox": [ + 56, + 433, + 296, + 487 + ], + "type": "text", + "content": "[58] Shuhuai Ren, Linli Yao, Shicheng Li, Xu Sun, and Lu Hou. Timechat: A time-sensitive multimodal large language model for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14313-14323, 2024. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 489, + 296, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 489, + 296, + 532 + ], + "spans": [ + { + "bbox": [ + 56, + 489, + 296, + 532 + ], + "type": "text", + "content": "[59] Ayesha Shafique, Guo Cao, Zia Khan, Muhammad Asad, and Muhammad Aslam. Deep learning-based change detection in remote sensing images: A review. Remote Sensing, 14(4): 871, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 534, + 296, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 534, + 296, + 567 + ], + "spans": [ + { + "bbox": [ + 56, + 534, + 296, + 567 + ], + "type": "text", + "content": "[60] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. Advances in neural information processing systems, 30, 2017. 17" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 568, + 296, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 568, + 296, + 601 + ], + "spans": [ + { + "bbox": [ + 56, + 568, + 296, + 601 + ], + "type": "text", + "content": "[61] Aidan M Swope, Xander H Rudelis, and Kyle T Story. Representation learning for remote sensing: An unsupervised sensor fusion approach. arXiv preprint arXiv:2108.05094, 2021. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 602, + 296, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 602, + 296, + 677 + ], + "spans": [ + { + "bbox": [ + 56, + 602, + 296, + 677 + ], + "type": "text", + "content": "[62] Devis Tuia, Konrad Schindler, Begüm Demir, Gustau Camps-Valls, Xiao Xiang Zhu, Mrinalini Kochupillai, Sašo Džeroski, Jan N. van Rijn, Holger H. Hoos, Fabio Del Frate, Mihai Datcu, Jorge-Arnulfo Quiane-Ruiz, Volker Markl, Bertrand Le Saux, and Rochelle Schneider. Artificial intelligence to advance earth observation: a perspective, 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 680, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 680, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 680, + 296, + 713 + ], + "type": "text", + "content": "[63] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 4" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 316, + 72, + 554, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 554, + 106 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 554, + 106 + ], + "type": "text", + "content": "[64] Yi Wang, Conrad M Albrecht, Nassim Ait Ali Braham, Lichao Mou, and Xiao Xiang Zhu. Self-supervised learning in remote sensing: A review. arXiv preprint arXiv:2206.13188, 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 107, + 555, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 107, + 555, + 171 + ], + "spans": [ + { + "bbox": [ + 316, + 107, + 555, + 171 + ], + "type": "text", + "content": "[65] Yi Wang, Nassim Ait Ali Braham, Zhitong Xiong, Chenying Liu, Conrad M Albrecht, and Xiao Xiang Zhu. Ssl4eos12: A large-scale multimodal, multitemporal dataset for self-supervised learning in earth observation [software and data sets]. 
IEEE Geoscience and Remote Sensing Magazine, 11 (3):98-106, 2023. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 173, + 555, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 173, + 555, + 237 + ], + "spans": [ + { + "bbox": [ + 316, + 173, + 555, + 237 + ], + "type": "text", + "content": "[66] Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Zhe Chen, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionllm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. Advances in Neural Information Processing Systems, 37:69925-69975, 2025. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 239, + 555, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 239, + 555, + 281 + ], + "spans": [ + { + "bbox": [ + 316, + 239, + 555, + 281 + ], + "type": "text", + "content": "[67] Xinyu Bai and Feng Xu. Accelerating Diffusion for SAR-to-Optical Image Translation via Adversarial Consistency Distillation, 2024. Online: http://arxiv.org/pdf/2407.06095.16" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 282, + 554, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 282, + 554, + 337 + ], + "spans": [ + { + "bbox": [ + 316, + 282, + 554, + 337 + ], + "type": "text", + "content": "[68] Zhitong Xiong, Yi Wang, Fahong Zhang, Adam J. Stewart, Joëlle Hanna, Damian Borth, Ioannis Papoutsis, Bertrand Le Saux, Gustau Camps-Valls, and Xiao Xiang Zhu. Neural plasticity-inspired foundation model for observing the earth crossing modalities, 2024. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 338, + 555, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 338, + 555, + 381 + ], + "spans": [ + { + "bbox": [ + 316, + 338, + 555, + 381 + ], + "type": "text", + "content": "[69] Lingxiao Yang, Ru-Yuan Zhang, Yanchen Wang, and Xiaohua Xie. Mma: Multi-modal adapter for vision-language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23826-23837, 2024. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 383, + 555, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 383, + 555, + 446 + ], + "spans": [ + { + "bbox": [ + 316, + 383, + 555, + 446 + ], + "type": "text", + "content": "[70] Qidong Yang, Jonathan Giezendanner, Daniel Salles Civitarese, Johannes Jakubik, Eric Schmitt, Anirban Chandra, Jeremy Vila, Detlef Hohl, Chris Hill, Campbell Watson, et al. Multi-modal graph neural networks for localized off-grid weather forecasting. arXiv preprint arXiv:2410.12938, 2024. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 449, + 555, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 449, + 555, + 501 + ], + "spans": [ + { + "bbox": [ + 316, + 449, + 555, + 501 + ], + "type": "text", + "content": "[71] Zhiping Yu, Chenyang Liu, Liqin Liu, Zhenwei Shi, and Zhengxia Zou. Metaearth: A generative foundation model for global-scale remote sensing image generation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 
3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 503, + 555, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 503, + 555, + 546 + ], + "spans": [ + { + "bbox": [ + 316, + 503, + 555, + 546 + ], + "type": "text", + "content": "[72] Xiaohui Yuan, Jianfang Shi, and Lichuan Gu. A review of deep learning methods for semantic segmentation of remote sensing imagery. Expert Systems with Applications, 169: 114417, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 548, + 555, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 548, + 555, + 591 + ], + "spans": [ + { + "bbox": [ + 316, + 548, + 555, + 591 + ], + "type": "text", + "content": "[73] Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli. Image quality assessment: From error visibility to structural similarity. IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, 2004. 16" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 593, + 555, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 593, + 555, + 634 + ], + "spans": [ + { + "bbox": [ + 316, + 593, + 555, + 634 + ], + "type": "text", + "content": "[74] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 636, + 555, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 636, + 555, + 690 + ], + "spans": [ + { + "bbox": [ + 316, + 636, + 555, + 690 + ], + "type": "text", + "content": "[75] Linying Zhao and Shunping Ji. Cnn, rn, or vit? an evaluation of different deep learning architectures for spatio-temporal representation of sentinel time series. IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, 16:44-56, 2022. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 692, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 692, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 692, + 554, + 713 + ], + "type": "text", + "content": "[76] Xiao Xiang Zhu, Devis Tuia, Lichao Mou, Gui-Song Xia, Liangpei Zhang, Feng Xu, and Friedrich Fraundorfer. Deep" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "7393" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 73, + 297, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 73, + 297, + 106 + ], + "spans": [ + { + "bbox": [ + 75, + 73, + 297, + 106 + ], + "type": "text", + "content": "learning in remote sensing: A comprehensive review and list of resources. IEEE geoscience and remote sensing magazine, 5(4):8-36, 2017. 
2" + } + ] + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 316, + 757 + ], + "type": "text", + "content": "7394" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file